Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1094,6 +1094,346 @@ tests:
operator: "Exists"
effect: "NoSchedule"
expectedError: 'spec.telemeterClientConfig.tolerations: Too many: 11: must have at most 10 items'
- name: Should be able to create ThanosQuerierConfig with valid resources
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "5m"
limit: "200m"
- name: "memory"
request: "12Mi"
limit: "200Mi"
expected: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "5m"
limit: "200m"
- name: "memory"
request: "12Mi"
limit: "200Mi"
- name: Should be able to create ThanosQuerierConfig with valid tolerations
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
tolerations:
- key: "node-role.kubernetes.io/infra"
operator: "Exists"
effect: "NoSchedule"
expected: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
tolerations:
- key: "node-role.kubernetes.io/infra"
operator: "Exists"
effect: "NoSchedule"
- name: Should be able to create ThanosQuerierConfig with valid topologySpreadConstraints
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: thanos-querier
- maxSkew: 2
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: thanos-querier
expected: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app: thanos-querier
- maxSkew: 2
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: thanos-querier
- name: Should be able to create ThanosQuerierConfig with all fields
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
nodeSelector:
kubernetes.io/os: linux
resources:
- name: "cpu"
request: "5m"
- name: "memory"
request: "12Mi"
tolerations:
- key: "node-role.kubernetes.io/infra"
operator: "Exists"
effect: "NoSchedule"
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
expected: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
nodeSelector:
kubernetes.io/os: linux
resources:
- name: "cpu"
request: "5m"
- name: "memory"
request: "12Mi"
tolerations:
- key: "node-role.kubernetes.io/infra"
operator: "Exists"
effect: "NoSchedule"
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
- name: Should reject ThanosQuerierConfig with empty object
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig: {}
expectedError: 'spec.thanosQuerierConfig: Invalid value: 0: spec.thanosQuerierConfig in body should have at least 1 properties'
- name: Should reject ThanosQuerierConfig with duplicate resource names
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "100m"
- name: "cpu"
request: "200m"
expectedError: 'spec.thanosQuerierConfig.resources[1]: Duplicate value: map[string]interface {}{"name":"cpu"}'
- name: Should reject ThanosQuerierConfig with too many resources
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "100m"
- name: "memory"
request: "64Mi"
- name: "hugepages-2Mi"
request: "32Mi"
- name: "hugepages-1Gi"
request: "1Gi"
- name: "ephemeral-storage"
request: "1Gi"
- name: "nvidia.com/gpu"
request: "1"
expectedError: 'spec.thanosQuerierConfig.resources: Too many: 6: must have at most 5 items'
- name: Should reject ThanosQuerierConfig with limit less than request
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "500m"
limit: "200m"
expectedError: 'spec.thanosQuerierConfig.resources[0]: Invalid value: "object": limit must be greater than or equal to request'
- name: Should reject ThanosQuerierConfig with zero request
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "0"
expectedError: 'spec.thanosQuerierConfig.resources[0].request: Invalid value: "": request must be a positive, non-zero quantity'
- name: Should reject ThanosQuerierConfig with zero limit
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "100m"
limit: "0"
expectedError: 'spec.thanosQuerierConfig.resources[0].limit: Invalid value: "": limit must be a positive, non-zero quantity'
Comment on lines +1269 to +1289
Copy link
Copy Markdown

@coderabbitai coderabbitai bot Apr 7, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Zero-quantity assertions are inconsistent with the ContainerResource validation change.

Line 1269 and Line 1280 still expect “positive, non-zero quantity” errors, but this PR explicitly removes per-field positive-quantity CEL checks on request/limit.
Also, the Line 1280 case (request: "100m", limit: "0") should fail because limit < request, not because limit is non-positive.

Please update these tests to reflect the new behavior (accept zero where applicable, and keep inequality validation separate).

Suggested test adjustment
-    - name: Should reject ThanosQuerierConfig with zero request
+    - name: Should allow ThanosQuerierConfig with zero request
       initial: |
         apiVersion: config.openshift.io/v1alpha1
         kind: ClusterMonitoring
         spec:
           thanosQuerierConfig:
             resources:
               - name: "cpu"
                 request: "0"
-      expectedError: 'spec.thanosQuerierConfig.resources[0].request: Invalid value: "": request must be a positive, non-zero quantity'
+      expected: |
+        apiVersion: config.openshift.io/v1alpha1
+        kind: ClusterMonitoring
+        spec:
+          thanosQuerierConfig:
+            resources:
+              - name: "cpu"
+                request: "0"

-    - name: Should reject ThanosQuerierConfig with zero limit
+    - name: Should allow ThanosQuerierConfig with zero limit when request is omitted
       initial: |
         apiVersion: config.openshift.io/v1alpha1
         kind: ClusterMonitoring
         spec:
           thanosQuerierConfig:
             resources:
               - name: "cpu"
-                request: "100m"
                 limit: "0"
-      expectedError: 'spec.thanosQuerierConfig.resources[0].limit: Invalid value: "": limit must be a positive, non-zero quantity'
+      expected: |
+        apiVersion: config.openshift.io/v1alpha1
+        kind: ClusterMonitoring
+        spec:
+          thanosQuerierConfig:
+            resources:
+              - name: "cpu"
+                limit: "0"
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@config/v1alpha1/tests/clustermonitorings.config.openshift.io/ClusterMonitoringConfig.yaml`
around lines 1269-1289, update the two failing tests in
ClusterMonitoringConfig.yaml: for the case named "Should reject
ThanosQuerierConfig with zero request" stop expecting a "positive, non‑zero
quantity" error (remove the expectedError or mark the input as valid) because
per-field positive checks on request were removed; for "Should reject
ThanosQuerierConfig with zero limit" change the expectedError to assert the
inequality validation (i.e., that limit is less than request) instead of a
non‑positive error—update the expected error to reference
spec.thanosQuerierConfig.resources[0].limit and a message stating that limit
must be >= request.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

but this PR explicitly removes per-field positive-quantity CEL checks on request/limit.

Does it? Looks to me like there are still CEL expressions that enforce the parsed quantity value is greater than 0.

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧩 Analysis chain

Let me verify what CEL validations actually exist on the thanosQuerierConfig resources in the CRD.


🏁 Script executed:

#!/bin/bash
# Check CEL validations for thanosQuerierConfig resources in the CRD
rg -A 30 "thanosQuerierConfig" payload-manifests/crds/0000_10_config-operator_01_clustermonitorings.crd.yaml | grep -A 10 "x-kubernetes-validations" | head -80

# Also check the types file directly
rg -B5 -A10 "positive.*non-zero\|non-zero.*positive\|quantity.*greater" config/v1alpha1/types_cluster_monitoring.go

# Check the CRD YAML for the request/limit validation rules within thanosQuerierConfig
python3 - <<'EOF'
import yaml, sys

with open("payload-manifests/crds/0000_10_config-operator_01_clustermonitorings.crd.yaml") as f:
    crd = yaml.safe_load(f)

spec_props = crd["spec"]["versions"][0]["schema"]["openAPIV3Schema"]["properties"]["spec"]["properties"]

if "thanosQuerierConfig" in spec_props:
    tqc = spec_props["thanosQuerierConfig"]
    resources = tqc.get("properties", {}).get("resources", {})
    items = resources.get("items", {})
    print("=== ThanosQuerierConfig resources items validations ===")
    print(yaml.dump(items, default_flow_style=False))
else:
    print("thanosQuerierConfig not found")
EOF

Length of output: 2356


@everettraven, you're absolutely right. The per-field positive-quantity CEL checks are still present in the CRD schema for thanosQuerierConfig.resources items:

x-kubernetes-validations:
- message: request must be a positive, non-zero quantity
  rule: quantity(self).isGreaterThan(quantity('0'))
x-kubernetes-validations:
- message: limit must be a positive, non-zero quantity
  rule: quantity(self).isGreaterThan(quantity('0'))

My original comment was incorrect — the tests at those lines are valid and should stay as-is. I apologize for the noise.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It is true that this would also trigger the limit < request validation to fail though.

This could be simplified to only specifying limit, because the requirement is that at least one of limit and request is specified, meaning both aren't strictly required. I wouldn't block on that though. As long as we have a signal that the validation we want to fail is failing, this should be OK.

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@everettraven, that's a fair point. The current test with request: "100m" and limit: "0" does exercise both validations simultaneously, whereas limit: "0" alone would more precisely isolate the positive-quantity check. Either way the signal is there. Since you're not blocking on it, the author can choose to simplify or leave as-is — both are correct.

- name: Should reject ThanosQuerierConfig with too many topologySpreadConstraints
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: "zone1"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone2"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone3"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone4"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone5"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone6"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone7"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone8"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone9"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone10"
whenUnsatisfiable: DoNotSchedule
- maxSkew: 1
topologyKey: "zone11"
whenUnsatisfiable: DoNotSchedule
expectedError: 'spec.thanosQuerierConfig.topologySpreadConstraints: Too many: 11: must have at most 10 items'
- name: Should reject ThanosQuerierConfig with empty topologySpreadConstraints array
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
topologySpreadConstraints: []
expectedError: 'spec.thanosQuerierConfig.topologySpreadConstraints: Invalid value: 0: spec.thanosQuerierConfig.topologySpreadConstraints in body should have at least 1 items'
- name: Should reject ThanosQuerierConfig with empty resources array
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources: []
expectedError: 'spec.thanosQuerierConfig.resources: Invalid value: 0: spec.thanosQuerierConfig.resources in body should have at least 1 items'
- name: Should reject ThanosQuerierConfig with empty nodeSelector
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
nodeSelector: {}
expectedError: 'spec.thanosQuerierConfig.nodeSelector: Invalid value: 0: spec.thanosQuerierConfig.nodeSelector in body should have at least 1 properties'
- name: Should reject ThanosQuerierConfig with more than 10 nodeSelector entries
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
nodeSelector:
key1: val1
key2: val2
key3: val3
key4: val4
key5: val5
key6: val6
key7: val7
key8: val8
key9: val9
key10: val10
key11: val11
expectedError: 'spec.thanosQuerierConfig.nodeSelector: Too many: 11: must have at most 10 items'
- name: Should reject ThanosQuerierConfig with empty tolerations array
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
tolerations: []
expectedError: 'spec.thanosQuerierConfig.tolerations: Invalid value: 0: spec.thanosQuerierConfig.tolerations in body should have at least 1 items'
- name: Should reject ThanosQuerierConfig with more than 10 tolerations
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
tolerations:
- key: "key1"
operator: "Exists"
effect: "NoSchedule"
- key: "key2"
operator: "Exists"
effect: "NoSchedule"
- key: "key3"
operator: "Exists"
effect: "NoSchedule"
- key: "key4"
operator: "Exists"
effect: "NoSchedule"
- key: "key5"
operator: "Exists"
effect: "NoSchedule"
- key: "key6"
operator: "Exists"
effect: "NoSchedule"
- key: "key7"
operator: "Exists"
effect: "NoSchedule"
- key: "key8"
operator: "Exists"
effect: "NoSchedule"
- key: "key9"
operator: "Exists"
effect: "NoSchedule"
- key: "key10"
operator: "Exists"
effect: "NoSchedule"
- key: "key11"
operator: "Exists"
effect: "NoSchedule"
expectedError: 'spec.thanosQuerierConfig.tolerations: Too many: 11: must have at most 10 items'
- name: Should reject ThanosQuerierConfig with duplicate topologySpreadConstraints
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
- maxSkew: 2
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
expectedError: "Duplicate value"
onUpdate:
- name: Should reject updating TelemeterClientConfig to empty object
initial: |
Expand Down Expand Up @@ -2089,3 +2429,18 @@ tests:
request: "100m"
- name: "memory"
request: "64Mi"
- name: Should reject updating ThanosQuerierConfig to empty object
initial: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig:
resources:
- name: "cpu"
request: "5m"
updated: |
apiVersion: config.openshift.io/v1alpha1
kind: ClusterMonitoring
spec:
thanosQuerierConfig: {}
expectedError: 'spec.thanosQuerierConfig: Invalid value: 0: spec.thanosQuerierConfig in body should have at least 1 properties'
Loading