diff --git a/AGENTS.md b/AGENTS.md index 139da527444..dcb08b36bd3 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -114,3 +114,7 @@ Import order: stdlib, third-party packages, internal Cortex packages (separated - Sign commits with DCO: `git commit -s -m "message"` - Run `make doc` if config/flags changed - Include CHANGELOG entry for user-facing changes + +## Related Policies + +This file (`AGENTS.md`) provides technical guidance **to** AI coding agents working in this repository (build commands, architecture, conventions). For the policy governing **human use** of AI tools when preparing contributions, see [GENAI_POLICY.md](GENAI_POLICY.md). diff --git a/CHANGELOG.md b/CHANGELOG.md index 44ced0e184e..81affa07f81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,22 @@ # Changelog ## master / unreleased +* [FEATURE] Distributor: Add experimental `-distributor.enable-start-timestamp` flag for Prometheus Remote Write 2.0. When enabled, `StartTimestamp (ST)` is ingested. #7371 +* [FEATURE] Memberlist: Add `-memberlist.cluster-label` and `-memberlist.cluster-label-verification-disabled` to prevent accidental cross-cluster gossip joins and support rolling label rollout. #7385 +* [ENHANCEMENT] Distributor: Introduce dynamic `Symbols` slice capacity pooling. #7398 #7401 * [ENHANCEMENT] Metrics Helper: Add native histogram support for aggregating and merging, including dual-format histogram handling that exposes both native and classic bucket formats. #7359 * [ENHANCEMENT] Cache: Add per-tenant TTL configuration for query results cache to control cache expiration on a per-tenant basis with separate TTLs for regular and out-of-order data. #7357 +* [CHANGE] Querier: Make query time range configurations per-tenant: `query_ingesters_within`, `query_store_after`, and `shuffle_sharding_ingesters_lookback_period`. Uses `model.Duration` instead of `time.Duration` to support serialization but has minimum unit of 1ms (nanoseconds/microseconds not supported). 
#7160 * [ENHANCEMENT] Tenant Federation: Add a local cache to regex resolver. #7363 * [ENHANCEMENT] Query Scheduler: Add `cortex_query_scheduler_tracked_requests` metric to track the current number of requests held by the scheduler. #7355 +* [ENHANCEMENT] Distributor: Optimize memory allocations by reusing the existing capacity of pooled slices in the Prometheus Remote Write 2.0 path. #7392 * [BUGFIX] Alertmanager: Fix disappearing user config and state when ring is temporarily unreachable. #7372 * [BUGFIX] Fix nil when ingester_query_max_attempts > 1. #7369 * [BUGFIX] Querier: Fix queryWithRetry and labelsWithRetry returning (nil, nil) on cancelled context by propagating ctx.Err(). #7370 * [BUGFIX] Metrics Helper: Fix non-deterministic bucket order in merged histograms by sorting buckets after map iteration, matching Prometheus client library behavior. #7380 +* [BUGFIX] Fix memory leak in `ReuseWriteRequestV2` by explicitly clearing the `Symbols` backing array string pointers before returning the object to `sync.Pool`. #7373 +* [BUGFIX] Distributor: Return HTTP 401 Unauthorized when tenant ID resolution fails in the Prometheus Remote Write 2.0 path. #7389 +* [BUGFIX] KV store: Fix false-positive `status_code="500"` metrics for HA tracker CAS operations when using memberlist. #7408 ## 1.21.0 in progress diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1186e1ce336..476ffc52b1c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,5 @@ # Contributing to Cortex See [https://cortexmetrics.io/docs/contributing/](https://cortexmetrics.io/docs/contributing/). + +If using generative AI tools, please also review our [Generative AI Contribution Policy](GENAI_POLICY.md). 
diff --git a/GENAI_POLICY.md b/GENAI_POLICY.md new file mode 100644 index 00000000000..45bdc54e75e --- /dev/null +++ b/GENAI_POLICY.md @@ -0,0 +1,62 @@ +# Generative AI Contribution Policy + +## Purpose + +The Cortex project welcomes contributions that make use of generative AI (GenAI) tools. AI assistants can help contributors write code, explore the codebase, draft documentation, and improve productivity. However, **humans bear full responsibility** for every contribution they submit, regardless of how it was produced. + +This policy applies to all repositories under the [cortexproject](https://github.com/cortexproject) GitHub organisation. + +## Permitted Use of AI Tools + +The following uses of AI tools are encouraged and permitted: + +- **Coding assistants** - Using tools like GitHub Copilot, Claude Code, Cursor, or similar to help write, refactor, or debug code. +- **Codebase exploration** - Querying AI tools to understand project architecture, locate relevant code, or learn conventions. +- **Documentation drafting** - Generating initial drafts of documentation, comments, or commit messages. +- **PR review assistance** - Using AI to help review code, identify potential issues, or suggest improvements. +- **Maintainer-configured review bots** - Automated review bots configured by project maintainers. + +## Contributor Responsibilities + +When using AI tools to assist with contributions, you must: + +1. **Understand every line you submit.** You must be able to independently explain any change in your contribution. "The AI wrote it" is not an acceptable justification during review. + +2. **Review and validate AI output.** Never submit AI-generated content verbatim without careful review. Verify correctness, check for hallucinated APIs or dependencies, and ensure the output follows Cortex conventions. + +3. 
**Disclose significant AI usage.** If AI generated the bulk of a contribution (e.g., an entire new feature, large refactors, or substantial documentation), note this in the PR description. Minor assistance (autocomplete, small suggestions) does not require disclosure. + +4. **Honour the DCO.** Your `Signed-off-by` line on each commit certifies the [Developer Certificate of Origin](https://developercertificate.org/) for **all** content in that commit, including any AI-generated portions. You are attesting that you have the right to submit the work. + +5. **Meet the same quality bar.** AI-assisted contributions are held to the same standards as any other contribution: tests, documentation, CHANGELOG entries, passing CI, and adherence to the project's [design patterns and conventions](docs/contributing/design-patterns-and-conventions.md). + +## GitHub Communications + +- **Issues, pull request reviews, and discussions** must be substantively human-authored. Do not submit bulk AI-generated comments, reviews, or issue reports. +- Sharing AI-generated analyses (e.g., "I asked an AI to summarise the failure modes and here is what it found") is acceptable when clearly attributed and verified by the contributor. +- Do not use AI tools to generate large volumes of low-quality issues or review comments. + +## Maintainer Authority + +Maintainers may: + +- **Request disclosure** of AI tool usage for any contribution. +- **Close or request revision** of PRs or issues that appear to contain unreviewed AI-generated content. +- **Escalate persistent low-effort submissions** through the project's normal [Code of Conduct](code-of-conduct.md) enforcement process. 
+ +## Relationship to Other Policies + +| Document | Purpose | +|----------|---------| +| [Contributing Guide](CONTRIBUTING.md) | General contribution workflow and requirements | +| [Code of Conduct](code-of-conduct.md) | Community behaviour standards | +| [Governance](GOVERNANCE.md) | Project governance and decision-making | +| [AGENTS.md](AGENTS.md) | Technical guidance **to** AI coding agents working in this repo | + +**AGENTS.md vs GENAI_POLICY.md:** `AGENTS.md` provides instructions that AI coding agents consume when working with the codebase (build commands, architecture, conventions). This document (`GENAI_POLICY.md`) governs how **human contributors** use AI tools when preparing their contributions. + +## References + +- [OpenTelemetry GenAI Contribution Policy](https://github.com/open-telemetry/community/blob/main/policies/genai.md) +- [Linux Foundation AI Guidelines](https://www.linuxfoundation.org/legal/generative-ai) +- [Developer Certificate of Origin](https://developercertificate.org/) diff --git a/code-of-conduct.md b/code-of-conduct.md index fb32cfa04ee..53ea5476196 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,3 +1,3 @@ ## Cortex Community Code of Conduct -Cortex follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Cortex follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index b1aa703feed..eade17576f6 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -117,11 +117,6 @@ querier: # CLI flag: -querier.max-samples [max_samples: | default = 50000000] - # Maximum lookback beyond which queries are not sent to ingester. 0 means all - # queries are sent to ingester. - # CLI flag: -querier.query-ingesters-within - [query_ingesters_within: | default = 0s] - # Enable returning samples stats per steps in query response. 
# CLI flag: -querier.per-step-stats-enabled [per_step_stats_enabled: | default = false] @@ -131,14 +126,6 @@ querier: # CLI flag: -querier.response-compression [response_compression: | default = "gzip"] - # The time after which a metric should be queried from storage and not just - # ingesters. 0 means all queries are sent to store. When running the blocks - # storage, if this option is enabled, the time range of the query sent to the - # store will be manipulated to ensure the query end is not more recent than - # 'now - query-store-after'. - # CLI flag: -querier.query-store-after - [query_store_after: | default = 0s] - # Maximum duration into the future you can query. 0 to disable. # CLI flag: -querier.max-query-into-future [max_query_into_future: | default = 10m] @@ -247,16 +234,6 @@ querier: # CLI flag: -querier.ingester-query-max-attempts [ingester_query_max_attempts: | default = 1] - # When distributor's sharding strategy is shuffle-sharding and this setting is - # > 0, queriers fetch in-memory series from the minimum set of required - # ingesters, selecting only ingesters which may have received series since - # 'now - lookback period'. The lookback period should be greater or equal than - # the configured 'query store after' and 'query ingesters within'. If this - # setting is 0, queriers always query all ingesters (ingesters shuffle - # sharding on read path is disabled). - # CLI flag: -querier.shuffle-sharding-ingesters-lookback-period - [shuffle_sharding_ingesters_lookback_period: | default = 0s] - thanos_engine: # Experimental. 
Use Thanos promql engine # https://github.com/thanos-io/promql-engine rather than the Prometheus diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 7c1cd7265df..4a083422adc 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -4111,6 +4111,12 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # CLI flag: -distributor.enable-type-and-unit-labels [enable_type_and_unit_labels: | default = false] +# EXPERIMENTAL: If true, StartTimestampMs (ST) is handled for remote write v2 +# samples and histograms. CreatedTimestamp (CT) is used as a fallback when ST is +# not set. +# CLI flag: -distributor.enable-start-timestamp +[enable_start_timestamp: | default = false] + # The maximum number of active series per user, per ingester. 0 to disable. # CLI flag: -ingester.max-series-per-user [max_series_per_user: | default = 5000000] @@ -4286,6 +4292,25 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # zones are not available. [query_partial_data: | default = false] +# Maximum lookback duration for querying data from ingesters. Queries for data +# older than this will only query the long-term storage. This is a per-tenant +# limit that can be overridden in the runtime configuration. Should be less than +# or equal to close-idle-tsdb-timeout. +# CLI flag: -limits.query-ingesters-within +[query_ingesters_within: | default = 0s] + +# Minimum age of data before querying the long-term storage. Queries for data +# younger than this will only query ingesters. This is a per-tenant limit that +# can be overridden in the runtime configuration. +# CLI flag: -limits.query-store-after +[query_store_after: | default = 0s] + +# Lookback period for shuffle sharding of ingesters. This is a per-tenant limit +# that can be overridden in the runtime configuration. 
Should be greater than or +# equal to query-ingesters-within. +# CLI flag: -limits.shuffle-sharding-ingesters-lookback-period +[shuffle_sharding_ingesters_lookback_period: | default = 0s] + # The maximum number of rows that can be fetched when querying parquet storage. # Each row maps to a series in a parquet file. This limit applies before # materializing chunks. 0 to disable. @@ -4564,6 +4589,20 @@ The `memberlist_config` configures the Gossip memberlist. # CLI flag: -memberlist.advertise-port [advertise_port: | default = 7946] +# The cluster label is an optional string to include in outbound packets and +# gossip streams. Other members in the memberlist cluster will discard any +# message whose label doesn't match the configured one, unless the +# 'cluster-label-verification-disabled' configuration option is set to true. +# CLI flag: -memberlist.cluster-label +[cluster_label: | default = ""] + +# When true, memberlist doesn't verify that inbound packets and gossip streams +# have the cluster label matching the configured one. This verification should +# be disabled while rolling out the change to the configured cluster label in a +# live memberlist cluster. +# CLI flag: -memberlist.cluster-label-verification-disabled +[cluster_label_verification_disabled: | default = false] + # Other cluster members to join. Can be specified multiple times. It can be an # IP, hostname or an entry specified in the DNS Service Discovery format. # CLI flag: -memberlist.join @@ -4755,11 +4794,6 @@ The `querier_config` configures the Cortex querier. # CLI flag: -querier.max-samples [max_samples: | default = 50000000] -# Maximum lookback beyond which queries are not sent to ingester. 0 means all -# queries are sent to ingester. -# CLI flag: -querier.query-ingesters-within -[query_ingesters_within: | default = 0s] - # Enable returning samples stats per steps in query response. 
# CLI flag: -querier.per-step-stats-enabled [per_step_stats_enabled: | default = false] @@ -4769,14 +4803,6 @@ The `querier_config` configures the Cortex querier. # CLI flag: -querier.response-compression [response_compression: | default = "gzip"] -# The time after which a metric should be queried from storage and not just -# ingesters. 0 means all queries are sent to store. When running the blocks -# storage, if this option is enabled, the time range of the query sent to the -# store will be manipulated to ensure the query end is not more recent than 'now -# - query-store-after'. -# CLI flag: -querier.query-store-after -[query_store_after: | default = 0s] - # Maximum duration into the future you can query. 0 to disable. # CLI flag: -querier.max-query-into-future [max_query_into_future: | default = 10m] @@ -4885,16 +4911,6 @@ store_gateway_client: # CLI flag: -querier.ingester-query-max-attempts [ingester_query_max_attempts: | default = 1] -# When distributor's sharding strategy is shuffle-sharding and this setting is > -# 0, queriers fetch in-memory series from the minimum set of required ingesters, -# selecting only ingesters which may have received series since 'now - lookback -# period'. The lookback period should be greater or equal than the configured -# 'query store after' and 'query ingesters within'. If this setting is 0, -# queriers always query all ingesters (ingesters shuffle sharding on read path -# is disabled). -# CLI flag: -querier.shuffle-sharding-ingesters-lookback-period -[shuffle_sharding_ingesters_lookback_period: | default = 0s] - thanos_engine: # Experimental. 
Use Thanos promql engine # https://github.com/thanos-io/promql-engine rather than the Prometheus promql diff --git a/docs/configuration/single-process-config-blocks-gossip-1.yaml b/docs/configuration/single-process-config-blocks-gossip-1.yaml index 11f147e8ff6..ad3770974c7 100644 --- a/docs/configuration/single-process-config-blocks-gossip-1.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-1.yaml @@ -57,6 +57,7 @@ ingester: memberlist: bind_port: 7946 + cluster_label: gossip-demo join_members: - localhost:7947 abort_if_cluster_join_fails: false diff --git a/docs/configuration/single-process-config-blocks-gossip-2.yaml b/docs/configuration/single-process-config-blocks-gossip-2.yaml index db1683afc3b..35840ff7b31 100644 --- a/docs/configuration/single-process-config-blocks-gossip-2.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-2.yaml @@ -56,6 +56,7 @@ ingester: memberlist: bind_port: 7947 + cluster_label: gossip-demo join_members: - localhost:7946 abort_if_cluster_join_fails: false diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 4bb632023e5..be3ee78ce59 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -115,6 +115,7 @@ Currently experimental features are: - Distributor/Ingester: Stream push connection - Enable stream push connection between distributor and ingester by setting `-distributor.use-stream-push=true` on Distributor. - Add `__type__` and `__unit__` labels to OTLP and remote write v2 requests (`-distributor.enable-type-and-unit-labels`) + - Handle StartTimestampMs (ST) for remote write v2 samples and histograms, using CreatedTimestamp (CT) as a fallback when ST is not set (`-distributor.enable-start-timestamp`) - Ingester: Series Queried Metric - Enable on Ingester via `-ingester.active-queried-series-metrics-enabled=true` - Set the time window to expose via metrics using `-ingester.active-queried-series-metrics-windows=2h`. 
At least 1 time window is required to expose the metric. diff --git a/docs/contributing/_index.md b/docs/contributing/_index.md index 1f7b7bb8f5e..ae7cd41f015 100644 --- a/docs/contributing/_index.md +++ b/docs/contributing/_index.md @@ -21,6 +21,13 @@ a piece of work is finished it should: * Include a CHANGELOG message if users of Cortex need to hear about what you did. * If you have made any changes to flags or config, run `make doc` and commit the changed files to update the config file documentation. +## Use of AI Tools + +Cortex permits the use of generative AI tools to assist with contributions. Contributors remain +fully responsible for all submitted content. If AI generated the bulk of a contribution, please +disclose this in the PR description. See the full `GENAI_POLICY.md` +for details. + ## Formatting Cortex projects uses `goimports` tool (`go get golang.org/x/tools/cmd/goimports` to install) to format the Go files, and sort imports. We use goimports with `-local github.com/cortexproject/cortex` parameter, to put Cortex internal imports into a separate group. We try to keep imports sorted into three groups: imports from standard library, imports of 3rd party packages and internal Cortex imports. Goimports will fix the order, but will keep existing newlines between imports in the groups. We try to avoid extra newlines like that. diff --git a/docs/guides/gossip-ring-getting-started.md b/docs/guides/gossip-ring-getting-started.md index 13cf2c493bd..0ca7bd6650b 100644 --- a/docs/guides/gossip-ring-getting-started.md +++ b/docs/guides/gossip-ring-getting-started.md @@ -50,6 +50,7 @@ memberlist: # defaults to hostname node_name: "Ingester 1" bind_port: 7946 + cluster_label: "gossip-demo" join_members: - localhost:7947 abort_if_cluster_join_fails: false @@ -127,9 +128,10 @@ We don't need to change or add `memberlist.join_members` list. This new instance will discover other peers through it. 
When using Kubernetes, the suggested setup is to have a headless service pointing to all pods that want to be part of the gossip cluster, and then point `join_members` to this headless service. +In production, set `memberlist.cluster_label` to the same value on every Cortex process that should share the same gossip cluster. This helps avoid accidentally merging rings with other Cortex, Mimir, or Loki deployments that can reach the same seed addresses. + We also don't need to change `/tmp/cortex/storage` directory in the `blocks_storage.filesystem.dir` field. This is the directory where all ingesters will "upload" finished blocks. This can also be an S3 or GCP storage, but for simplicity, we use the local filesystem in this example. After these changes, we can start another Cortex instance using the modified configuration file. This instance will join the ring and will start receiving samples after it enters the ACTIVE state. - diff --git a/docs/guides/migration-kv-store-to-memberlist.md b/docs/guides/migration-kv-store-to-memberlist.md index 8ebbcb0ac2e..c01eaebffc9 100644 --- a/docs/guides/migration-kv-store-to-memberlist.md +++ b/docs/guides/migration-kv-store-to-memberlist.md @@ -29,8 +29,10 @@ Update your configuration file and deploy the changes: ring: store: memberlist memberlist: - abort_if_join_fails: false + abort_if_cluster_join_fails: false bind_port: + cluster_label: + cluster_label_verification_disabled: true join_members: - gossip-ring..svc.cluster.local: ... @@ -54,6 +56,19 @@ ingester: > The Memberlist gossip protocol requires a bit of time to propagate the state across the cluster. Setting a 60-second delay ensures that the ingester has enough time to fully sync the existing ring topology from other peers before actively joining and receiving traffic. > > **Note:** Make sure to apply this multi KV store configuration to all other components that interact with the ring (e.g. distributors, store-gateways), not just the ingesters. 
+> +> **Note:** If multiple Cortex, Mimir, or Loki clusters could reach the same gossip seed addresses, configure a shared `memberlist.cluster_label` for your Cortex cluster. +> +> **For a fresh Memberlist rollout:** +> 1. Deploy with `memberlist.cluster_label` set and `memberlist.cluster_label_verification_disabled: true`. +> 2. Once every memberlist-enabled process is running with the same label, set `memberlist.cluster_label_verification_disabled: false`. +> +> **For an existing unlabeled Memberlist cluster:** +> 1. Roll out `memberlist.cluster_label_verification_disabled: true` everywhere (leave `memberlist.cluster_label` empty). +> 2. Roll out the shared `memberlist.cluster_label` value to all processes. +> 3. Set `memberlist.cluster_label_verification_disabled: false`. +> +> This isolates Memberlist traffic only; it does not isolate Consul or Etcd prefixes. Once deployed, Cortex will begin mirroring primary (Consul) data to Memberlist. @@ -87,4 +102,6 @@ ingester: ``` > **Note:** Again, ensure this update is applied across all components. -After the updated configuration is fully deployed across your cluster and everything is running stably, you can remove your Consul cluster. \ No newline at end of file +After the updated configuration is fully deployed across your cluster and everything is running stably, you can remove your Consul cluster. + +If you enabled `memberlist.cluster_label_verification_disabled: true` during the migration, finish the rollout by setting it back to `false` once every memberlist-enabled Cortex process is using the same `memberlist.cluster_label`. diff --git a/flaky-tests/audit-log.md b/flaky-tests/audit-log.md new file mode 100644 index 00000000000..17b9c840886 --- /dev/null +++ b/flaky-tests/audit-log.md @@ -0,0 +1,6 @@ +# Flaky Test Audit Log + +This file tracks every CI run on the `flaky-test-audit` branch. Any test failure on this branch is a flaky test since no test logic has been modified from `master`. 
+ +| Timestamp | Result | Details | CI Job | +|-----------|--------|---------|--------| diff --git a/go.mod b/go.mod index 1dcad128579..c89288d8ffc 100644 --- a/go.mod +++ b/go.mod @@ -51,19 +51,19 @@ require ( github.com/stretchr/testify v1.11.1 github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674 - github.com/thanos-io/thanos v0.40.1-0.20260204190131-802f43f3bc64 + github.com/thanos-io/thanos v0.41.0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.17 go.etcd.io/etcd/client/pkg/v3 v3.5.17 go.etcd.io/etcd/client/v3 v3.5.17 go.opentelemetry.io/contrib/propagators/aws v1.36.0 - go.opentelemetry.io/otel v1.40.0 + go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel/bridge/opentracing v1.36.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 - go.opentelemetry.io/otel/sdk v1.40.0 - go.opentelemetry.io/otel/trace v1.40.0 + go.opentelemetry.io/otel/sdk v1.43.0 + go.opentelemetry.io/otel/trace v1.43.0 go.uber.org/atomic v1.11.0 golang.org/x/net v0.49.0 golang.org/x/sync v0.19.0 @@ -282,8 +282,8 @@ require ( go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -296,14 +296,14 @@ require ( golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect golang.org/x/mod v0.32.0 // indirect 
golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sys v0.40.0 // indirect + golang.org/x/sys v0.42.0 // indirect golang.org/x/text v0.33.0 // indirect golang.org/x/tools v0.41.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/api v0.252.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect gopkg.in/telebot.v3 v3.3.8 // indirect k8s.io/apimachinery v0.34.1 // indirect k8s.io/client-go v0.34.1 // indirect diff --git a/go.sum b/go.sum index 01f0c92b538..b082228526c 100644 --- a/go.sum +++ b/go.sum @@ -1791,8 +1791,8 @@ github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488 h1:khBsQLLRoF1K github.com/thanos-io/objstore v0.0.0-20250804093838-71d60dfee488/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0= github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674 h1:C5yBEuIZCaeLh90lcUGfnGepmwDfGGYLu6+w7RxR7og= github.com/thanos-io/promql-engine v0.0.0-20260119085929-dd5223783674/go.mod h1:uzn40oZHPXvfdP498h+MiRL2fN7RF519gNaV3LyhChc= -github.com/thanos-io/thanos v0.40.1-0.20260204190131-802f43f3bc64 h1:9gQnp3k3te0BIbLFA/diuOgoJMQ2vhvTo8hzrPMcfiI= -github.com/thanos-io/thanos v0.40.1-0.20260204190131-802f43f3bc64/go.mod h1:ZgknkTTliTvUeZy4lqVYrRp2l5TxUGLWcaonGyUbynw= +github.com/thanos-io/thanos v0.41.0 h1:GDPGynjHBa8ORAX7DfluBFjHbMeY1BzjLTGdviFvo7Q= +github.com/thanos-io/thanos v0.41.0/go.mod h1:ppdHafpAT8WAbcwgLiNU4jNtNe17Ct3xX9dXq+h6g2k= github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY= github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA= github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM= @@ -1904,8 +1904,8 @@ 
go.opentelemetry.io/contrib/propagators/ot v1.36.0 h1:UBoZjbx483GslNKYK2YpfvePTJ go.opentelemetry.io/contrib/propagators/ot v1.36.0/go.mod h1:adDDRry19/n9WoA7mSCMjoVJcmzK/bZYzX9SR+g2+W4= go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/bridge/opentracing v1.36.0 h1:GWGmcYhMCu6+K/Yz5KWSETU/esd/mkVGx+77uKtLjpk= go.opentelemetry.io/otel/bridge/opentracing v1.36.0/go.mod h1:bW7xTHgtWSNqY8QjhqXzloXBkw3iQIa8uBqCF/0EUbc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= @@ -1918,18 +1918,18 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKI go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= 
-go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2301,8 +2301,8 @@ golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.40.0 
h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2696,8 +2696,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.71.2 h1:KnzCueW4s+8ojAPZ+NnyZAELjsIMJGteKjKejieEC7M= google.golang.org/grpc v1.71.2/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= diff --git a/integration/distributor_mixed_ha_samples_test.go 
b/integration/distributor_mixed_ha_samples_test.go index 7e66f6045f2..58c65572548 100644 --- a/integration/distributor_mixed_ha_samples_test.go +++ b/integration/distributor_mixed_ha_samples_test.go @@ -41,7 +41,7 @@ func TestDistriubtorAcceptMixedHASamplesRunningInMicroservicesMode(t *testing.T) "-distributor.ha-tracker.etcd.endpoints": "etcd:2379", } querierFlags := mergeFlags(BlocksStorageFlags(), map[string]string{ - "-querier.query-store-after": (1 * time.Hour).String(), + "-limits.query-store-after": (1 * time.Hour).String(), }) flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), diff --git a/integration/ingester_sharding_test.go b/integration/ingester_sharding_test.go index 0a7571b94a6..90b7fe7f999 100644 --- a/integration/ingester_sharding_test.go +++ b/integration/ingester_sharding_test.go @@ -64,7 +64,7 @@ func TestIngesterSharding(t *testing.T) { if testData.shardingStrategy == "shuffle-sharding" { // Enable shuffle sharding on read path but not lookback, otherwise all ingesters would be // queried being just registered. - flags["-querier.shuffle-sharding-ingesters-lookback-period"] = "1ns" + flags["-limits.shuffle-sharding-ingesters-lookback-period"] = "1ms" } // Start dependencies. 
diff --git a/integration/integration_memberlist_single_binary_test.go b/integration/integration_memberlist_single_binary_test.go index 14a720d9a93..acd886db441 100644 --- a/integration/integration_memberlist_single_binary_test.go +++ b/integration/integration_memberlist_single_binary_test.go @@ -39,6 +39,96 @@ func TestSingleBinaryWithMemberlist(t *testing.T) { }) } +func TestSingleBinaryWithMemberlistClusterLabelIsolation(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(minio)) + + clusterA1 := newSingleBinary("cluster-a-1", "", "", map[string]string{ + "-memberlist.cluster-label": "cluster-a", + "-memberlist.abort-if-join-fails": "false", + }) + clusterA2 := newSingleBinary("cluster-a-2", "", networkName+"-cluster-a-1:8000", map[string]string{ + "-memberlist.cluster-label": "cluster-a", + "-memberlist.abort-if-join-fails": "false", + }) + clusterB1 := newSingleBinary("cluster-b-1", "", networkName+"-cluster-a-1:8000", map[string]string{ + "-memberlist.cluster-label": "cluster-b", + "-memberlist.abort-if-join-fails": "false", + }) + clusterB2 := newSingleBinary("cluster-b-2", "", networkName+"-cluster-b-1:8000", map[string]string{ + "-memberlist.cluster-label": "cluster-b", + "-memberlist.abort-if-join-fails": "false", + }) + + require.NoError(t, s.StartAndWaitReady(clusterA1)) + require.NoError(t, s.StartAndWaitReady(clusterB1)) + require.NoError(t, s.StartAndWaitReady(clusterA2, clusterB2)) + + requireMemberlistClusterState(t, 2, 2*512, clusterA1, clusterA2) + requireMemberlistClusterState(t, 2, 2*512, clusterB1, clusterB2) + + // Verify cross-cluster isolation: sleep for an observation window to confirm + // member counts remain stable and no cross-cluster leakage occurs over time. 
+ // A fixed sleep is intentional here — we are asserting that nothing changes, + // which cannot be verified with polling. + time.Sleep(5 * time.Second) + requireMemberlistClusterState(t, 2, 2*512, clusterA1, clusterA2) + requireMemberlistClusterState(t, 2, 2*512, clusterB1, clusterB2) +} + +func TestSingleBinaryWithMemberlistClusterLabelRollingMigration(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(minio)) + + const clusterLabel = "migration-cluster" + + configs := []struct { + name string + join string + }{ + {name: "migration-cortex-1", join: networkName + "-migration-cortex-2:8000"}, + {name: "migration-cortex-2", join: networkName + "-migration-cortex-1:8000"}, + {name: "migration-cortex-3", join: networkName + "-migration-cortex-1:8000"}, + } + + cortexServices := make([]*e2ecortex.CortexService, 0, len(configs)) + for _, cfg := range configs { + cortexServices = append(cortexServices, newMigrationSingleBinary(cfg.name, cfg.join, "", true)) + } + + require.NoError(t, s.StartAndWaitReady(cortexServices[0])) + require.NoError(t, s.StartAndWaitReady(cortexServices[1], cortexServices[2])) + requireMemberlistClusterState(t, 3, 3*512, cortexServices...) + + for i, cfg := range configs { + replacement := newMigrationSingleBinary(cfg.name, cfg.join, clusterLabel, true) + require.NoError(t, s.Stop(cortexServices[i])) + require.NoError(t, s.StartAndWaitReady(replacement)) + cortexServices[i] = replacement + requireMemberlistClusterState(t, 3, 3*512, cortexServices...) 
+ } + + for i, cfg := range configs { + replacement := newMigrationSingleBinary(cfg.name, cfg.join, clusterLabel, false) + require.NoError(t, s.Stop(cortexServices[i])) + require.NoError(t, s.StartAndWaitReady(replacement)) + cortexServices[i] = replacement + requireMemberlistClusterState(t, 3, 3*512, cortexServices...) + } +} + func testSingleBinaryEnv(t *testing.T, tlsEnabled bool, flags map[string]string) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) @@ -162,6 +252,28 @@ func newSingleBinary(name string, servername string, join string, testFlags map[ return serv } +func newMigrationSingleBinary(name string, join string, clusterLabel string, verificationDisabled bool) *e2ecortex.CortexService { + flags := map[string]string{ + "-memberlist.abort-if-join-fails": "false", + "-memberlist.cluster-label-verification-disabled": fmt.Sprintf("%t", verificationDisabled), + } + + if clusterLabel != "" { + flags["-memberlist.cluster-label"] = clusterLabel + } + + return newSingleBinary(name, "", join, flags) +} + +func requireMemberlistClusterState(t *testing.T, expectedMembers, expectedTokens int, services ...*e2ecortex.CortexService) { + t.Helper() + + for _, service := range services { + require.NoError(t, service.WaitSumMetrics(e2e.Equals(float64(expectedMembers)), "memberlist_client_cluster_members_count")) + require.NoError(t, service.WaitSumMetrics(e2e.Equals(float64(expectedTokens)), "cortex_ring_tokens_total")) + } +} + func TestSingleBinaryWithMemberlistScaling(t *testing.T) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) diff --git a/integration/parquet_querier_test.go b/integration/parquet_querier_test.go index c0a70ae04f7..2c3d8b9256b 100644 --- a/integration/parquet_querier_test.go +++ b/integration/parquet_querier_test.go @@ -225,7 +225,7 @@ func TestParquetProjectionPushdownFuzz(t *testing.T) { "-querier.honor-projection-hints": "true", // Honor projection hints // Set query-ingesters-within to 2h so queries older than 2h 
don't hit ingesters // Since test queries are 24-48h old, they won't query ingesters and projection will be enabled - "-querier.query-ingesters-within": "2h", + "-limits.query-ingesters-within": "2h", // Enable cache for parquet labels and chunks "-blocks-storage.bucket-store.parquet-labels-cache.backend": "inmemory,memcached", "-blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), diff --git a/integration/querier_sharding_test.go b/integration/querier_sharding_test.go index 31b688fa993..a1ece181188 100644 --- a/integration/querier_sharding_test.go +++ b/integration/querier_sharding_test.go @@ -68,7 +68,7 @@ func runQuerierShardingTest(t *testing.T, cfg querierShardingTestConfig) { flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-frontend.max-outstanding-requests-per-tenant": strconv.Itoa(numQueries), // To avoid getting errors. 
}) diff --git a/integration/querier_tenant_federation_test.go b/integration/querier_tenant_federation_test.go index 0ee5024be8e..46a33933ee2 100644 --- a/integration/querier_tenant_federation_test.go +++ b/integration/querier_tenant_federation_test.go @@ -72,7 +72,7 @@ func TestRegexResolver_NewlyCreatedTenant(t *testing.T) { flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-tenant-federation.enabled": "true", "-tenant-federation.regex-matcher-enabled": "true", @@ -141,7 +141,7 @@ func runQuerierTenantFederationTest_UseRegexResolver(t *testing.T, cfg querierTe flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-tenant-federation.enabled": "true", "-tenant-federation.regex-matcher-enabled": "true", @@ -305,7 +305,7 @@ func runQuerierTenantFederationTest(t *testing.T, cfg querierTenantFederationCon flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), 
"-tenant-federation.enabled": "true", }) diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go index 33b1fc27d39..522e1232a26 100644 --- a/integration/query_frontend_test.go +++ b/integration/query_frontend_test.go @@ -284,7 +284,7 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { flags = mergeFlags(flags, map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-frontend.query-stats-enabled": "true", // Always enable query stats to capture regressions }) @@ -882,7 +882,7 @@ func TestQueryFrontendStatsFromResultsCacheShouldBeSame(t *testing.T) { flags := mergeFlags(BlocksStorageFlags(), map[string]string{ "-querier.cache-results": "true", "-querier.split-queries-by-interval": "24h", - "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range + "-limits.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-querier.per-step-stats-enabled": strconv.FormatBool(true), "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-frontend.query-stats-enabled": strconv.FormatBool(true), diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go index 5d8cdbc72ca..c61cc1aa6d7 100644 --- a/integration/remote_write_v2_test.go +++ b/integration/remote_write_v2_test.go @@ -3,14 +3,19 @@ package integration import ( + "bytes" + "fmt" "math/rand" + "net/http" "path" "sync" "testing" "time" + "github.com/golang/snappy" remoteapi "github.com/prometheus/client_golang/exp/api/remote" 
"github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" @@ -21,6 +26,7 @@ import ( "github.com/cortexproject/cortex/integration/e2e" e2edb "github.com/cortexproject/cortex/integration/e2e/db" "github.com/cortexproject/cortex/integration/e2ecortex" + "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/storage/tsdb" ) @@ -299,6 +305,7 @@ func TestIngest(t *testing.T) { // Distributor. "-distributor.replication-factor": "1", "-distributor.remote-writev2-enabled": "true", + "-distributor.enable-start-timestamp": "true", // Store-gateway. "-store-gateway.sharding-enabled": "false", // alert manager @@ -393,6 +400,407 @@ func TestIngest(t *testing.T) { } } +func TestIngest_StartTimestamp(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. 
+ "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + "-distributor.enable-start-timestamp": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + sampleTs := time.Now().Truncate(time.Second) + startTs := sampleTs.Add(-2 * time.Second) + step := sampleTs.Sub(startTs) + + sampleSymbols := []string{"", "__name__", "test_start_timestamp_sample"} + sampleSeries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{{ + Value: 42, + Timestamp: e2e.TimeToMilliseconds(sampleTs), + StartTimestamp: e2e.TimeToMilliseconds(startTs), + }}, + }, + } + + writeStats, err := c.PushV2(sampleSymbols, sampleSeries) + require.NoError(t, err) + testPushHeader(t, writeStats, 1, 0, 0) + + sampleResult, err := c.QueryRange("test_start_timestamp_sample", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, sampleResult.Type()) + + sampleMatrix := sampleResult.(model.Matrix) + require.Len(t, sampleMatrix, 1) + require.Len(t, sampleMatrix[0].Values, 2) + require.Empty(t, sampleMatrix[0].Histograms) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(startTs)), sampleMatrix[0].Values[0].Timestamp) + assert.Equal(t, model.SampleValue(0), sampleMatrix[0].Values[0].Value) + assert.Equal(t, 
model.Time(e2e.TimeToMilliseconds(sampleTs)), sampleMatrix[0].Values[1].Timestamp) + assert.Equal(t, model.SampleValue(42), sampleMatrix[0].Values[1].Value) + + histogramCases := []struct { + metricName string + isFloat bool + isCustom bool + idx uint32 + }{ + {metricName: "test_start_timestamp_histogram", isFloat: false, isCustom: false, idx: rand.Uint32()}, + {metricName: "test_start_timestamp_histogram_float", isFloat: true, isCustom: false, idx: rand.Uint32()}, + {metricName: "test_start_timestamp_histogram_custom", isFloat: false, isCustom: true, idx: rand.Uint32()}, + {metricName: "test_start_timestamp_histogram_float_custom", isFloat: true, isCustom: true, idx: rand.Uint32()}, + } + + for _, tc := range histogramCases { + symbols, series := e2e.GenerateHistogramSeriesV2(tc.metricName, sampleTs, tc.idx, tc.isCustom, tc.isFloat) + series[0].Histograms[0].StartTimestamp = e2e.TimeToMilliseconds(startTs) + + writeStats, err = c.PushV2(symbols, series) + require.NoError(t, err) + testPushHeader(t, writeStats, 0, 1, 0) + + result, err := c.QueryRange(tc.metricName, startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, result.Type()) + + matrix := result.(model.Matrix) + require.Len(t, matrix, 1) + require.Empty(t, matrix[0].Values) + require.Len(t, matrix[0].Histograms, 2) + require.NotNil(t, matrix[0].Histograms[0].Histogram) + require.NotNil(t, matrix[0].Histograms[1].Histogram) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(startTs)), matrix[0].Histograms[0].Timestamp) + assert.Equal(t, model.FloatString(0), matrix[0].Histograms[0].Histogram.Count) + assert.Equal(t, model.FloatString(0), matrix[0].Histograms[0].Histogram.Sum) + + var expectedCount, expectedSum model.FloatString + if tc.isFloat { + var expected *histogram.FloatHistogram + if tc.isCustom { + expected = tsdbutil.GenerateTestCustomBucketsFloatHistogram(int64(tc.idx)) + } else { + expected = tsdbutil.GenerateTestFloatHistogram(int64(tc.idx)) + } + expectedCount 
= model.FloatString(expected.Count) + expectedSum = model.FloatString(expected.Sum) + } else { + var expected *histogram.Histogram + if tc.isCustom { + expected = tsdbutil.GenerateTestCustomBucketsHistogram(int64(tc.idx)) + } else { + expected = tsdbutil.GenerateTestHistogram(int64(tc.idx)) + } + expectedCount = model.FloatString(expected.Count) + expectedSum = model.FloatString(expected.Sum) + } + + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), matrix[0].Histograms[1].Timestamp) + assert.Equal(t, expectedCount, matrix[0].Histograms[1].Histogram.Count) + assert.Equal(t, expectedSum, matrix[0].Histograms[1].Histogram.Sum) + } +} + +func TestIngest_CreatedTimestampFallback(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + "-distributor.enable-start-timestamp": "true", + "-store-gateway.sharding-enabled": "false", + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + path := 
path.Join(s.SharedDir(), "cortex-1") + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + sampleTs := time.Now().Truncate(time.Second) + startTs := sampleTs.Add(-2 * time.Second) + step := sampleTs.Sub(startTs) + + // Send a PRW2 request encoded with Cortex proto carrying only created_timestamp. + sampleReq := &cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_created_timestamp_sample"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{ + { + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + CreatedTimestamp: e2e.TimeToMilliseconds(startTs), + Samples: []cortexpb.Sample{{Value: 7, TimestampMs: e2e.TimeToMilliseconds(sampleTs)}}, + }, + }, + }, + } + pushCortexV2Request(t, cortex.HTTPEndpoint(), "user-1", sampleReq) + + sampleResult, err := c.QueryRange("test_created_timestamp_sample", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, sampleResult.Type()) + + sampleMatrix := sampleResult.(model.Matrix) + require.Len(t, sampleMatrix, 1) + require.Len(t, sampleMatrix[0].Values, 2) + require.Empty(t, sampleMatrix[0].Histograms) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(startTs)), sampleMatrix[0].Values[0].Timestamp) + assert.Equal(t, model.SampleValue(0), sampleMatrix[0].Values[0].Value) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), sampleMatrix[0].Values[1].Timestamp) + assert.Equal(t, model.SampleValue(7), sampleMatrix[0].Values[1].Value) + + h := cortexpb.HistogramToHistogramProto(e2e.TimeToMilliseconds(sampleTs), tsdbutil.GenerateTestHistogram(3)) + histReq := &cortexpb.WriteRequestV2{ + Symbols: 
[]string{"", "__name__", "test_created_timestamp_histogram"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{ + { + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + CreatedTimestamp: e2e.TimeToMilliseconds(startTs), + Histograms: []cortexpb.Histogram{h}, + }, + }, + }, + } + pushCortexV2Request(t, cortex.HTTPEndpoint(), "user-1", histReq) + + histResult, err := c.QueryRange("test_created_timestamp_histogram", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, histResult.Type()) + + histMatrix := histResult.(model.Matrix) + require.Len(t, histMatrix, 1) + require.Empty(t, histMatrix[0].Values) + require.Len(t, histMatrix[0].Histograms, 2) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(startTs)), histMatrix[0].Histograms[0].Timestamp) + assert.Equal(t, model.FloatString(0), histMatrix[0].Histograms[0].Histogram.Count) + assert.Equal(t, model.FloatString(0), histMatrix[0].Histograms[0].Histogram.Sum) + + expectedHist := tsdbutil.GenerateTestHistogram(3) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), histMatrix[0].Histograms[1].Timestamp) + assert.Equal(t, model.FloatString(expectedHist.Count), histMatrix[0].Histograms[1].Histogram.Count) + assert.Equal(t, model.FloatString(expectedHist.Sum), histMatrix[0].Histograms[1].Histogram.Sum) +} + +func TestIngest_StartAndCreatedTimestampIgnoredWhenDisabled(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + 
"-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + "-distributor.enable-start-timestamp": "false", + "-store-gateway.sharding-enabled": "false", + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + path := path.Join(s.SharedDir(), "cortex-1") + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + sampleTs := time.Now().Truncate(time.Second) + startTs := sampleTs.Add(-2 * time.Second) + step := sampleTs.Sub(startTs) + + t.Run("ST is ignored", func(t *testing.T) { + sampleSymbols := []string{"", "__name__", "test_start_timestamp_ignored_sample"} + sampleSeries := []writev2.TimeSeries{{ + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{{ + Value: 42, + Timestamp: e2e.TimeToMilliseconds(sampleTs), + StartTimestamp: e2e.TimeToMilliseconds(startTs), + }}, + }} + + writeStats, err := c.PushV2(sampleSymbols, sampleSeries) + require.NoError(t, err) + testPushHeader(t, writeStats, 1, 0, 0) + + sampleResult, err := c.QueryRange("test_start_timestamp_ignored_sample", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, sampleResult.Type()) + + sampleMatrix := sampleResult.(model.Matrix) + 
require.Len(t, sampleMatrix, 1) + require.Len(t, sampleMatrix[0].Values, 1) + require.Empty(t, sampleMatrix[0].Histograms) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), sampleMatrix[0].Values[0].Timestamp) + assert.Equal(t, model.SampleValue(42), sampleMatrix[0].Values[0].Value) + + histogramIdx := rand.Uint32() + symbols, series := e2e.GenerateHistogramSeriesV2("test_start_timestamp_ignored_histogram", sampleTs, histogramIdx, false, false) + series[0].Histograms[0].StartTimestamp = e2e.TimeToMilliseconds(startTs) + + writeStats, err = c.PushV2(symbols, series) + require.NoError(t, err) + testPushHeader(t, writeStats, 0, 1, 0) + + histResult, err := c.QueryRange("test_start_timestamp_ignored_histogram", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, histResult.Type()) + + histMatrix := histResult.(model.Matrix) + require.Len(t, histMatrix, 1) + require.Empty(t, histMatrix[0].Values) + require.Len(t, histMatrix[0].Histograms, 1) + require.NotNil(t, histMatrix[0].Histograms[0].Histogram) + + expectedHist := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), histMatrix[0].Histograms[0].Timestamp) + assert.Equal(t, model.FloatString(expectedHist.Count), histMatrix[0].Histograms[0].Histogram.Count) + assert.Equal(t, model.FloatString(expectedHist.Sum), histMatrix[0].Histograms[0].Histogram.Sum) + }) + + t.Run("CT fallback is ignored", func(t *testing.T) { + sampleReq := &cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_created_timestamp_ignored_sample"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + CreatedTimestamp: e2e.TimeToMilliseconds(startTs), + Samples: []cortexpb.Sample{{Value: 7, TimestampMs: e2e.TimeToMilliseconds(sampleTs)}}, + }, + }}, + } + pushCortexV2Request(t, cortex.HTTPEndpoint(), "user-1", sampleReq) + + sampleResult, err := 
c.QueryRange("test_created_timestamp_ignored_sample", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, sampleResult.Type()) + + sampleMatrix := sampleResult.(model.Matrix) + require.Len(t, sampleMatrix, 1) + require.Len(t, sampleMatrix[0].Values, 1) + require.Empty(t, sampleMatrix[0].Histograms) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), sampleMatrix[0].Values[0].Timestamp) + assert.Equal(t, model.SampleValue(7), sampleMatrix[0].Values[0].Value) + + h := cortexpb.HistogramToHistogramProto(e2e.TimeToMilliseconds(sampleTs), tsdbutil.GenerateTestHistogram(3)) + histReq := &cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_created_timestamp_ignored_histogram"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + CreatedTimestamp: e2e.TimeToMilliseconds(startTs), + Histograms: []cortexpb.Histogram{h}, + }, + }}, + } + pushCortexV2Request(t, cortex.HTTPEndpoint(), "user-1", histReq) + + histResult, err := c.QueryRange("test_created_timestamp_ignored_histogram", startTs, sampleTs, step) + require.NoError(t, err) + require.Equal(t, model.ValMatrix, histResult.Type()) + + histMatrix := histResult.(model.Matrix) + require.Len(t, histMatrix, 1) + require.Empty(t, histMatrix[0].Values) + require.Len(t, histMatrix[0].Histograms, 1) + require.NotNil(t, histMatrix[0].Histograms[0].Histogram) + + expectedHist := tsdbutil.GenerateTestHistogram(3) + assert.Equal(t, model.Time(e2e.TimeToMilliseconds(sampleTs)), histMatrix[0].Histograms[0].Timestamp) + assert.Equal(t, model.FloatString(expectedHist.Count), histMatrix[0].Histograms[0].Histogram.Count) + assert.Equal(t, model.FloatString(expectedHist.Sum), histMatrix[0].Histograms[0].Histogram.Sum) + }) +} + func TestExemplar(t *testing.T) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) @@ -685,3 +1093,26 @@ func testPushHeader(t *testing.T, stats remoteapi.WriteResponseStats, 
expectedSa require.Equal(t, expectedHistogram, stats.Histograms) require.Equal(t, expectedExemplars, stats.Exemplars) } + +func pushCortexV2Request(t *testing.T, distributorAddr, orgID string, req *cortexpb.WriteRequestV2) { + t.Helper() + + data, err := req.Marshal() + require.NoError(t, err) + + compressed := snappy.Encode(nil, data) + httpReq, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/prom/push", distributorAddr), bytes.NewReader(compressed)) + require.NoError(t, err) + + httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request") + httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0") + httpReq.Header.Set("X-Scope-OrgID", orgID) + + httpClient := &http.Client{Timeout: 30 * time.Second} + res, err := httpClient.Do(httpReq) + require.NoError(t, err) + defer res.Body.Close() //nolint:errcheck + + require.Equal(t, http.StatusNoContent, res.StatusCode) +} diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 56f29295aae..446f9cfade2 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -1274,8 +1274,8 @@ func TestRulerMetricsWhenIngesterFails(t *testing.T) { // Very low limit so that ruler hits it. "-querier.max-fetched-chunks-per-query": "15", - "-querier.query-store-after": (1 * time.Second).String(), - "-querier.query-ingesters-within": (2 * time.Second).String(), + "-limits.query-store-after": (1 * time.Second).String(), + "-limits.query-ingesters-within": (2 * time.Second).String(), }, ) @@ -1378,8 +1378,8 @@ func TestRulerDisablesRuleGroups(t *testing.T) { // Very low limit so that ruler hits it. 
"-querier.max-fetched-chunks-per-query": "15", - "-querier.query-store-after": (1 * time.Second).String(), - "-querier.query-ingesters-within": (2 * time.Second).String(), + "-limits.query-store-after": (1 * time.Second).String(), + "-limits.query-ingesters-within": (2 * time.Second).String(), }, ) diff --git a/pkg/alertmanager/alertspb/compat_test.go b/pkg/alertmanager/alertspb/compat_test.go new file mode 100644 index 00000000000..7a62902a985 --- /dev/null +++ b/pkg/alertmanager/alertspb/compat_test.go @@ -0,0 +1,118 @@ +package alertspb + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestToProto(t *testing.T) { + tests := []struct { + name string + cfg string + templates map[string]string + user string + }{ + { + name: "empty config and no templates", + cfg: "", + templates: nil, + user: "user-1", + }, + { + name: "config with templates", + cfg: "route:\n receiver: default", + templates: map[string]string{ + "slack.tmpl": "{{ define \"slack\" }}alert{{ end }}", + }, + user: "user-2", + }, + { + name: "empty user", + cfg: "global: {}", + templates: nil, + user: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ToProto(tt.cfg, tt.templates, tt.user) + + assert.Equal(t, tt.user, result.User) + assert.Equal(t, tt.cfg, result.RawConfig) + + if tt.templates == nil { + assert.Empty(t, result.Templates) + } else { + assert.Len(t, result.Templates, len(tt.templates)) + for _, tmpl := range result.Templates { + expectedBody, ok := tt.templates[tmpl.Filename] + assert.True(t, ok, "unexpected template filename: %s", tmpl.Filename) + assert.Equal(t, expectedBody, tmpl.Body) + } + } + }) + } +} + +func TestParseTemplates(t *testing.T) { + tests := []struct { + name string + cfg AlertConfigDesc + expected map[string]string + }{ + { + name: "no templates", + cfg: AlertConfigDesc{Templates: nil}, + expected: map[string]string{}, + }, + { + name: "single template", + cfg: AlertConfigDesc{ + Templates: 
[]*TemplateDesc{ + {Filename: "slack.tmpl", Body: "{{ define \"slack\" }}msg{{ end }}"}, + }, + }, + expected: map[string]string{ + "slack.tmpl": "{{ define \"slack\" }}msg{{ end }}", + }, + }, + { + name: "multiple templates", + cfg: AlertConfigDesc{ + Templates: []*TemplateDesc{ + {Filename: "a.tmpl", Body: "body-a"}, + {Filename: "b.tmpl", Body: "body-b"}, + }, + }, + expected: map[string]string{ + "a.tmpl": "body-a", + "b.tmpl": "body-b", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ParseTemplates(tt.cfg) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestToProto_ParseTemplates_roundtrip(t *testing.T) { + templates := map[string]string{ + "email.tmpl": "{{ define \"email\" }}hello{{ end }}", + "slack.tmpl": "{{ define \"slack\" }}alert{{ end }}", + } + cfg := "route:\n receiver: default" + user := "test-user" + + proto := ToProto(cfg, templates, user) + result := ParseTemplates(proto) + + assert.Equal(t, templates, result) + assert.Equal(t, cfg, proto.RawConfig) + assert.Equal(t, user, proto.User) +} diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 6591e056c7f..74df4b93d3a 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -219,6 +219,9 @@ func (c *Config) Validate(log log.Logger) error { if err := c.LimitsConfig.Validate(c.NameValidationScheme, c.Distributor.ShardByAllLabels, c.Ingester.ActiveSeriesMetricsEnabled); err != nil { return errors.Wrap(err, "invalid limits config") } + if err := c.LimitsConfig.ValidateQueryLimits("default", c.BlocksStorage.TSDB.CloseIdleTSDBTimeout); err != nil { + return errors.Wrap(err, "invalid query routing config") + } if err := c.ResourceMonitor.Validate(); err != nil { return errors.Wrap(err, "invalid resource-monitor config") } diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 046f3a631f5..805c2095429 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -243,7 +243,6 @@ func (t *Cortex) initOverridesExporter() 
(services.Service, error) { func (t *Cortex) initDistributorService() (serv services.Service, err error) { t.Cfg.Distributor.DistributorRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.Cfg.Distributor.ShuffleShardingLookbackPeriod = t.Cfg.Querier.ShuffleShardingIngestersLookbackPeriod t.Cfg.Distributor.NameValidationScheme = t.Cfg.NameValidationScheme t.Cfg.IngesterClient.GRPCClientConfig.SignWriteRequestsEnabled = t.Cfg.Distributor.SignWriteRequestsEnabled @@ -497,7 +496,6 @@ func (t *Cortex) initIngesterService() (serv services.Service, err error) { t.Cfg.Ingester.DistributorShardingStrategy = t.Cfg.Distributor.ShardingStrategy t.Cfg.Ingester.DistributorShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels t.Cfg.Ingester.InstanceLimitsFn = ingesterInstanceLimits(t.RuntimeConfig) - t.Cfg.Ingester.QueryIngestersWithin = t.Cfg.Querier.QueryIngestersWithin t.tsdbIngesterConfig() t.Ingester, err = ingester.New(t.Cfg.Ingester, t.OverridesConfig, prometheus.DefaultRegisterer, util_log.Logger, t.ResourceMonitor) diff --git a/pkg/cortex/runtime_config.go b/pkg/cortex/runtime_config.go index ea84fb3f525..e1471c33b7d 100644 --- a/pkg/cortex/runtime_config.go +++ b/pkg/cortex/runtime_config.go @@ -76,10 +76,13 @@ func (l runtimeConfigLoader) load(r io.Reader) (any, error) { // only check if target is `all`, `distributor`, "querier", and "ruler" // refer to https://github.com/cortexproject/cortex/issues/6741#issuecomment-3067244929 if overrides != nil { - for _, ul := range overrides.TenantLimits { + for userID, ul := range overrides.TenantLimits { if err := ul.Validate(l.cfg.NameValidationScheme, l.cfg.Distributor.ShardByAllLabels, l.cfg.Ingester.ActiveSeriesMetricsEnabled); err != nil { return nil, err } + if err := ul.ValidateQueryLimits(userID, l.cfg.BlocksStorage.TSDB.CloseIdleTSDBTimeout); err != nil { + return nil, err + } } } } diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index cb87faedba3..6037803b267 100644 --- a/pkg/cortexpb/cortex.pb.go +++ 
b/pkg/cortexpb/cortex.pb.go @@ -806,8 +806,9 @@ func (m *LabelPair) GetValue() []byte { } type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + StartTimestampMs int64 `protobuf:"varint,3,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` } func (m *Sample) Reset() { *m = Sample{} } @@ -856,6 +857,13 @@ func (m *Sample) GetTimestampMs() int64 { return 0 } +func (m *Sample) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + type MetricMetadata struct { Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricMetadata_MetricType" json:"type,omitempty"` MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` @@ -1084,7 +1092,8 @@ type Histogram struct { // // The last element is not only the upper inclusive bound of the last regular // bucket, but implicitly the lower exclusive bound of the +Inf bucket. 
- CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + StartTimestampMs int64 `protobuf:"varint,17,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` } func (m *Histogram) Reset() { *m = Histogram{} } @@ -1275,6 +1284,13 @@ func (m *Histogram) GetCustomValues() []float64 { return nil } +func (m *Histogram) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Histogram) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1367,102 +1383,104 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1517 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcd, 0x6f, 0x13, 0xc7, - 0x1b, 0xf6, 0xfa, 0xdb, 0xaf, 0x3f, 0xd8, 0x0c, 0x06, 0x36, 0x01, 0xd6, 0xc1, 0xe8, 0xf7, 0x6b, - 0x44, 0x51, 0x8a, 0x82, 0x4a, 0x5b, 0x84, 0x2a, 0xd9, 0xc1, 0x21, 0x16, 0xd8, 0x8e, 0xc6, 0x4e, - 0x10, 0xbd, 0xac, 0x36, 0xf6, 0x38, 0x5e, 0xe1, 0xdd, 0x75, 0x77, 0xc6, 0x88, 0xf4, 0xd4, 0x53, - 0xd5, 0xde, 0x7a, 0xe9, 0xa5, 0xb7, 0xaa, 0x97, 0x5e, 0x7b, 0xee, 0x3f, 0xc0, 0x31, 0xb7, 0x22, - 0xa4, 0x46, 0x25, 0x5c, 0x68, 0x4f, 0x1c, 0xda, 0x7b, 0x35, 0xb3, 0x9f, 0x8e, 0x83, 0x68, 0x2b, - 0x0e, 0xbd, 0xcd, 0x3c, 0xef, 0x3b, 0x33, 0xcf, 0xcc, 0xfb, 0xbc, 0xcf, 0xda, 0x50, 0xe8, 0xdb, - 0x0e, 0x23, 0x8f, 0x57, 0x27, 0x8e, 0xcd, 0x6c, 0x94, 0x75, 0x67, 0x93, 0xdd, 0xa5, 0xf2, 0x9e, - 0xbd, 0x67, 0x0b, 0xf0, 0x3d, 0x3e, 0x72, 0xe3, 0xd5, 0x45, 0x58, 0x68, 0x11, 0x4a, 0xf5, 0x3d, - 0x72, 0xdf, 0x60, 0xa3, 0xfa, 0x74, 0x88, 0xc9, 0xf0, 0x66, 0xf2, 0xd5, 0x77, 0x95, 0x58, 
0xf5, - 0xab, 0x04, 0x14, 0xee, 0x3b, 0x06, 0x23, 0x98, 0x7c, 0x3a, 0x25, 0x94, 0xa1, 0x2d, 0x00, 0x66, - 0x98, 0x84, 0x12, 0xc7, 0x20, 0x54, 0x91, 0x96, 0x13, 0x2b, 0xf9, 0xb5, 0xf2, 0xaa, 0x7f, 0xc0, - 0x6a, 0xcf, 0x30, 0x49, 0x57, 0xc4, 0xea, 0x4b, 0x4f, 0x0e, 0x2b, 0xb1, 0x67, 0x87, 0x15, 0xb4, - 0xe5, 0x10, 0x7d, 0x3c, 0xb6, 0xfb, 0xbd, 0x60, 0x1d, 0x8e, 0xec, 0x81, 0xae, 0x42, 0xba, 0x6b, - 0x4f, 0x9d, 0x3e, 0x51, 0xe2, 0xcb, 0xd2, 0x4a, 0x29, 0xba, 0x9b, 0x8b, 0x37, 0xac, 0xa9, 0x89, - 0xbd, 0x1c, 0x74, 0x13, 0xb2, 0x26, 0x61, 0xfa, 0x40, 0x67, 0xba, 0x92, 0x10, 0xa7, 0x2b, 0x61, - 0x7e, 0x8b, 0x30, 0xc7, 0xe8, 0xb7, 0xbc, 0x78, 0x3d, 0xf9, 0xe4, 0xb0, 0x22, 0xe1, 0x20, 0x1f, - 0xdd, 0x82, 0x25, 0xfa, 0xd0, 0x98, 0x68, 0x63, 0x7d, 0x97, 0x8c, 0x35, 0x4b, 0x37, 0x89, 0xf6, - 0x48, 0x1f, 0x1b, 0x03, 0x9d, 0x19, 0xb6, 0xa5, 0xbc, 0xcc, 0x2c, 0x4b, 0x2b, 0x59, 0x7c, 0x8e, - 0xa7, 0xdc, 0xe3, 0x19, 0x6d, 0xdd, 0x24, 0x3b, 0x41, 0x1c, 0xb5, 0x20, 0x81, 0xc9, 0x50, 0xf9, - 0x8d, 0xa7, 0xe5, 0xd7, 0xce, 0x47, 0x4f, 0x3d, 0xf6, 0x76, 0xf5, 0x8b, 0xfc, 0xea, 0x07, 0x87, - 0x15, 0xe9, 0xd9, 0x61, 0x65, 0xfe, 0x69, 0x31, 0xdf, 0x07, 0x5d, 0x83, 0xf2, 0xc0, 0xa0, 0x7d, - 0xdd, 0x19, 0x68, 0xf6, 0x94, 0x69, 0xf6, 0x50, 0xb3, 0x9d, 0x01, 0x71, 0x94, 0xdf, 0x5d, 0x1a, - 0x0b, 0x5e, 0xb0, 0x33, 0x65, 0x9d, 0x61, 0x87, 0x47, 0xaa, 0x3f, 0xc5, 0xa1, 0x14, 0xad, 0xc5, - 0xce, 0x1a, 0x52, 0x20, 0x43, 0xf7, 0xcd, 0x5d, 0x7b, 0x4c, 0x95, 0xe4, 0x72, 0x62, 0x25, 0x87, - 0xfd, 0x29, 0xea, 0xcd, 0xd4, 0x29, 0x25, 0x5e, 0xea, 0xec, 0x49, 0x75, 0xda, 0x59, 0xab, 0x5f, - 0xf0, 0x2a, 0x55, 0x9e, 0xaf, 0xd4, 0xce, 0xda, 0x6b, 0x6a, 0x95, 0xfe, 0x1b, 0xb5, 0xfa, 0x2f, - 0xbd, 0x37, 0x7f, 0xbd, 0x42, 0xf4, 0xd6, 0xa8, 0x02, 0x79, 0x41, 0x8c, 0x6a, 0x0e, 0x19, 0xba, - 0x52, 0x2e, 0x62, 0x70, 0x21, 0x4c, 0x86, 0x14, 0x5d, 0x83, 0x0c, 0xd5, 0xcd, 0xc9, 0x98, 0x50, - 0x25, 0x2e, 0xde, 0x4f, 0x8e, 0xdc, 0x56, 0x04, 0x84, 0xc2, 0x62, 0xd8, 0x4f, 0x43, 0x1f, 0x01, - 0x8c, 0x0c, 0xca, 0xec, 0x3d, 
0x47, 0x37, 0xa9, 0x27, 0xcf, 0xd3, 0xe1, 0xa2, 0x4d, 0x3f, 0xe6, - 0xad, 0x8b, 0x24, 0xa3, 0x0f, 0x21, 0x47, 0x1e, 0x13, 0x73, 0x32, 0xd6, 0x1d, 0xb7, 0x96, 0x33, - 0x6d, 0xd5, 0xf0, 0x42, 0x3b, 0x6b, 0xde, 0xd2, 0x30, 0x19, 0xdd, 0x88, 0x74, 0x44, 0x4a, 0xbc, - 0x55, 0x79, 0xa6, 0x23, 0x44, 0x24, 0x58, 0x18, 0x76, 0xc3, 0xbb, 0xb0, 0xd0, 0x77, 0x88, 0xce, - 0xc8, 0x40, 0x13, 0x15, 0x66, 0xba, 0x39, 0x11, 0x65, 0x4d, 0x60, 0xd9, 0x0b, 0xf4, 0x7c, 0xbc, - 0xaa, 0x03, 0x84, 0x1c, 0xde, 0xfc, 0x74, 0x65, 0x48, 0x3d, 0xd2, 0xc7, 0x53, 0xb7, 0xa5, 0x25, - 0xec, 0x4e, 0xd0, 0x05, 0xc8, 0x85, 0x27, 0x25, 0xc4, 0x49, 0x21, 0x50, 0xfd, 0x39, 0x0e, 0x10, - 0xd2, 0x45, 0xd7, 0x21, 0xc9, 0xf6, 0x27, 0x44, 0x91, 0x84, 0xd0, 0x2a, 0x27, 0x5d, 0xc9, 0xeb, - 0xf7, 0xde, 0xfe, 0x84, 0x60, 0x91, 0x8c, 0x16, 0x21, 0x3b, 0x22, 0xe3, 0x09, 0xa7, 0x25, 0x0e, - 0x28, 0xe2, 0x0c, 0x9f, 0xf3, 0x7e, 0x5b, 0x84, 0xec, 0xd4, 0x32, 0x98, 0x08, 0x25, 0xdd, 0x10, - 0x9f, 0x73, 0x69, 0xfc, 0x22, 0x89, 0x93, 0xbd, 0xad, 0xd0, 0x79, 0x38, 0xd7, 0x6a, 0xf4, 0x70, - 0x73, 0x5d, 0xeb, 0x3d, 0xd8, 0x6a, 0x68, 0xdb, 0xed, 0xee, 0x56, 0x63, 0xbd, 0xb9, 0xd1, 0x6c, - 0xdc, 0x96, 0x63, 0xe8, 0x1c, 0x9c, 0x8e, 0x06, 0xd7, 0x3b, 0xdb, 0xed, 0x5e, 0x03, 0xcb, 0x12, - 0x3a, 0x03, 0x0b, 0xd1, 0xc0, 0x9d, 0xda, 0xf6, 0x9d, 0x86, 0x1c, 0x47, 0x8b, 0x70, 0x26, 0x0a, - 0x6f, 0x36, 0xbb, 0xbd, 0xce, 0x1d, 0x5c, 0x6b, 0xc9, 0x09, 0xa4, 0xc2, 0xd2, 0xdc, 0x8a, 0x30, - 0x9e, 0x3c, 0x7e, 0x54, 0x77, 0xbb, 0xd5, 0xaa, 0xe1, 0x07, 0x72, 0x0a, 0x95, 0x41, 0x8e, 0x06, - 0x9a, 0xed, 0x8d, 0x8e, 0x9c, 0x46, 0x0a, 0x94, 0x67, 0xd2, 0x7b, 0xb5, 0x5e, 0xa3, 0xdb, 0xe8, - 0xc9, 0x99, 0xea, 0x8f, 0x12, 0xa0, 0x2e, 0x73, 0x88, 0x6e, 0xce, 0x58, 0xf9, 0x12, 0x64, 0x7b, - 0xc4, 0xd2, 0x2d, 0xd6, 0xbc, 0x2d, 0x5e, 0x39, 0x87, 0x83, 0x39, 0xd7, 0xbe, 0x97, 0x26, 0x4a, - 0x38, 0xe3, 0x1d, 0xd1, 0x4d, 0xb0, 0x9f, 0xe6, 0xb7, 0xeb, 0xcb, 0xb7, 0xd4, 0xae, 0xdf, 0x48, - 0x50, 0xf4, 0x0e, 0xa2, 0x13, 0xdb, 0xa2, 0x04, 0x21, 0x48, 0xf6, 
0xed, 0x81, 0x2b, 0x88, 0x14, - 0x16, 0x63, 0xee, 0x7f, 0xa6, 0xbb, 0x5e, 0xd0, 0xcc, 0x61, 0x7f, 0xca, 0x23, 0x5d, 0xaf, 0x79, - 0x5d, 0xa5, 0xf9, 0x53, 0xa4, 0x02, 0x6c, 0x86, 0x4d, 0x9a, 0x14, 0xc1, 0x08, 0xc2, 0x55, 0xda, - 0x08, 0x3a, 0x31, 0xe5, 0xaa, 0x34, 0x00, 0xaa, 0x7f, 0x48, 0x00, 0xa1, 0x8d, 0xa0, 0x1a, 0xa4, - 0x5d, 0xd9, 0x7b, 0x9f, 0xc2, 0x48, 0xb7, 0x0b, 0x4f, 0xdb, 0xd2, 0x0d, 0xa7, 0x5e, 0xf6, 0xfc, - 0xb5, 0x20, 0xa0, 0xda, 0x40, 0x9f, 0x30, 0xe2, 0x60, 0x6f, 0xe1, 0xbf, 0xb0, 0x99, 0x1b, 0x51, - 0xaf, 0x70, 0x5d, 0x06, 0xcd, 0x7b, 0xc5, 0xbc, 0x53, 0xcc, 0xda, 0x53, 0xf2, 0x1f, 0xd8, 0x53, - 0xf5, 0x7d, 0xc8, 0x05, 0xf7, 0xe1, 0x95, 0xe0, 0x66, 0x2e, 0x2a, 0x51, 0xc0, 0x62, 0x3c, 0xdb, - 0xf1, 0x05, 0xaf, 0xe3, 0xab, 0x35, 0x48, 0xbb, 0x57, 0x08, 0xe3, 0x52, 0xd4, 0x11, 0x2e, 0x41, - 0x21, 0x30, 0x00, 0xcd, 0xa4, 0x62, 0x71, 0x02, 0xe7, 0x03, 0xac, 0x45, 0xab, 0xdf, 0xc6, 0xa1, - 0x34, 0xfb, 0x5d, 0x47, 0x1f, 0xcc, 0x58, 0xc3, 0xe5, 0xd7, 0x7d, 0xff, 0xe7, 0xed, 0xe1, 0x2a, - 0x20, 0x53, 0x60, 0xda, 0x50, 0x37, 0x8d, 0xf1, 0xbe, 0xf8, 0x26, 0x79, 0xca, 0x91, 0xdd, 0xc8, - 0x86, 0x08, 0xf0, 0x4f, 0x11, 0xbf, 0x26, 0x37, 0x0f, 0x21, 0x91, 0x1c, 0x16, 0x63, 0x8e, 0x71, - 0xd7, 0x10, 0xba, 0xc8, 0x61, 0x31, 0xae, 0xee, 0xcf, 0xb8, 0x47, 0x1e, 0x32, 0xdb, 0xed, 0xbb, - 0xed, 0xce, 0xfd, 0xb6, 0x1c, 0xe3, 0x93, 0xd0, 0x21, 0x72, 0x90, 0xf2, 0x5d, 0xa1, 0x08, 0xb9, - 0xa8, 0x13, 0x20, 0x28, 0xcd, 0x75, 0x7f, 0x1e, 0x32, 0x61, 0xc7, 0x67, 0x21, 0xe9, 0x75, 0x79, - 0x01, 0xb2, 0x91, 0xce, 0xbe, 0x0b, 0x69, 0xf7, 0xe8, 0xb7, 0x20, 0xc4, 0xea, 0x17, 0x12, 0x64, - 0x7d, 0xf1, 0xbc, 0x0d, 0x61, 0x9f, 0xfc, 0x11, 0x38, 0x5e, 0xf2, 0xc4, 0x7c, 0xc9, 0xff, 0x4c, - 0x41, 0x2e, 0x10, 0x23, 0xba, 0x08, 0xb9, 0xbe, 0x3d, 0xb5, 0x98, 0x66, 0x58, 0x4c, 0x94, 0x3c, - 0xb9, 0x19, 0xc3, 0x59, 0x01, 0x35, 0x2d, 0x86, 0x2e, 0x41, 0xde, 0x0d, 0x0f, 0xc7, 0xb6, 0xee, - 0xba, 0x95, 0xb4, 0x19, 0xc3, 0x20, 0xc0, 0x0d, 0x8e, 0x21, 0x19, 0x12, 0x74, 0x6a, 0x8a, 0x93, - 0x24, 
0xcc, 0x87, 0xe8, 0x2c, 0xa4, 0x69, 0x7f, 0x44, 0x4c, 0x5d, 0x14, 0x77, 0x01, 0x7b, 0x33, - 0xf4, 0x3f, 0x28, 0x7d, 0x46, 0x1c, 0x5b, 0x63, 0x23, 0x87, 0xd0, 0x91, 0x3d, 0x1e, 0x88, 0x42, - 0x4b, 0xb8, 0xc8, 0xd1, 0x9e, 0x0f, 0xa2, 0xff, 0x7b, 0x69, 0x21, 0xaf, 0xb4, 0xe0, 0x25, 0xe1, - 0x02, 0xc7, 0xd7, 0x7d, 0x6e, 0x57, 0x40, 0x8e, 0xe4, 0xb9, 0x04, 0x33, 0x82, 0xa0, 0x84, 0x4b, - 0x41, 0xa6, 0x4b, 0xb2, 0x06, 0x25, 0x8b, 0xec, 0xe9, 0xcc, 0x78, 0x44, 0x34, 0x3a, 0xd1, 0x2d, - 0xaa, 0x64, 0x8f, 0xff, 0x0a, 0xa8, 0x4f, 0xfb, 0x0f, 0x09, 0xeb, 0x4e, 0x74, 0xcb, 0xeb, 0xd0, - 0xa2, 0xbf, 0x82, 0x63, 0x14, 0xbd, 0x03, 0xa7, 0x82, 0x2d, 0x06, 0x64, 0xcc, 0x74, 0xaa, 0xe4, - 0x96, 0x13, 0x2b, 0x08, 0x07, 0x3b, 0xdf, 0x16, 0xe8, 0x4c, 0xa2, 0xe0, 0x46, 0x15, 0x58, 0x4e, - 0xac, 0x48, 0x61, 0xa2, 0x20, 0xc6, 0xed, 0xad, 0x34, 0xb1, 0xa9, 0x11, 0x21, 0x95, 0x7f, 0x33, - 0x29, 0x7f, 0x45, 0x40, 0x2a, 0xd8, 0xc2, 0x23, 0x55, 0x70, 0x49, 0xf9, 0x70, 0x48, 0x2a, 0x48, - 0xf4, 0x48, 0x15, 0x5d, 0x52, 0x3e, 0xec, 0x91, 0xba, 0x05, 0xe0, 0x10, 0x4a, 0x98, 0x36, 0xe2, - 0x2f, 0x5f, 0x12, 0x26, 0x70, 0xf1, 0x04, 0x1b, 0x5b, 0xc5, 0x3c, 0x6b, 0xd3, 0xb0, 0x18, 0xce, - 0x39, 0xfe, 0x70, 0x4e, 0x7f, 0xa7, 0xe6, 0xf4, 0x87, 0x2e, 0x43, 0xb1, 0x3f, 0xa5, 0xcc, 0x36, - 0x35, 0x21, 0x59, 0xaa, 0xc8, 0x82, 0x47, 0xc1, 0x05, 0x77, 0x04, 0x56, 0xbd, 0x09, 0xb9, 0x60, - 0xff, 0xd9, 0xa6, 0xcf, 0x40, 0xe2, 0x41, 0xa3, 0x2b, 0x4b, 0x28, 0x0d, 0xf1, 0x76, 0x47, 0x8e, - 0x87, 0x8d, 0x9f, 0x58, 0x4a, 0x7e, 0xf9, 0xbd, 0x2a, 0xd5, 0x33, 0x90, 0x12, 0x37, 0xac, 0x17, - 0x00, 0x42, 0x81, 0x54, 0x6f, 0x01, 0x84, 0xaf, 0xc9, 0x35, 0x6a, 0x0f, 0x87, 0x94, 0xb8, 0xa2, - 0x5f, 0xc0, 0xde, 0x8c, 0xe3, 0x63, 0x62, 0xed, 0xb1, 0x91, 0xd0, 0x7a, 0x11, 0x7b, 0xb3, 0x2b, - 0x15, 0x80, 0xf0, 0x37, 0x38, 0x27, 0x51, 0xdb, 0x6a, 0xca, 0x31, 0x6e, 0x1d, 0x78, 0xfb, 0x5e, - 0x43, 0x96, 0xea, 0x1f, 0x1f, 0x3c, 0x57, 0x63, 0x4f, 0x9f, 0xab, 0xb1, 0x57, 0xcf, 0x55, 0xe9, - 0xf3, 0x23, 0x55, 0xfa, 0xe1, 0x48, 0x95, 
0x9e, 0x1c, 0xa9, 0xd2, 0xc1, 0x91, 0x2a, 0xfd, 0x7a, - 0xa4, 0x4a, 0x2f, 0x8f, 0xd4, 0xd8, 0xab, 0x23, 0x55, 0xfa, 0xfa, 0x85, 0x1a, 0x3b, 0x78, 0xa1, - 0xc6, 0x9e, 0xbe, 0x50, 0x63, 0x9f, 0x04, 0x7f, 0x1e, 0x77, 0xd3, 0xe2, 0xdf, 0xe2, 0xf5, 0xbf, - 0x02, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x8c, 0x07, 0xad, 0x5d, 0x0e, 0x00, 0x00, + // 1540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x3b, 0x6f, 0x1b, 0xc7, + 0x16, 0xe6, 0xf2, 0xcd, 0xc3, 0x87, 0x57, 0x63, 0xda, 0x5e, 0xc9, 0xf6, 0x52, 0xa6, 0x71, 0xef, + 0x15, 0x7c, 0x0d, 0x5d, 0x43, 0xc6, 0x75, 0x12, 0xc3, 0x08, 0x40, 0xca, 0x94, 0x45, 0xd8, 0x24, + 0x85, 0x21, 0x25, 0xc3, 0x69, 0x16, 0x2b, 0x72, 0x28, 0x2e, 0xcc, 0xdd, 0x65, 0x76, 0x86, 0x86, + 0x95, 0x2a, 0x55, 0x90, 0x74, 0x69, 0xd2, 0xa4, 0x0b, 0xdc, 0xa4, 0x4d, 0x9d, 0x3f, 0xe0, 0x52, + 0x5d, 0x0c, 0x03, 0x11, 0x62, 0xb9, 0x71, 0x52, 0xb9, 0xc8, 0x0f, 0x08, 0x66, 0xf6, 0x49, 0x51, + 0x82, 0x93, 0xc0, 0x45, 0xba, 0x39, 0xdf, 0x39, 0x33, 0xf3, 0xcd, 0x79, 0x7c, 0x4b, 0x42, 0xa1, + 0x6f, 0x3b, 0x8c, 0x3c, 0x5d, 0x9d, 0x38, 0x36, 0xb3, 0x51, 0xd6, 0xb5, 0x26, 0xbb, 0x4b, 0xe5, + 0x3d, 0x7b, 0xcf, 0x16, 0xe0, 0xff, 0xf8, 0xca, 0xf5, 0x57, 0x17, 0x61, 0xa1, 0x45, 0x28, 0xd5, + 0xf7, 0xc8, 0x43, 0x83, 0x8d, 0xea, 0xd3, 0x21, 0x26, 0xc3, 0xdb, 0xc9, 0xb7, 0xdf, 0x55, 0x62, + 0xd5, 0xaf, 0x12, 0x50, 0x78, 0xe8, 0x18, 0x8c, 0x60, 0xf2, 0xe9, 0x94, 0x50, 0x86, 0xb6, 0x00, + 0x98, 0x61, 0x12, 0x4a, 0x1c, 0x83, 0x50, 0x45, 0x5a, 0x4e, 0xac, 0xe4, 0xd7, 0xca, 0xab, 0xfe, + 0x05, 0xab, 0x3d, 0xc3, 0x24, 0x5d, 0xe1, 0xab, 0x2f, 0x3d, 0x3f, 0xac, 0xc4, 0x5e, 0x1e, 0x56, + 0xd0, 0x96, 0x43, 0xf4, 0xf1, 0xd8, 0xee, 0xf7, 0x82, 0x7d, 0x38, 0x72, 0x06, 0xba, 0x0e, 0xe9, + 0xae, 0x3d, 0x75, 0xfa, 0x44, 0x89, 0x2f, 0x4b, 0x2b, 0xa5, 0xe8, 0x69, 0x2e, 0xde, 0xb0, 0xa6, + 0x26, 0xf6, 0x62, 0xd0, 0x6d, 0xc8, 0x9a, 0x84, 0xe9, 0x03, 0x9d, 0xe9, 0x4a, 0x42, 0xdc, 0xae, + 0x84, 0xf1, 0x2d, 0xc2, 0x1c, 0xa3, 0xdf, 0xf2, 
0xfc, 0xf5, 0xe4, 0xf3, 0xc3, 0x8a, 0x84, 0x83, + 0x78, 0x74, 0x07, 0x96, 0xe8, 0x63, 0x63, 0xa2, 0x8d, 0xf5, 0x5d, 0x32, 0xd6, 0x2c, 0xdd, 0x24, + 0xda, 0x13, 0x7d, 0x6c, 0x0c, 0x74, 0x66, 0xd8, 0x96, 0xf2, 0x26, 0xb3, 0x2c, 0xad, 0x64, 0xf1, + 0x05, 0x1e, 0xf2, 0x80, 0x47, 0xb4, 0x75, 0x93, 0xec, 0x04, 0x7e, 0xd4, 0x82, 0x04, 0x26, 0x43, + 0xe5, 0x57, 0x1e, 0x96, 0x5f, 0xbb, 0x18, 0xbd, 0xf5, 0x58, 0xee, 0xea, 0x97, 0xf9, 0xd3, 0x0f, + 0x0e, 0x2b, 0xd2, 0xcb, 0xc3, 0xca, 0x7c, 0x6a, 0x31, 0x3f, 0x07, 0xdd, 0x80, 0xf2, 0xc0, 0xa0, + 0x7d, 0xdd, 0x19, 0x68, 0xf6, 0x94, 0x69, 0xf6, 0x50, 0xb3, 0x9d, 0x01, 0x71, 0x94, 0xdf, 0x5c, + 0x1a, 0x0b, 0x9e, 0xb3, 0x33, 0x65, 0x9d, 0x61, 0x87, 0x7b, 0xaa, 0x3f, 0xc6, 0xa1, 0x14, 0xad, + 0xc5, 0xce, 0x1a, 0x52, 0x20, 0x43, 0xf7, 0xcd, 0x5d, 0x7b, 0x4c, 0x95, 0xe4, 0x72, 0x62, 0x25, + 0x87, 0x7d, 0x13, 0xf5, 0x66, 0xea, 0x94, 0x12, 0x99, 0x3a, 0x7f, 0x52, 0x9d, 0x76, 0xd6, 0xea, + 0x97, 0xbc, 0x4a, 0x95, 0xe7, 0x2b, 0xb5, 0xb3, 0x76, 0x4a, 0xad, 0xd2, 0x7f, 0xa2, 0x56, 0xff, + 0xa4, 0x7c, 0xf3, 0xec, 0x15, 0xa2, 0xaf, 0x46, 0x15, 0xc8, 0x0b, 0x62, 0x54, 0x73, 0xc8, 0xd0, + 0x6d, 0xe5, 0x22, 0x06, 0x17, 0xc2, 0x64, 0x48, 0xd1, 0x0d, 0xc8, 0x50, 0xdd, 0x9c, 0x8c, 0x09, + 0x55, 0xe2, 0x22, 0x7f, 0x72, 0xe4, 0xb5, 0xc2, 0x21, 0x3a, 0x2c, 0x86, 0xfd, 0x30, 0xf4, 0x11, + 0xc0, 0xc8, 0xa0, 0xcc, 0xde, 0x73, 0x74, 0x93, 0x7a, 0xed, 0x79, 0x36, 0xdc, 0xb4, 0xe9, 0xfb, + 0xbc, 0x7d, 0x91, 0x60, 0xf4, 0x21, 0xe4, 0xc8, 0x53, 0x62, 0x4e, 0xc6, 0xba, 0xe3, 0xd6, 0x72, + 0x66, 0xac, 0x1a, 0x9e, 0x6b, 0x67, 0xcd, 0xdb, 0x1a, 0x06, 0xa3, 0x5b, 0x91, 0x89, 0x48, 0x89, + 0x5c, 0x95, 0x67, 0x26, 0x42, 0x78, 0x82, 0x8d, 0xe1, 0x34, 0xfc, 0x17, 0x16, 0xfa, 0x0e, 0xd1, + 0x19, 0x19, 0x68, 0xa2, 0xc2, 0x4c, 0x37, 0x27, 0xa2, 0xac, 0x09, 0x2c, 0x7b, 0x8e, 0x9e, 0x8f, + 0x57, 0x75, 0x80, 0x90, 0xc3, 0xbb, 0x53, 0x57, 0x86, 0xd4, 0x13, 0x7d, 0x3c, 0x75, 0x47, 0x5a, + 0xc2, 0xae, 0x81, 0x2e, 0x41, 0x2e, 0xbc, 0x29, 0x21, 0x6e, 0x0a, 0x81, 0xea, 0x4f, 
0x71, 0x80, + 0x90, 0x2e, 0xba, 0x09, 0x49, 0xb6, 0x3f, 0x21, 0x8a, 0x24, 0x1a, 0xad, 0x72, 0xd2, 0x93, 0xbc, + 0x79, 0xef, 0xed, 0x4f, 0x08, 0x16, 0xc1, 0x68, 0x11, 0xb2, 0x23, 0x32, 0x9e, 0x70, 0x5a, 0xe2, + 0x82, 0x22, 0xce, 0x70, 0x9b, 0xcf, 0xdb, 0x22, 0x64, 0xa7, 0x96, 0xc1, 0x84, 0x2b, 0xe9, 0xba, + 0xb8, 0xcd, 0x5b, 0xe3, 0x67, 0x49, 0xdc, 0xec, 0x1d, 0x85, 0x2e, 0xc2, 0x85, 0x56, 0xa3, 0x87, + 0x9b, 0xeb, 0x5a, 0xef, 0xd1, 0x56, 0x43, 0xdb, 0x6e, 0x77, 0xb7, 0x1a, 0xeb, 0xcd, 0x8d, 0x66, + 0xe3, 0xae, 0x1c, 0x43, 0x17, 0xe0, 0x6c, 0xd4, 0xb9, 0xde, 0xd9, 0x6e, 0xf7, 0x1a, 0x58, 0x96, + 0xd0, 0x39, 0x58, 0x88, 0x3a, 0xee, 0xd5, 0xb6, 0xef, 0x35, 0xe4, 0x38, 0x5a, 0x84, 0x73, 0x51, + 0x78, 0xb3, 0xd9, 0xed, 0x75, 0xee, 0xe1, 0x5a, 0x4b, 0x4e, 0x20, 0x15, 0x96, 0xe6, 0x76, 0x84, + 0xfe, 0xe4, 0xf1, 0xab, 0xba, 0xdb, 0xad, 0x56, 0x0d, 0x3f, 0x92, 0x53, 0xa8, 0x0c, 0x72, 0xd4, + 0xd1, 0x6c, 0x6f, 0x74, 0xe4, 0x34, 0x52, 0xa0, 0x3c, 0x13, 0xde, 0xab, 0xf5, 0x1a, 0xdd, 0x46, + 0x4f, 0xce, 0x54, 0x7f, 0x90, 0x00, 0x75, 0x99, 0x43, 0x74, 0x73, 0x46, 0xca, 0x97, 0x20, 0xdb, + 0x23, 0x96, 0x6e, 0xb1, 0xe6, 0x5d, 0x91, 0xe5, 0x1c, 0x0e, 0x6c, 0xde, 0xfb, 0x5e, 0x98, 0x28, + 0xe1, 0x8c, 0x76, 0x44, 0x0f, 0xc1, 0x7e, 0x98, 0x3f, 0xae, 0x6f, 0xde, 0xd3, 0xb8, 0x7e, 0x23, + 0x41, 0xd1, 0xbb, 0x88, 0x4e, 0x6c, 0x8b, 0x12, 0x84, 0x20, 0xd9, 0xb7, 0x07, 0x6e, 0x43, 0xa4, + 0xb0, 0x58, 0x73, 0xfd, 0x33, 0xdd, 0xfd, 0x82, 0x66, 0x0e, 0xfb, 0x26, 0xf7, 0x74, 0xbd, 0xe1, + 0x75, 0x3b, 0xcd, 0x37, 0x91, 0x0a, 0xb0, 0x19, 0x0e, 0x69, 0x52, 0x38, 0x23, 0x08, 0xef, 0xd2, + 0x46, 0x30, 0x89, 0x29, 0xb7, 0x4b, 0x03, 0xa0, 0xfa, 0xbb, 0x04, 0x10, 0xca, 0x08, 0xaa, 0x41, + 0xda, 0x6d, 0x7b, 0xef, 0x53, 0x18, 0x99, 0x76, 0xa1, 0x69, 0x5b, 0xba, 0xe1, 0xd4, 0xcb, 0x9e, + 0xbe, 0x16, 0x04, 0x54, 0x1b, 0xe8, 0x13, 0x46, 0x1c, 0xec, 0x6d, 0xfc, 0x1b, 0x32, 0x73, 0x2b, + 0xaa, 0x15, 0xae, 0xca, 0xa0, 0x79, 0xad, 0x98, 0x57, 0x8a, 0x59, 0x79, 0x4a, 0xfe, 0x05, 0x79, + 0xaa, 0xfe, 0x1f, 0x72, 
0xc1, 0x7b, 0x78, 0x25, 0xb8, 0x98, 0x8b, 0x4a, 0x14, 0xb0, 0x58, 0xcf, + 0x4e, 0x7c, 0xc1, 0x9b, 0xf8, 0xaa, 0x0d, 0x69, 0xf7, 0x09, 0xa1, 0x5f, 0x8a, 0x2a, 0xc2, 0x15, + 0x28, 0x04, 0x02, 0xa0, 0x99, 0x54, 0x6c, 0x4e, 0xe0, 0x7c, 0x80, 0xb5, 0xf8, 0x27, 0x07, 0x51, + 0xa6, 0x3b, 0x4c, 0x9b, 0x09, 0x74, 0x6b, 0x2a, 0x0b, 0x4f, 0x2f, 0x8c, 0xae, 0x7e, 0x1b, 0x87, + 0xd2, 0xec, 0xaf, 0x00, 0xf4, 0xc1, 0x8c, 0x90, 0x5c, 0x3d, 0xed, 0xd7, 0xc2, 0xbc, 0x98, 0x5c, + 0x07, 0x64, 0x0a, 0x4c, 0x1b, 0xea, 0xa6, 0x31, 0xde, 0x17, 0x5f, 0x30, 0xaf, 0xcf, 0x64, 0xd7, + 0xb3, 0x21, 0x1c, 0xfc, 0xc3, 0xc5, 0x93, 0xc2, 0xa5, 0x46, 0x34, 0x54, 0x0e, 0x8b, 0x35, 0xc7, + 0xb8, 0xc6, 0x88, 0x2e, 0xca, 0x61, 0xb1, 0xae, 0xee, 0xcf, 0x68, 0x4d, 0x1e, 0x32, 0xdb, 0xed, + 0xfb, 0xed, 0xce, 0xc3, 0xb6, 0x1c, 0xe3, 0x46, 0xa8, 0x27, 0x39, 0x48, 0xf9, 0x1a, 0x52, 0x84, + 0x5c, 0x54, 0x37, 0x10, 0x94, 0xe6, 0xb4, 0x22, 0x0f, 0x99, 0x50, 0x1f, 0xb2, 0x90, 0xf4, 0x34, + 0xa1, 0x00, 0xd9, 0x88, 0x0e, 0xdc, 0x87, 0xb4, 0x7b, 0xf5, 0x7b, 0x68, 0xdb, 0xea, 0x17, 0x12, + 0x64, 0xfd, 0x56, 0x7b, 0x1f, 0x63, 0x70, 0xf2, 0x27, 0xe3, 0x78, 0x83, 0x24, 0xe6, 0x1a, 0xa4, + 0xfa, 0x2c, 0x0d, 0xb9, 0xa0, 0x75, 0xd1, 0x65, 0xc8, 0xf5, 0xed, 0xa9, 0xc5, 0x34, 0xc3, 0x62, + 0xa2, 0xe4, 0xc9, 0xcd, 0x18, 0xce, 0x0a, 0xa8, 0x69, 0x31, 0x74, 0x05, 0xf2, 0xae, 0x7b, 0x38, + 0xb6, 0x75, 0x57, 0xdb, 0xa4, 0xcd, 0x18, 0x06, 0x01, 0x6e, 0x70, 0x0c, 0xc9, 0x90, 0xa0, 0x53, + 0x53, 0xdc, 0x24, 0x61, 0xbe, 0x44, 0xe7, 0x21, 0x4d, 0xfb, 0x23, 0x62, 0xea, 0xa2, 0xb8, 0x0b, + 0xd8, 0xb3, 0xd0, 0xbf, 0xa0, 0xf4, 0x19, 0x71, 0x6c, 0x8d, 0x8d, 0x1c, 0x42, 0x47, 0xf6, 0x78, + 0x20, 0x0a, 0x2d, 0xe1, 0x22, 0x47, 0x7b, 0x3e, 0x88, 0xfe, 0xed, 0x85, 0x85, 0xbc, 0xd2, 0x82, + 0x97, 0x84, 0x0b, 0x1c, 0x5f, 0xf7, 0xb9, 0x5d, 0x03, 0x39, 0x12, 0xe7, 0x12, 0xcc, 0x08, 0x82, + 0x12, 0x2e, 0x05, 0x91, 0x2e, 0xc9, 0x1a, 0x94, 0x2c, 0xb2, 0xa7, 0x33, 0xe3, 0x09, 0xd1, 0xe8, + 0x44, 0xb7, 0xa8, 0x92, 0x3d, 0xfe, 0x9b, 0xa1, 0x3e, 0xed, 
0x3f, 0x26, 0xac, 0x3b, 0xd1, 0x2d, + 0x6f, 0x9e, 0x8b, 0xfe, 0x0e, 0x8e, 0x51, 0xf4, 0x1f, 0x38, 0x13, 0x1c, 0x31, 0x20, 0x63, 0xa6, + 0x53, 0x25, 0xb7, 0x9c, 0x58, 0x41, 0x38, 0x38, 0xf9, 0xae, 0x40, 0x67, 0x02, 0x05, 0x37, 0xaa, + 0xc0, 0x72, 0x62, 0x45, 0x0a, 0x03, 0x05, 0x31, 0x2e, 0x86, 0xa5, 0x89, 0x4d, 0x8d, 0x08, 0xa9, + 0xfc, 0xbb, 0x49, 0xf9, 0x3b, 0x02, 0x52, 0xc1, 0x11, 0x1e, 0xa9, 0x82, 0x4b, 0xca, 0x87, 0x43, + 0x52, 0x41, 0xa0, 0x47, 0xaa, 0xe8, 0x92, 0xf2, 0x61, 0x8f, 0xd4, 0x1d, 0x00, 0x87, 0x50, 0xc2, + 0xb4, 0x11, 0xcf, 0x7c, 0x49, 0x88, 0xc0, 0xe5, 0x13, 0x44, 0x6f, 0x15, 0xf3, 0xa8, 0x4d, 0xc3, + 0x62, 0x38, 0xe7, 0xf8, 0xcb, 0xb9, 0xfe, 0x3b, 0x33, 0x2f, 0x50, 0x57, 0xa1, 0xd8, 0x9f, 0x52, + 0x66, 0x9b, 0x9a, 0x68, 0x59, 0xaa, 0xc8, 0x82, 0x47, 0xc1, 0x05, 0x77, 0x04, 0x76, 0x8a, 0x8a, + 0x2d, 0x9c, 0xa2, 0x62, 0xb7, 0x21, 0x17, 0xb0, 0x99, 0x95, 0x88, 0x0c, 0x24, 0x1e, 0x35, 0xba, + 0xb2, 0x84, 0xd2, 0x10, 0x6f, 0x77, 0xe4, 0x78, 0x28, 0x13, 0x89, 0xa5, 0xe4, 0x97, 0xcf, 0x54, + 0xa9, 0x9e, 0x81, 0x94, 0xc8, 0x47, 0xbd, 0x00, 0x10, 0xb6, 0x53, 0xf5, 0x0e, 0x40, 0x98, 0x7b, + 0xde, 0xd1, 0xf6, 0x70, 0x48, 0x89, 0x3b, 0x22, 0x0b, 0xd8, 0xb3, 0x38, 0x3e, 0x26, 0xd6, 0x1e, + 0x1b, 0x89, 0xc9, 0x28, 0x62, 0xcf, 0xba, 0x56, 0x01, 0x08, 0x7f, 0xdf, 0x73, 0x12, 0xb5, 0xad, + 0xa6, 0x1c, 0xe3, 0x42, 0x83, 0xb7, 0x1f, 0x34, 0x64, 0xa9, 0xfe, 0xf1, 0xc1, 0x2b, 0x35, 0xf6, + 0xe2, 0x95, 0x1a, 0x7b, 0xfb, 0x4a, 0x95, 0x3e, 0x3f, 0x52, 0xa5, 0xef, 0x8f, 0x54, 0xe9, 0xf9, + 0x91, 0x2a, 0x1d, 0x1c, 0xa9, 0xd2, 0x2f, 0x47, 0xaa, 0xf4, 0xe6, 0x48, 0x8d, 0xbd, 0x3d, 0x52, + 0xa5, 0xaf, 0x5f, 0xab, 0xb1, 0x83, 0xd7, 0x6a, 0xec, 0xc5, 0x6b, 0x35, 0xf6, 0x49, 0xf0, 0xc7, + 0x74, 0x37, 0x2d, 0xfe, 0x89, 0xde, 0xfc, 0x23, 0x00, 0x00, 0xff, 0xff, 0x75, 0x0f, 0x93, 0x48, + 0xb9, 0x0e, 0x00, 0x00, } func (x SourceEnum) String() string { @@ -1904,6 +1922,9 @@ func (this *Sample) Equal(that interface{}) bool { if this.TimestampMs != that1.TimestampMs { return false } + 
if this.StartTimestampMs != that1.StartTimestampMs { + return false + } return true } func (this *MetricMetadata) Equal(that interface{}) bool { @@ -2111,6 +2132,9 @@ func (this *Histogram) Equal(that interface{}) bool { return false } } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } return true } func (this *Histogram_CountInt) Equal(that interface{}) bool { @@ -2407,10 +2431,11 @@ func (this *Sample) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&cortexpb.Sample{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2453,7 +2478,7 @@ func (this *Histogram) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 20) + s := make([]string, 0, 21) s = append(s, "&cortexpb.Histogram{") if this.Count != nil { s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") @@ -2485,6 +2510,7 @@ func (this *Histogram) GoString() string { s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n") s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n") s = append(s, "CustomValues: "+fmt.Sprintf("%#v", this.CustomValues)+",\n") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3157,6 +3183,11 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x18 + } if m.TimestampMs != 0 { i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs)) i-- @@ -3325,6 +3356,13 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StartTimestampMs != 0 
{ + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } if len(m.CustomValues) > 0 { for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { f10 := math.Float64bits(float64(m.CustomValues[iNdEx])) @@ -3816,6 +3854,9 @@ func (m *Sample) Size() (n int) { if m.TimestampMs != 0 { n += 1 + sovCortex(uint64(m.TimestampMs)) } + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } return n } @@ -3941,6 +3982,9 @@ func (m *Histogram) Size() (n int) { if len(m.CustomValues) > 0 { n += 2 + sovCortex(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 } + if m.StartTimestampMs != 0 { + n += 2 + sovCortex(uint64(m.StartTimestampMs)) + } return n } @@ -4170,6 +4214,7 @@ func (this *Sample) String() string { s := strings.Join([]string{`&Sample{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `}`, }, "") return s @@ -4238,6 +4283,7 @@ func (this *Histogram) String() string { `ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`, `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`, `CustomValues:` + fmt.Sprintf("%v", this.CustomValues) + `,`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, `}`, }, "") return s @@ -5994,6 +6040,25 @@ func (m *Sample) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) @@ -6948,6 +7013,25 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { } else { 
return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index 62885116953..a0eecd9fb31 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -151,6 +151,7 @@ message LabelPair { message Sample { double value = 1; int64 timestamp_ms = 2; + int64 start_timestamp_ms = 3; } message MetricMetadata { @@ -265,6 +266,8 @@ message Histogram { // The last element is not only the upper inclusive bound of the last regular // bucket, but implicitly the lower exclusive bound of the +Inf bucket. 
repeated double custom_values = 16; + + int64 start_timestamp_ms = 17; } // A BucketSpan defines a number of consecutive buckets with their diff --git a/pkg/cortexpb/timeseriesv2.go b/pkg/cortexpb/timeseriesv2.go index 291ac32789e..ed8d9ba5696 100644 --- a/pkg/cortexpb/timeseriesv2.go +++ b/pkg/cortexpb/timeseriesv2.go @@ -2,10 +2,19 @@ package cortexpb import ( "sync" + + "go.uber.org/atomic" ) +var dynamicSymbolsCapacity atomic.Int64 + +func init() { + dynamicSymbolsCapacity.Store(int64(initialSymbolsCapacity)) +} + var ( - expectedSymbols = 20 + initialSymbolsCapacity = 128 + maxSymbolsCapacity = int64(8192) slicePoolV2 = sync.Pool{ New: func() any { @@ -29,7 +38,7 @@ var ( New: func() any { return &PreallocWriteRequestV2{ WriteRequestV2: WriteRequestV2{ - Symbols: make([]string, 0, expectedSymbols), + Symbols: make([]string, 0, dynamicSymbolsCapacity.Load()), }, } }, @@ -77,6 +86,38 @@ func ReuseWriteRequestV2(req *PreallocWriteRequestV2) { req.data = nil } req.Source = 0 + + // If the underlying array has grown beyond our acceptable maximum capacity, + // we discard this object instead of putting it back into the pool to let GC + // reclaim it. + symbolsCap := int64(cap(req.Symbols)) + if symbolsCap > maxSymbolsCapacity { + if req.Timeseries != nil { + ReuseSliceV2(req.Timeseries) + req.Timeseries = nil + } + return + } + + // Update the dynamic symbol capacity. + for { + current := dynamicSymbolsCapacity.Load() + // We use an EMA to update the capacity. 
+ newAvg := max((current*9+symbolsCap*1)/10, int64(initialSymbolsCapacity)) + + if current == newAvg { + // nothing to change + break + } + + if dynamicSymbolsCapacity.CompareAndSwap(current, newAvg) { + break + } + } + + for i := range req.Symbols { + req.Symbols[i] = "" + } req.Symbols = req.Symbols[:0] if req.Timeseries != nil { ReuseSliceV2(req.Timeseries) @@ -120,6 +161,8 @@ func ReuseTimeseriesV2(ts *TimeSeriesV2) { ts.Metadata.Type = 0 ts.Metadata.UnitRef = 0 ts.Metadata.HelpRef = 0 + // Clear CT + ts.CreatedTimestamp = 0 // clear exemplar label refs for i := range ts.Exemplars { diff --git a/pkg/cortexpb/timeseriesv2_test.go b/pkg/cortexpb/timeseriesv2_test.go index 270a8597111..3f06bd4c092 100644 --- a/pkg/cortexpb/timeseriesv2_test.go +++ b/pkg/cortexpb/timeseriesv2_test.go @@ -42,6 +42,8 @@ func TestTimeseriesV2FromPool(t *testing.T) { ts.Samples = []Sample{{Value: 1, TimestampMs: 2}} ts.Exemplars = []ExemplarV2{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}} ts.Histograms = []Histogram{{}} + ts.CreatedTimestamp = 12345 + ts.Metadata = MetadataV2{Type: 1, HelpRef: 2, UnitRef: 3} ReuseTimeseriesV2(ts) reused := TimeseriesV2FromPool() @@ -49,6 +51,71 @@ func TestTimeseriesV2FromPool(t *testing.T) { assert.Len(t, reused.Samples, 0) assert.Len(t, reused.Exemplars, 0) assert.Len(t, reused.Histograms, 0) + assert.Zero(t, reused.CreatedTimestamp) + assert.Zero(t, reused.Metadata.Type) + assert.Zero(t, reused.Metadata.HelpRef) + assert.Zero(t, reused.Metadata.UnitRef) + }) +} + +func TestReuseWriteRequestV2(t *testing.T) { + t.Run("resets fields to default and cleans backing array", func(t *testing.T) { + req := PreallocWriteRequestV2FromPool() + + // Populate req with some data. 
+ req.Source = RULE + req.Symbols = append(req.Symbols, "", "__name__", "test") + + tsSlice := PreallocTimeseriesV2SliceFromPool() + tsSlice = append(tsSlice, PreallocTimeseriesV2{TimeSeriesV2: TimeseriesV2FromPool()}) + req.Timeseries = tsSlice + + // Capture backing array before reuse + symbolsBackingArray := req.Symbols[:cap(req.Symbols)] + require.Equal(t, "__name__", symbolsBackingArray[1]) + require.Equal(t, "test", symbolsBackingArray[2]) + + // Put the request back into the pool + ReuseWriteRequestV2(req) + + // Verify clearing directly on the backing array + for i, s := range symbolsBackingArray[:3] { + assert.Equalf(t, "", s, "symbol at index %d not cleared", i) + } + + // Source is reset to default + assert.Equal(t, API, req.Source) + // The symbol length is properly reset to 0. + assert.Len(t, req.Symbols, 0) + // Timeseries slice is nil + assert.Nil(t, req.Timeseries) + }) + t.Run("updates dynamic capacity", func(t *testing.T) { + currentCap := dynamicSymbolsCapacity.Load() + newCap := int(currentCap) + 100 // Increase capacity + + req := PreallocWriteRequestV2FromPool() + req.Symbols = make([]string, newCap) + req.Timeseries = PreallocTimeseriesV2SliceFromPool() + + ReuseWriteRequestV2(req) + + // Verify that the dynamic capacity has been updated + expectedCap := max((currentCap*9+int64(newCap))/10, int64(initialSymbolsCapacity)) + assert.Equal(t, expectedCap, dynamicSymbolsCapacity.Load()) + }) + t.Run("outlier capacity does not update dynamic capacity and is discarded", func(t *testing.T) { + currentCap := dynamicSymbolsCapacity.Load() + outlierCap := int(maxSymbolsCapacity) + 100 // Exceeds the max limit + + req := PreallocWriteRequestV2FromPool() + req.Symbols = make([]string, outlierCap) + req.Timeseries = PreallocTimeseriesV2SliceFromPool() + + ReuseWriteRequestV2(req) + + // Verify dynamic capacity didn't increase due to out-of-bound outlier + assert.Equal(t, currentCap, dynamicSymbolsCapacity.Load()) }) } diff --git 
a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 04f62fabbe6..b217a09e9c5 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -171,9 +171,6 @@ type Config struct { // this (and should never use it) but this feature is used by other projects built on top of it SkipLabelNameValidation bool `yaml:"-"` - // This config is dynamically injected because defined in the querier config. - ShuffleShardingLookbackPeriod time.Duration `yaml:"-"` - // ZoneResultsQuorumMetadata enables zone results quorum when querying ingester replication set // with metadata APIs (labels names and values for now). When zone awareness is enabled, only results // from quorum number of zones will be included to reduce data merged and improve performance. diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 2b3b91b0fe3..d8d270aaf3c 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -3388,7 +3388,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] if cfg.shuffleShardEnabled { distributorCfg.ShardingStrategy = util.ShardingStrategyShuffle - distributorCfg.ShuffleShardingLookbackPeriod = time.Hour + cfg.limits.ShuffleShardingIngestersLookbackPeriod = model.Duration(time.Hour) cfg.limits.IngestionTenantShardSize = cfg.shuffleShardSize } @@ -4794,3 +4794,147 @@ func TestDistributor_BatchTimeoutMetric(t *testing.T) { cortex_distributor_ingester_push_timeouts_total 5 `), "cortex_distributor_ingester_push_timeouts_total")) } +func TestDistributor_ShuffleShardingIngestersLookbackPeriod(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + lookbackPeriod time.Duration + shardSize int + expectedBehavior string + }{ + "lookback disabled (0) should not use shuffle sharding with lookback": { + lookbackPeriod: 0, + shardSize: 3, + expectedBehavior: "no_lookback", + }, + "lookback 1h should include ingesters from past hour": { + 
lookbackPeriod: 1 * time.Hour, + shardSize: 3, + expectedBehavior: "with_lookback", + }, + "lookback 2h should include ingesters from past 2 hours": { + lookbackPeriod: 2 * time.Hour, + shardSize: 3, + expectedBehavior: "with_lookback", + }, + "shard size 0 should not use shuffle sharding": { + lookbackPeriod: 1 * time.Hour, + shardSize: 0, + expectedBehavior: "no_shuffle_sharding", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + // Setup distributor with shuffle sharding enabled + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.IngestionTenantShardSize = testData.shardSize + limits.ShuffleShardingIngestersLookbackPeriod = model.Duration(testData.lookbackPeriod) + + numIngesters := 10 + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: numIngesters, + happyIngesters: numIngesters, + numDistributors: 1, + shardByAllLabels: true, + shuffleShardSize: testData.shardSize, + shuffleShardEnabled: true, + limits: limits, + }) + + ctx := user.InjectOrgID(context.Background(), "test-user") + + // Get ingesters for query + replicationSet, err := ds[0].GetIngestersForQuery(ctx) + require.NoError(t, err) + + switch testData.expectedBehavior { + case "no_lookback": + // When lookback is disabled, should still use shuffle sharding but without lookback + // This means we get the current shard size + if testData.shardSize > 0 { + assert.LessOrEqual(t, len(replicationSet.Instances), testData.shardSize, + "should not exceed shard size when lookback is disabled") + } + + case "with_lookback": + // When lookback is enabled, should use shuffle sharding with lookback + // This means we might get more ingesters than the shard size + assert.GreaterOrEqual(t, len(replicationSet.Instances), testData.shardSize, + "should include at least shard size ingesters with lookback") + + case "no_shuffle_sharding": + // When shard size is 0, shuffle sharding is disabled + // Should query all ingesters + 
assert.Equal(t, numIngesters, len(replicationSet.Instances), + "should query all ingesters when shuffle sharding is disabled") + } + }) + } +} + +func TestDistributor_ShuffleShardingIngestersLookbackPeriod_Validation(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + queryStoreAfter time.Duration + shuffleShardingIngestersLookbackPeriod time.Duration + shouldBeValid bool + description string + }{ + "valid: lookback >= queryStoreAfter": { + queryStoreAfter: 1 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 2 * time.Hour, + shouldBeValid: true, + description: "lookback period should be >= queryStoreAfter", + }, + "valid: lookback == queryStoreAfter": { + queryStoreAfter: 1 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 1 * time.Hour, + shouldBeValid: true, + description: "lookback period can equal queryStoreAfter", + }, + "invalid: lookback < queryStoreAfter": { + queryStoreAfter: 2 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 1 * time.Hour, + shouldBeValid: false, + description: "lookback period must be >= queryStoreAfter", + }, + "valid: both disabled": { + queryStoreAfter: 0, + shuffleShardingIngestersLookbackPeriod: 0, + shouldBeValid: true, + description: "both can be disabled", + }, + "valid: queryStoreAfter disabled": { + queryStoreAfter: 0, + shuffleShardingIngestersLookbackPeriod: 1 * time.Hour, + shouldBeValid: true, + description: "queryStoreAfter can be disabled while lookback is enabled", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.QueryStoreAfter = model.Duration(testData.queryStoreAfter) + limits.ShuffleShardingIngestersLookbackPeriod = model.Duration(testData.shuffleShardingIngestersLookbackPeriod) + + // ValidateQueryLimits requires userID and closeIdleTSDBTimeout + err := limits.ValidateQueryLimits("test-user", 13*time.Hour) + + if testData.shouldBeValid { + 
assert.NoError(t, err, testData.description) + } else { + assert.Error(t, err, testData.description) + assert.Contains(t, err.Error(), "shuffle_sharding_ingesters_lookback_period", testData.description) + } + }) + } +} diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 9835ab1c822..3c858406650 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -92,7 +92,7 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab // part of the tenant's subring. if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { shardSize := d.limits.IngestionTenantShardSize(userID) - lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod + lookbackPeriod := d.limits.ShuffleShardingIngestersLookbackPeriod(userID) if shardSize > 0 && lookbackPeriod > 0 { return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) @@ -123,7 +123,7 @@ func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.Replica // part of the tenant's subring. if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { shardSize := d.limits.IngestionTenantShardSize(userID) - lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod + lookbackPeriod := d.limits.ShuffleShardingIngestersLookbackPeriod(userID) if shardSize > 0 && lookbackPeriod > 0 { return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 59f2abd09ed..cffb9ca332b 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -143,9 +143,6 @@ type Config struct { DistributorShardingStrategy string `yaml:"-"` DistributorShardByAllLabels bool `yaml:"-"` - // Injected at runtime and read from querier config. 
- QueryIngestersWithin time.Duration `yaml:"-"` - DefaultLimits InstanceLimits `yaml:"instance_limits"` InstanceLimitsFn func() *InstanceLimits `yaml:"-"` @@ -1336,6 +1333,8 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte failedHistogramsCount = 0 succeededExemplarsCount = 0 failedExemplarsCount = 0 + startTimestampSampleAppendFailCount = 0 + startTimestampHistogramAppendFailCount = 0 startAppend = time.Now() sampleOutOfBoundsCount = 0 sampleOutOfOrderCount = 0 @@ -1460,6 +1459,14 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte for _, s := range ts.Samples { var err error + if s.StartTimestampMs != 0 && s.TimestampMs != 0 { + // TODO(SungJin1212): Change to AppendSTZeroSample after update the Prometheus v3.9.0+ + if _, err = app.AppendCTZeroSample(ref, copiedLabels, s.TimestampMs, s.StartTimestampMs); err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + startTimestampSampleAppendFailCount++ + i.metrics.startTimestampFail.WithLabelValues(sampleMetricTypeFloat).Inc() + } + } + // If the cached reference exists, we try to use it. 
if ref != 0 { if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil { @@ -1506,6 +1513,14 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte h = cortexpb.HistogramProtoToHistogram(hp) } + if hp.StartTimestampMs != 0 && hp.TimestampMs != 0 { + // TODO(SungJin1212): Change to AppendHistogramSTZeroSample after update the Prometheus v3.9.0+ + if _, err = app.AppendHistogramCTZeroSample(ref, copiedLabels, hp.TimestampMs, hp.StartTimestampMs, h, fh); err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + startTimestampHistogramAppendFailCount++ + i.metrics.startTimestampFail.WithLabelValues(sampleMetricTypeHistogram).Inc() + } + } + if ref != 0 { if _, err = app.AppendHistogram(ref, copiedLabels, hp.TimestampMs, h, fh); err == nil { succeededHistogramsCount++ @@ -1587,6 +1602,15 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte // At this point all samples have been added to the appender, so we can track the time it took. 
i.TSDBState.appenderAddDuration.Observe(time.Since(startAppend).Seconds()) + if startTimestampSampleAppendFailCount > 0 || startTimestampHistogramAppendFailCount > 0 { + level.Debug(logutil.WithContext(ctx, i.logger)).Log( + "msg", "failed to append start timestamp in push", + "user", userID, + "sample_failures", startTimestampSampleAppendFailCount, + "histogram_failures", startTimestampHistogramAppendFailCount, + ) + } + startCommit := time.Now() if err := app.Commit(); err != nil { return nil, wrapWithUser(err, userID) @@ -1926,7 +1950,7 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu } defer db.releaseReadLock() - mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db, i.cfg.QueryIngestersWithin) + mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db, i.limits.QueryIngestersWithin(userID)) if err != nil { return nil, cleanup, err } @@ -2042,7 +2066,7 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR } defer db.releaseReadLock() - mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db, i.cfg.QueryIngestersWithin) + mint, maxt, err := metadataQueryRange(startTimestampMs, endTimestampMs, db, i.limits.QueryIngestersWithin(userID)) if err != nil { return nil, cleanup, err } @@ -2174,7 +2198,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien return cleanup, err } - mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db, i.cfg.QueryIngestersWithin) + mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db, i.limits.QueryIngestersWithin(userID)) if err != nil { return cleanup, err } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 8f09aab9876..5dd0d1ec36b 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -2127,6 +2127,160 @@ func TestIngester_Push(t *testing.T) { } } +func 
TestIngester_Push_StartTimestamp(t *testing.T) { + tests := []struct { + name string + metricName string + req *cortexpb.WriteRequest + assertFn func(t *testing.T, ts cortexpb.TimeSeries) + }{ + { + name: "sample start timestamp appends zero sample", + metricName: "test_start_timestamp_sample", + req: cortexpb.ToWriteRequest( + []labels.Labels{labels.FromStrings(labels.MetricName, "test_start_timestamp_sample")}, + []cortexpb.Sample{{Value: 42, TimestampMs: 200, StartTimestampMs: 100}}, + nil, + nil, + cortexpb.API, + ), + assertFn: func(t *testing.T, ts cortexpb.TimeSeries) { + require.Len(t, ts.Samples, 2) + assert.Equal(t, int64(100), ts.Samples[0].TimestampMs) + assert.Equal(t, float64(0), ts.Samples[0].Value) + assert.Equal(t, int64(200), ts.Samples[1].TimestampMs) + assert.Equal(t, float64(42), ts.Samples[1].Value) + }, + }, + { + name: "histogram start timestamp appends zero histogram", + metricName: "test_start_timestamp_histogram", + req: func() *cortexpb.WriteRequest { + h := cortexpb.HistogramToHistogramProto(200, tsdbutil.GenerateTestHistogram(1)) + h.StartTimestampMs = 100 + return cortexpb.ToWriteRequest( + []labels.Labels{labels.FromStrings(labels.MetricName, "test_start_timestamp_histogram")}, + nil, + nil, + []cortexpb.Histogram{h}, + cortexpb.API, + ) + }(), + assertFn: func(t *testing.T, ts cortexpb.TimeSeries) { + require.Len(t, ts.Histograms, 2) + assert.Equal(t, int64(100), ts.Histograms[0].TimestampMs) + assert.Equal(t, int64(200), ts.Histograms[1].TimestampMs) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + cfg.LifecyclerConfig.JoinAfter = 0 + + limits := defaultLimitsTestConfig() + limits.EnableNativeHistograms = true + + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", prometheus.NewRegistry()) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) + defer 
services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck + + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { + return ing.lifecycler.GetState() + }) + + ctx := user.InjectOrgID(context.Background(), "test") + _, err = ing.Push(ctx, tc.req) + require.NoError(t, err) + + s := &mockQueryStreamServer{ctx: ctx} + err = ing.QueryStream(&client.QueryRequest{ + StartTimestampMs: math.MinInt64, + EndTimestampMs: math.MaxInt64, + Matchers: []*client.LabelMatcher{{Type: client.EQUAL, Name: labels.MetricName, Value: tc.metricName}}, + }, s) + require.NoError(t, err) + + set, err := seriesSetFromResponseStream(s) + require.NoError(t, err) + + resp, err := client.SeriesSetToQueryResponse(set) + require.NoError(t, err) + require.Len(t, resp.Timeseries, 1) + + ts := resp.Timeseries[0] + tc.assertFn(t, ts) + }) + } +} + +func TestIngester_Push_StartTimestampAppendFailureMetrics(t *testing.T) { + tests := []struct { + name string + req *cortexpb.WriteRequest + expectedType string + unexpectedType string + }{ + { + name: "sample start timestamp append failure increments float metric", + req: cortexpb.ToWriteRequest( + []labels.Labels{labels.FromStrings(labels.MetricName, "test_start_timestamp_failure_sample")}, + []cortexpb.Sample{{Value: 42, TimestampMs: 200, StartTimestampMs: math.MinInt64}}, + nil, + nil, + cortexpb.API, + ), + expectedType: sampleMetricTypeFloat, + unexpectedType: sampleMetricTypeHistogram, + }, + { + name: "histogram start timestamp append failure increments histogram metric", + req: func() *cortexpb.WriteRequest { + h := cortexpb.HistogramToHistogramProto(200, tsdbutil.GenerateTestHistogram(1)) + h.StartTimestampMs = math.MinInt64 + return cortexpb.ToWriteRequest( + []labels.Labels{labels.FromStrings(labels.MetricName, "test_start_timestamp_failure_histogram")}, + nil, + nil, + []cortexpb.Histogram{h}, + cortexpb.API, + ) + }(), + expectedType: sampleMetricTypeHistogram, + unexpectedType: sampleMetricTypeFloat, + }, + } + + for 
_, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + cfg.LifecyclerConfig.JoinAfter = 0 + + limits := defaultLimitsTestConfig() + limits.EnableNativeHistograms = true + + registry := prometheus.NewRegistry() + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, nil, "", registry) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) + defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck + + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { + return ing.lifecycler.GetState() + }) + + ctx := user.InjectOrgID(context.Background(), "test") + _, err = ing.Push(ctx, tc.req) + require.NoError(t, err) + + require.Equal(t, float64(1), testutil.ToFloat64(ing.metrics.startTimestampFail.WithLabelValues(tc.expectedType))) + require.Equal(t, float64(0), testutil.ToFloat64(ing.metrics.startTimestampFail.WithLabelValues(tc.unexpectedType))) + }) + } +} + // Referred from https://github.com/prometheus/prometheus/blob/v3.9.1/model/histogram/histogram_test.go#L1384. 
func TestIngester_PushNativeHistogramErrors(t *testing.T) { metricLabelAdapters := []cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}} @@ -3712,13 +3866,17 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { + limits := defaultLimitsTestConfig() + limits.QueryIngestersWithin = model.Duration(testData.queryIngestersWithin) + tenantLimits := newMockTenantLimits(map[string]*validation.Limits{"test": &limits}) + i.limits = validation.NewOverrides(limits, tenantLimits) + req := &client.MetricsForLabelMatchersRequest{ StartTimestampMs: testData.from, EndTimestampMs: testData.to, MatchersSet: testData.matchers, Limit: testData.limit, } - i.cfg.QueryIngestersWithin = testData.queryIngestersWithin res, err := i.MetricsForLabelMatchers(ctx, req) require.NoError(t, err) assert.ElementsMatch(t, testData.expected, res.Metric) @@ -6327,12 +6485,15 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { cfg.BlocksStorageConfig.TSDB.BlockRanges = []time.Duration{2 * time.Hour} cfg.BlocksStorageConfig.TSDB.PostingsCache.Blocks.Enabled = true cfg.BlocksStorageConfig.TSDB.PostingsCache.Head.Enabled = true - cfg.QueryIngestersWithin = 24 * time.Hour + + limits := defaultLimitsTestConfig() + limits.QueryIngestersWithin = model.Duration(24 * time.Hour) + tenantLimits := newMockTenantLimits(map[string]*validation.Limits{userID: &limits}) ctx := user.InjectOrgID(context.Background(), userID) r := prometheus.NewRegistry() - ing, err := prepareIngesterWithBlocksStorage(t, cfg, r) + ing, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, tenantLimits, "", r) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index 773646cb12b..f0fbddd334e 100644 --- a/pkg/ingester/metrics.go +++ 
b/pkg/ingester/metrics.go @@ -36,6 +36,7 @@ type ingesterMetrics struct { ingestedMetadata prometheus.Counter ingestedSamplesFail prometheus.Counter ingestedHistogramsFail prometheus.Counter + startTimestampFail *prometheus.CounterVec ingestedExemplarsFail prometheus.Counter ingestedMetadataFail prometheus.Counter ingestedHistogramBuckets *prometheus.HistogramVec @@ -123,6 +124,10 @@ func newIngesterMetrics(r prometheus.Registerer, Name: "cortex_ingester_ingested_native_histograms_failures_total", Help: "The total number of native histograms that errored on ingestion.", }), + startTimestampFail: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_start_timestamp_append_failures_total", + Help: "Total number of failed appends for samples and histograms with a start timestamp.", + }, []string{"type"}), ingestedExemplarsFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_ingested_exemplars_failures_total", Help: "The total number of exemplars that errored on ingestion.", @@ -355,6 +360,9 @@ func newIngesterMetrics(r prometheus.Registerer, }, []string{"user"}) } + m.startTimestampFail.WithLabelValues(sampleMetricTypeFloat) + m.startTimestampFail.WithLabelValues(sampleMetricTypeHistogram) + return m } diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index 011e530c896..cc1c85929cf 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -151,6 +151,10 @@ func TestIngesterMetrics(t *testing.T) { # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter cortex_ingester_ingested_samples_total 0 + # HELP cortex_ingester_start_timestamp_append_failures_total Total number of failed appends for samples and histograms with a start timestamp. 
+ # TYPE cortex_ingester_start_timestamp_append_failures_total counter + cortex_ingester_start_timestamp_append_failures_total{type="float"} 0 + cortex_ingester_start_timestamp_append_failures_total{type="histogram"} 0 # HELP cortex_ingester_ingested_native_histograms_total The total number of native histograms ingested. # TYPE cortex_ingester_ingested_native_histograms_total counter cortex_ingester_ingested_native_histograms_total 0 diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index b3e336a940a..b755f643a2c 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -104,6 +104,7 @@ type BlocksStoreLimits interface { MaxChunksPerQueryFromStore(userID string) int StoreGatewayTenantShardSize(userID string) float64 + QueryStoreAfter(userID string) time.Duration } type blocksStoreQueryableMetrics struct { @@ -133,13 +134,12 @@ func newBlocksStoreQueryableMetrics(reg prometheus.Registerer) *blocksStoreQuery type BlocksStoreQueryable struct { services.Service - stores BlocksStoreSet - finder BlocksFinder - consistency *BlocksConsistencyChecker - logger log.Logger - queryStoreAfter time.Duration - metrics *blocksStoreQueryableMetrics - limits BlocksStoreLimits + stores BlocksStoreSet + finder BlocksFinder + consistency *BlocksConsistencyChecker + logger log.Logger + metrics *blocksStoreQueryableMetrics + limits BlocksStoreLimits storeGatewayQueryStatsEnabled bool storeGatewayConsistencyCheckMaxAttempts int @@ -168,7 +168,6 @@ func NewBlocksStoreQueryable( stores: stores, finder: finder, consistency: consistency, - queryStoreAfter: config.QueryStoreAfter, logger: logger, subservices: manager, subservicesWatcher: services.NewFailureWatcher(), @@ -305,7 +304,6 @@ func (q *BlocksStoreQueryable) Querier(mint, maxt int64) (storage.Querier, error limits: q.limits, consistency: q.consistency, logger: q.logger, - queryStoreAfter: q.queryStoreAfter, storeGatewayQueryStatsEnabled: 
q.storeGatewayQueryStatsEnabled, storeGatewayConsistencyCheckMaxAttempts: q.storeGatewayConsistencyCheckMaxAttempts, storeGatewaySeriesBatchSize: q.storeGatewaySeriesBatchSize, @@ -321,10 +319,6 @@ type blocksStoreQuerier struct { limits BlocksStoreLimits logger log.Logger - // If set, the querier manipulates the max time to not be greater than - // "now - queryStoreAfter" so that most recent blocks are not queried. - queryStoreAfter time.Duration - // If enabled, query stats of store gateway requests will be logged // using `info` level. storeGatewayQueryStatsEnabled bool @@ -492,14 +486,15 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, matchers []*labels.Matcher, userID string, queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { + queryStoreAfter := q.limits.QueryStoreAfter(userID) // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This // optimization is particularly important for the blocks storage because can be used to skip // querying most recent not-compacted-yet blocks from the storage. 
- if q.queryStoreAfter > 0 { + if queryStoreAfter > 0 { now := time.Now() origMaxT := maxT - maxT = min(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter))) + maxT = min(maxT, util.TimeToMillis(now.Add(-queryStoreAfter))) if origMaxT != maxT { level.Debug(logger).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 26a2c2fb4ac..f81831f6385 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -2424,15 +2424,14 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) q := &blocksStoreQuerier{ - minT: testData.queryMinT, - maxT: testData.queryMaxT, - finder: finder, - stores: &blocksStoreSetMock{}, - consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), - logger: log.NewNopLogger(), - metrics: newBlocksStoreQueryableMetrics(nil), - limits: &blocksStoreLimitsMock{}, - queryStoreAfter: testData.queryStoreAfter, + minT: testData.queryMinT, + maxT: testData.queryMaxT, + finder: finder, + stores: &blocksStoreSetMock{}, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(nil), + limits: &blocksStoreLimitsMock{queryStoreAfter: testData.queryStoreAfter}, } sp := &storage.SelectHints{ @@ -2448,7 +2447,7 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) } else { require.Len(t, finder.Calls, 1) assert.Equal(t, testData.expectedMinT, finder.Calls[0].Arguments.Get(2)) - assert.InDelta(t, testData.expectedMaxT, finder.Calls[0].Arguments.Get(3), float64(5*time.Second.Milliseconds())) + assert.InDelta(t, 
testData.expectedMaxT, finder.Calls[0].Arguments.Get(3), float64(15*time.Second.Milliseconds())) } }) } @@ -2550,7 +2549,6 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { // Instance the querier that will be executed to run the query. cfg := Config{ - QueryStoreAfter: 0, StoreGatewayQueryStatsEnabled: false, StoreGatewayConsistencyCheckMaxAttempts: 3, } @@ -2718,6 +2716,7 @@ func (m *storeGatewaySeriesClientMock) Recv() (*storepb.SeriesResponse, error) { type blocksStoreLimitsMock struct { maxChunksPerQuery int storeGatewayTenantShardSize float64 + queryStoreAfter time.Duration } func (m *blocksStoreLimitsMock) MaxChunksPerQueryFromStore(_ string) int { @@ -2728,6 +2727,10 @@ func (m *blocksStoreLimitsMock) StoreGatewayTenantShardSize(_ string) float64 { return m.storeGatewayTenantShardSize } +func (m *blocksStoreLimitsMock) QueryStoreAfter(_ string) time.Duration { + return m.queryStoreAfter +} + func (m *blocksStoreLimitsMock) S3SSEType(_ string) string { return "" } @@ -2989,3 +2992,204 @@ func createAggrChunk(t *testing.T, step time.Duration, from model.Time, points i }, } } + +func TestBlocksStoreQuerier_MultiTenantQueryStoreAfter(t *testing.T) { + t.Parallel() + + now := time.Now() + + tests := map[string]struct { + queryStoreAfter time.Duration + queryMinT int64 + queryMaxT int64 + expectedMinT int64 + expectedMaxT int64 + description string + }{ + "30m cutoff: should manipulate recent query": { + queryStoreAfter: 30 * time.Minute, + queryMinT: util.TimeToMillis(now.Add(-2 * time.Hour)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-2 * time.Hour)), + expectedMaxT: util.TimeToMillis(now.Add(-30 * time.Minute)), + description: "tenant with 30m cutoff should query blocks up to 30m ago", + }, + "2h cutoff: should manipulate recent query": { + queryStoreAfter: 2 * time.Hour, + queryMinT: util.TimeToMillis(now.Add(-5 * time.Hour)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-5 * 
time.Hour)), + expectedMaxT: util.TimeToMillis(now.Add(-2 * time.Hour)), + description: "tenant with 2h cutoff should query blocks up to 2h ago", + }, + "disabled: should not manipulate time range": { + queryStoreAfter: 0, + queryMinT: util.TimeToMillis(now.Add(-5 * time.Hour)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-5 * time.Hour)), + expectedMaxT: util.TimeToMillis(now), + description: "disabled queryStoreAfter should not manipulate time range", + }, + "1h cutoff: query already old should not be manipulated": { + queryStoreAfter: 1 * time.Hour, + queryMinT: util.TimeToMillis(now.Add(-3 * time.Hour)), + queryMaxT: util.TimeToMillis(now.Add(-2 * time.Hour)), + expectedMinT: util.TimeToMillis(now.Add(-3 * time.Hour)), + expectedMaxT: util.TimeToMillis(now.Add(-2 * time.Hour)), + description: "query already older than cutoff should not be manipulated", + }, + "2h cutoff: recent query should be skipped": { + queryStoreAfter: 2 * time.Hour, + queryMinT: util.TimeToMillis(now.Add(-1 * time.Hour)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: 0, + expectedMaxT: 0, + description: "query entirely within cutoff period should be skipped", + }, + "1h cutoff: partial overlap should manipulate": { + queryStoreAfter: 1 * time.Hour, + queryMinT: util.TimeToMillis(now.Add(-90 * time.Minute)), + queryMaxT: util.TimeToMillis(now.Add(-30 * time.Minute)), + expectedMinT: util.TimeToMillis(now.Add(-90 * time.Minute)), + expectedMaxT: util.TimeToMillis(now.Add(-60 * time.Minute)), + description: "query partially overlapping cutoff should be manipulated", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + ctx := user.InjectOrgID(context.Background(), "test-tenant") + finder := &blocksFinderMock{} + finder.On("GetBlocks", mock.Anything, "test-tenant", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), 
error(nil)) + + q := &blocksStoreQuerier{ + minT: testData.queryMinT, + maxT: testData.queryMaxT, + finder: finder, + stores: &blocksStoreSetMock{}, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(nil), + limits: &blocksStoreLimitsMock{queryStoreAfter: testData.queryStoreAfter}, + } + + sp := &storage.SelectHints{ + Start: testData.queryMinT, + End: testData.queryMaxT, + } + + set := q.selectSorted(ctx, sp) + require.NoError(t, set.Err()) + + if testData.expectedMinT == 0 && testData.expectedMaxT == 0 { + assert.Len(t, finder.Calls, 0, testData.description) + } else { + require.Len(t, finder.Calls, 1, testData.description) + assert.Equal(t, testData.expectedMinT, finder.Calls[0].Arguments.Get(2), testData.description) + // Allow 15 seconds of time drift to account for CI environment delays. + // The actual code calls time.Now() when manipulating query time ranges, + // which can differ from the test's captured 'now' value. 
+ assert.InDelta(t, testData.expectedMaxT, finder.Calls[0].Arguments.Get(3), float64(15*time.Second.Milliseconds()), testData.description) + } + }) + } +} + +func TestBlocksStoreQuerier_QueryStoreAfterBoundary(t *testing.T) { + t.Parallel() + + now := time.Now() + cutoff := 1 * time.Hour + + tests := map[string]struct { + queryMinT int64 + queryMaxT int64 + expectedMinT int64 + expectedMaxT int64 + shouldSkip bool + description string + }{ + "maxT exactly at cutoff boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + queryMaxT: util.TimeToMillis(now.Add(-cutoff)), + expectedMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + expectedMaxT: util.TimeToMillis(now.Add(-cutoff)), + shouldSkip: false, + description: "should not manipulate when maxT is exactly at boundary", + }, + "maxT 1ms before cutoff boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + queryMaxT: util.TimeToMillis(now.Add(-cutoff - time.Millisecond)), + expectedMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + expectedMaxT: util.TimeToMillis(now.Add(-cutoff - time.Millisecond)), + shouldSkip: false, + description: "should not manipulate when maxT is before boundary", + }, + "maxT 1ms after cutoff boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + queryMaxT: util.TimeToMillis(now.Add(-cutoff + time.Millisecond)), + expectedMinT: util.TimeToMillis(now.Add(-2 * cutoff)), + expectedMaxT: util.TimeToMillis(now.Add(-cutoff)), + shouldSkip: false, + description: "should manipulate when maxT is 1ms after boundary", + }, + "minT 1ms before cutoff boundary": { + queryMinT: util.TimeToMillis(now.Add(-cutoff - time.Millisecond)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-cutoff - time.Millisecond)), + expectedMaxT: util.TimeToMillis(now.Add(-cutoff)), + shouldSkip: false, + description: "should manipulate when minT is before boundary", + }, + "minT well after cutoff boundary": { + queryMinT: util.TimeToMillis(now.Add(-30 * 
time.Minute)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: 0, + expectedMaxT: 0, + shouldSkip: true, + description: "should skip query when minT is well after boundary", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + ctx := user.InjectOrgID(context.Background(), "test") + finder := &blocksFinderMock{} + finder.On("GetBlocks", mock.Anything, "test", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + + q := &blocksStoreQuerier{ + minT: testData.queryMinT, + maxT: testData.queryMaxT, + finder: finder, + stores: &blocksStoreSetMock{}, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(nil), + limits: &blocksStoreLimitsMock{queryStoreAfter: cutoff}, + } + + sp := &storage.SelectHints{ + Start: testData.queryMinT, + End: testData.queryMaxT, + } + + set := q.selectSorted(ctx, sp) + require.NoError(t, set.Err()) + + if testData.shouldSkip { + assert.Len(t, finder.Calls, 0, testData.description) + } else { + require.Len(t, finder.Calls, 1, testData.description) + assert.Equal(t, testData.expectedMinT, finder.Calls[0].Arguments.Get(2), testData.description) + // Allow 15 seconds of time drift to account for CI environment delays. + // The actual code calls time.Now() when manipulating query time ranges, + // which can differ from the test's captured 'now' value. 
+ assert.InDelta(t, testData.expectedMaxT, finder.Calls[0].Arguments.Get(3), float64(15*time.Second.Milliseconds()), testData.description) + } + }) + } +} diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 8d82cd78878..1dfc80d32db 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -23,6 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/users" + "github.com/cortexproject/cortex/pkg/util/validation" ) const retryMinBackoff = time.Millisecond @@ -42,15 +43,15 @@ type Distributor interface { MetricsMetadata(ctx context.Context, req *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) } -func newDistributorQueryable(distributor Distributor, streamingMetdata bool, labelNamesWithMatchers bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration, isPartialDataEnabled partialdata.IsCfgEnabledFunc, ingesterQueryMaxAttempts int) QueryableWithFilter { +func newDistributorQueryable(distributor Distributor, streamingMetdata bool, labelNamesWithMatchers bool, iteratorFn chunkIteratorFunc, isPartialDataEnabled partialdata.IsCfgEnabledFunc, ingesterQueryMaxAttempts int, limits *validation.Overrides) QueryableWithFilter { return distributorQueryable{ distributor: distributor, streamingMetdata: streamingMetdata, labelNamesWithMatchers: labelNamesWithMatchers, iteratorFn: iteratorFn, - queryIngestersWithin: queryIngestersWithin, isPartialDataEnabled: isPartialDataEnabled, ingesterQueryMaxAttempts: ingesterQueryMaxAttempts, + limits: limits, } } @@ -59,9 +60,9 @@ type distributorQueryable struct { streamingMetdata bool labelNamesWithMatchers bool iteratorFn chunkIteratorFunc - queryIngestersWithin time.Duration isPartialDataEnabled partialdata.IsCfgEnabledFunc ingesterQueryMaxAttempts int + limits *validation.Overrides } func (d distributorQueryable) 
Querier(mint, maxt int64) (storage.Querier, error) { @@ -72,15 +73,15 @@ func (d distributorQueryable) Querier(mint, maxt int64) (storage.Querier, error) streamingMetadata: d.streamingMetdata, labelNamesMatchers: d.labelNamesWithMatchers, chunkIterFn: d.iteratorFn, - queryIngestersWithin: d.queryIngestersWithin, isPartialDataEnabled: d.isPartialDataEnabled, ingesterQueryMaxAttempts: d.ingesterQueryMaxAttempts, + limits: d.limits, }, nil } - -func (d distributorQueryable) UseQueryable(now time.Time, _, queryMaxT int64) bool { +func (d distributorQueryable) UseQueryable(now time.Time, userID string, _, queryMaxT int64) bool { // Include ingester only if maxt is within QueryIngestersWithin w.r.t. current time. - return d.queryIngestersWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngestersWithin)) + queryIngestersWithin := d.limits.QueryIngestersWithin(userID) + return queryIngestersWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-queryIngestersWithin)) } type distributorQuerier struct { @@ -89,9 +90,9 @@ type distributorQuerier struct { streamingMetadata bool labelNamesMatchers bool chunkIterFn chunkIteratorFunc - queryIngestersWithin time.Duration isPartialDataEnabled partialdata.IsCfgEnabledFunc ingesterQueryMaxAttempts int + limits *validation.Overrides } // Select implements storage.Querier interface. @@ -104,15 +105,20 @@ func (q *distributorQuerier) Select(ctx context.Context, sortSeries bool, sp *st if sp != nil { minT, maxT = sp.Start, sp.End } + userID, err := users.TenantID(ctx) + if err != nil { + return storage.ErrSeriesSet(err) + } + queryIngestersWithin := q.limits.QueryIngestersWithin(userID) // We should manipulate the query mint to query samples up until // now - queryIngestersWithin, because older time ranges are covered by the storage. This // optimization is particularly important for the blocks storage where the blocks retention in the // ingesters could be way higher than queryIngestersWithin. 
- if q.queryIngestersWithin > 0 { + if queryIngestersWithin > 0 { now := time.Now() origMinT := minT - minT = max(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) + minT = max(minT, util.TimeToMillis(now.Add(-queryIngestersWithin))) if origMinT != minT { level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT) diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index 7ec3f583a1c..810185044d9 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -91,13 +91,15 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) distributor.On("MetricsForLabelMatchersStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]labels.Labels{}, nil) ctx := user.InjectOrgID(context.Background(), "test") - queryable := newDistributorQueryable(distributor, streamingMetadataEnabled, true, nil, testData.queryIngestersWithin, nil, 1) - querier, err := queryable.Querier(testData.queryMinT, testData.queryMaxT) - require.NoError(t, err) limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(testData.queryIngestersWithin) overrides := validation.NewOverrides(limits, nil) + queryable := newDistributorQueryable(distributor, streamingMetadataEnabled, true, nil, nil, 1, overrides) + querier, err := queryable.Querier(testData.queryMinT, testData.queryMaxT) + require.NoError(t, err) + start, end, err := validateQueryTimeRange(ctx, "test", testData.queryMinT, testData.queryMaxT, overrides, 0) require.NoError(t, err) // Select hints are passed by Prometheus when querying /series. 
@@ -117,7 +119,7 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) assert.Len(t, distributor.Calls, 0) } else { require.Len(t, distributor.Calls, 1) - assert.InDelta(t, testData.expectedMinT, int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), float64(5*time.Second.Milliseconds())) + assert.InDelta(t, testData.expectedMinT, int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), float64(15*time.Second.Milliseconds())) assert.Equal(t, testData.expectedMaxT, int64(distributor.Calls[0].Arguments.Get(2).(model.Time))) } }) @@ -129,18 +131,23 @@ func TestDistributorQueryableFilter(t *testing.T) { t.Parallel() d := &MockDistributor{} - dq := newDistributorQueryable(d, false, true, nil, 1*time.Hour, nil, 1) + + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(1 * time.Hour) + overrides := validation.NewOverrides(limits, nil) + + dq := newDistributorQueryable(d, false, true, nil, nil, 1, overrides) now := time.Now() queryMinT := util.TimeToMillis(now.Add(-5 * time.Minute)) queryMaxT := util.TimeToMillis(now) - require.True(t, dq.UseQueryable(now, queryMinT, queryMaxT)) - require.True(t, dq.UseQueryable(now.Add(time.Hour), queryMinT, queryMaxT)) + require.True(t, dq.UseQueryable(now, "test", queryMinT, queryMaxT)) + require.True(t, dq.UseQueryable(now.Add(time.Hour), "test", queryMinT, queryMaxT)) // Same query, hour+1ms later, is not sent to ingesters. 
- require.False(t, dq.UseQueryable(now.Add(time.Hour).Add(1*time.Millisecond), queryMinT, queryMaxT)) + require.False(t, dq.UseQueryable(now.Add(time.Hour).Add(1*time.Millisecond), "test", queryMinT, queryMaxT)) } func TestIngesterStreaming(t *testing.T) { @@ -179,9 +186,13 @@ func TestIngesterStreaming(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "0") - queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, 0, func(string) bool { + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(0) // Disable time filtering for this test + overrides := validation.NewOverrides(limits, nil) + + queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, func(string) bool { return partialDataEnabled - }, 1) + }, 1, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -345,9 +356,14 @@ func TestDistributorQuerier_Retry(t *testing.T) { } ingesterQueryMaxAttempts := 3 - queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, 0, func(string) bool { + + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(0) + overrides := validation.NewOverrides(limits, nil) + + queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, func(string) bool { return true - }, ingesterQueryMaxAttempts) + }, ingesterQueryMaxAttempts, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -401,9 +417,11 @@ func TestDistributorQuerier_Select_CancelledContext_NoRetry(t *testing.T) { d.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, context.Canceled) ingesterQueryMaxAttempts := 1 - queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, 0, func(string) bool { + limits := DefaultLimitsConfig() + overrides := validation.NewOverrides(limits, nil) + queryable := newDistributorQueryable(d, true, true, 
batch.NewChunkMergeIterator, func(string) bool { return true - }, ingesterQueryMaxAttempts) + }, ingesterQueryMaxAttempts, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -433,9 +451,11 @@ func TestDistributorQuerier_Select_CancelledContext(t *testing.T) { // because the context is already cancelled. ingesterQueryMaxAttempts := 2 - queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, 0, func(string) bool { + limits := DefaultLimitsConfig() + overrides := validation.NewOverrides(limits, nil) + queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, func(string) bool { return true - }, ingesterQueryMaxAttempts) + }, ingesterQueryMaxAttempts, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -456,9 +476,11 @@ func TestDistributorQuerier_Labels_CancelledContext(t *testing.T) { d := &MockDistributor{} ingesterQueryMaxAttempts := 2 - queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, 0, func(string) bool { + limits := DefaultLimitsConfig() + overrides := validation.NewOverrides(limits, nil) + queryable := newDistributorQueryable(d, true, true, batch.NewChunkMergeIterator, func(string) bool { return true - }, ingesterQueryMaxAttempts) + }, ingesterQueryMaxAttempts, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -510,9 +532,12 @@ func TestDistributorQuerier_LabelNames(t *testing.T) { Return(metrics, partialDataErr) } - queryable := newDistributorQueryable(d, streamingEnabled, labelNamesWithMatchers, nil, 0, func(string) bool { + limits := DefaultLimitsConfig() + overrides := validation.NewOverrides(limits, nil) + + queryable := newDistributorQueryable(d, streamingEnabled, labelNamesWithMatchers, nil, func(string) bool { return partialDataEnabled - }, 1) + }, 1, overrides) querier, err := queryable.Querier(mint, maxt) require.NoError(t, err) @@ -530,3 +555,90 @@ func 
TestDistributorQuerier_LabelNames(t *testing.T) { } } } +func TestDistributorQuerier_QueryIngestersWithinBoundary(t *testing.T) { + t.Parallel() + + now := time.Now() + lookback := 1 * time.Hour + + tests := map[string]struct { + queryMinT int64 + queryMaxT int64 + expectedMinT int64 + expectedMaxT int64 + description string + }{ + "query exactly at lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-lookback)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-lookback)), + expectedMaxT: util.TimeToMillis(now), + description: "should not manipulate when minT is exactly at boundary", + }, + "query 1ms before lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-lookback - time.Millisecond)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-lookback)), + expectedMaxT: util.TimeToMillis(now), + description: "should manipulate when minT is 1ms before boundary", + }, + "query 1ms after lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-lookback + time.Millisecond)), + queryMaxT: util.TimeToMillis(now), + expectedMinT: util.TimeToMillis(now.Add(-lookback + time.Millisecond)), + expectedMaxT: util.TimeToMillis(now), + description: "should not manipulate when minT is 1ms after boundary", + }, + "maxT well before lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * lookback)), + queryMaxT: util.TimeToMillis(now.Add(-lookback - 10*time.Second)), + expectedMinT: 0, + expectedMaxT: 0, + description: "should skip query when maxT is well before boundary", + }, + "maxT 1ms before lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * lookback)), + queryMaxT: util.TimeToMillis(now.Add(-lookback - time.Millisecond)), + expectedMinT: 0, + expectedMaxT: 0, + description: "should skip query when maxT is before boundary", + }, + "maxT well after lookback boundary": { + queryMinT: util.TimeToMillis(now.Add(-2 * lookback)), + queryMaxT: util.TimeToMillis(now.Add(-lookback + 
10*time.Second)), + expectedMinT: util.TimeToMillis(now.Add(-lookback)), + expectedMaxT: util.TimeToMillis(now.Add(-lookback + 10*time.Second)), + description: "should manipulate when maxT is well after boundary", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + distributor := &MockDistributor{} + distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) + + ctx := user.InjectOrgID(context.Background(), "test") + + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(lookback) + overrides := validation.NewOverrides(limits, nil) + + queryable := newDistributorQueryable(distributor, false, true, nil, nil, 1, overrides) + querier, err := queryable.Querier(testData.queryMinT, testData.queryMaxT) + require.NoError(t, err) + + seriesSet := querier.Select(ctx, true, nil) + require.NoError(t, seriesSet.Err()) + + if testData.expectedMinT == 0 && testData.expectedMaxT == 0 { + assert.Len(t, distributor.Calls, 0, testData.description) + } else { + require.Len(t, distributor.Calls, 1, testData.description) + assert.InDelta(t, testData.expectedMinT, int64(distributor.Calls[0].Arguments.Get(1).(model.Time)), float64(15*time.Second.Milliseconds()), testData.description) + assert.Equal(t, testData.expectedMaxT, int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), testData.description) + } + }) + } +} diff --git a/pkg/querier/parquet_queryable.go b/pkg/querier/parquet_queryable.go index a8f20560443..559f1f1c533 100644 --- a/pkg/querier/parquet_queryable.go +++ b/pkg/querier/parquet_queryable.go @@ -98,7 +98,6 @@ type parquetQueryableWithFallback struct { services.Service fallbackDisabled bool - queryStoreAfter time.Duration parquetQueryable storage.Queryable cache parquetutil.CacheInterface[parquet_storage.ParquetShard] blockStorageQueryable *BlocksStoreQueryable @@ -280,7 +279,6 @@ func NewParquetQueryable( 
blockStorageQueryable: blockStorageQueryable, parquetQueryable: parquetQueryable, cache: cache, - queryStoreAfter: config.QueryStoreAfter, subservicesWatcher: services.NewFailureWatcher(), finder: blockStorageQueryable.finder, metrics: newParquetQueryableFallbackMetrics(reg), @@ -338,7 +336,6 @@ func (p *parquetQueryableWithFallback) Querier(mint, maxt int64) (storage.Querie minT: mint, maxT: maxt, parquetQuerier: pq, - queryStoreAfter: p.queryStoreAfter, blocksStoreQuerier: bsq, finder: p.finder, metrics: p.metrics, @@ -358,10 +355,6 @@ type parquetQuerierWithFallback struct { parquetQuerier storage.Querier blocksStoreQuerier storage.Querier - // If set, the querier manipulates the max time to not be greater than - // "now - queryStoreAfter" so that most recent blocks are not queried. - queryStoreAfter time.Duration - // metrics metrics *parquetQueryableFallbackMetrics @@ -503,7 +496,7 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool mint, maxt, limit = hints.Start, hints.End, hints.Limit } - maxt = q.adjustMaxT(maxt) + maxt = q.adjustMaxT(ctx, maxt) hints.End = maxt if maxt < mint { @@ -578,14 +571,19 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool return storage.NewMergeSeriesSet(seriesSets, limit, storage.ChainedSeriesMerge) } -func (q *parquetQuerierWithFallback) adjustMaxT(maxt int64) int64 { +func (q *parquetQuerierWithFallback) adjustMaxT(ctx context.Context, maxt int64) int64 { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This // optimization is particularly important for the blocks storage because can be used to skip // querying most recent not-compacted-yet blocks from the storage. 
- if q.queryStoreAfter > 0 { + userID, err := users.TenantID(ctx) + if err != nil { + return maxt + } + queryStoreAfter := q.limits.QueryStoreAfter(userID) + if queryStoreAfter > 0 { now := time.Now() - maxt = min(maxt, util.TimeToMillis(now.Add(-q.queryStoreAfter))) + maxt = min(maxt, util.TimeToMillis(now.Add(-queryStoreAfter))) } return maxt } @@ -603,7 +601,7 @@ func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT i return nil, nil, err } - maxT = q.adjustMaxT(maxT) + maxT = q.adjustMaxT(ctx, maxT) if maxT < minT { return nil, nil, nil diff --git a/pkg/querier/parquet_queryable_test.go b/pkg/querier/parquet_queryable_test.go index f3d6897b9d9..417bfdc433c 100644 --- a/pkg/querier/parquet_queryable_test.go +++ b/pkg/querier/parquet_queryable_test.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus-community/parquet-common/convert" "github.com/prometheus-community/parquet-common/schema" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -104,9 +105,8 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { finder: finder, blocksStoreQuerier: q, parquetQuerier: mParquetQuerier, - queryStoreAfter: time.Hour, metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 0), + limits: defaultOverridesWithQueryStoreAfter(t, 0, time.Hour), logger: log.NewNopLogger(), defaultBlockStoreType: parquetBlockStore, } @@ -234,9 +234,8 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { finder: finder, blocksStoreQuerier: q, parquetQuerier: mParquetQuerier, - queryStoreAfter: queryStoreAfter, metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 0), + limits: defaultOverridesWithQueryStoreAfter(t, 0, queryStoreAfter), logger: log.NewNopLogger(), defaultBlockStoreType: parquetBlockStore, } @@ 
-399,7 +398,6 @@ func TestParquetQueryable_Limits(t *testing.T) { bkt, tempDir := cortex_testutil.PrepareFilesystemBucket(t) config := Config{ - QueryStoreAfter: 0, StoreGatewayQueryStatsEnabled: false, StoreGatewayConsistencyCheckMaxAttempts: 3, ParquetShardCache: parquetutil.CacheConfig{ @@ -590,9 +588,14 @@ func convertBlockToParquet(t *testing.T, ctx context.Context, userBucketClient o } func defaultOverrides(t *testing.T, queryVerticalShardSize int) *validation.Overrides { + return defaultOverridesWithQueryStoreAfter(t, queryVerticalShardSize, 0) +} + +func defaultOverridesWithQueryStoreAfter(t *testing.T, queryVerticalShardSize int, queryStoreAfter time.Duration) *validation.Overrides { limits := validation.Limits{} flagext.DefaultValues(&limits) limits.QueryVerticalShardSize = queryVerticalShardSize + limits.QueryStoreAfter = model.Duration(queryStoreAfter) overrides := validation.NewOverrides(limits, nil) return overrides @@ -815,9 +818,8 @@ func TestSelectProjectionHints(t *testing.T) { finder: finder, blocksStoreQuerier: mockTSDBQuerier, parquetQuerier: mockParquetQuerierInstance, - queryStoreAfter: 0, // Disable queryStoreAfter manipulation metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 0), + limits: defaultOverridesWithQueryStoreAfter(t, 0, 0), // Disable queryStoreAfter manipulation logger: log.NewNopLogger(), defaultBlockStoreType: parquetBlockStore, fallbackDisabled: false, @@ -1082,14 +1084,14 @@ func TestParquetQueryableFallbackDisabled(t *testing.T) { mParquetQuerier := &mockParquetQuerier{} pq := &parquetQuerierWithFallback{ - minT: minT, - maxT: maxT, - finder: finder, - blocksStoreQuerier: q, - parquetQuerier: mParquetQuerier, - queryStoreAfter: time.Hour, + minT: minT, + maxT: maxT, + finder: finder, + blocksStoreQuerier: q, + parquetQuerier: mParquetQuerier, + metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 0), + limits: 
defaultOverridesWithQueryStoreAfter(t, 0, time.Hour), logger: log.NewNopLogger(), defaultBlockStoreType: parquetBlockStore, fallbackDisabled: true, // Disable fallback @@ -1146,7 +1148,6 @@ func TestParquetQueryableFallbackDisabled(t *testing.T) { finder: finder, blocksStoreQuerier: q, parquetQuerier: mParquetQuerier, - queryStoreAfter: time.Hour, metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), limits: defaultOverrides(t, 0), logger: log.NewNopLogger(), diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 8ebc66a16dc..5205a9eda83 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -45,14 +45,11 @@ type Config struct { IngesterMetadataStreaming bool `yaml:"ingester_metadata_streaming"` IngesterLabelNamesWithMatchers bool `yaml:"ingester_label_names_with_matchers"` MaxSamples int `yaml:"max_samples"` - QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` EnablePerStepStats bool `yaml:"per_step_stats_enabled"` // Use compression for metrics query API or instant and range query APIs. ResponseCompression string `yaml:"response_compression"` - // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. - QueryStoreAfter time.Duration `yaml:"query_store_after"` MaxQueryIntoFuture time.Duration `yaml:"max_query_into_future"` // The default evaluation interval for the promql engine. @@ -86,8 +83,6 @@ type Config struct { // The maximum number of times we attempt fetching data from Ingesters. IngesterQueryMaxAttempts int `yaml:"ingester_query_max_attempts"` - ShuffleShardingIngestersLookbackPeriod time.Duration `yaml:"shuffle_sharding_ingesters_lookback_period"` - ThanosEngine engine.ThanosEngineConfig `yaml:"thanos_engine"` // Ignore max query length check at Querier. 
@@ -106,14 +101,12 @@ type Config struct { } var ( - errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent") - errShuffleShardingLookbackLessThanQueryStoreAfter = errors.New("the shuffle-sharding lookback period should be greater or equal than the configured 'query store after'") - errEmptyTimeRange = errors.New("empty time range") - errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") - errInvalidConsistencyCheckAttempts = errors.New("store gateway consistency check max attempts should be greater or equal than 1") - errInvalidSeriesBatchSize = errors.New("store gateway series batch size should be greater or equal than 0") - errInvalidIngesterQueryMaxAttempts = errors.New("ingester query max attempts should be greater or equal than 1") - errInvalidParquetQueryableDefaultBlockStore = errors.New("unsupported parquet queryable default block store. Supported options are tsdb and parquet") + errEmptyTimeRange = errors.New("empty time range") + errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") + errInvalidConsistencyCheckAttempts = errors.New("store gateway consistency check max attempts should be greater or equal than 1") + errInvalidSeriesBatchSize = errors.New("store gateway series batch size should be greater or equal than 0") + errInvalidIngesterQueryMaxAttempts = errors.New("ingester query max attempts should be greater or equal than 1") + errInvalidParquetQueryableDefaultBlockStore = errors.New("unsupported parquet queryable default block store. Supported options are tsdb and parquet") ) // RegisterFlags adds the flags required to config this to the given FlagSet. 
@@ -135,12 +128,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IngesterMetadataStreaming, "querier.ingester-metadata-streaming", true, "Deprecated (This feature will be always on after v1.18): Use streaming RPCs for metadata APIs from ingester.") f.BoolVar(&cfg.IngesterLabelNamesWithMatchers, "querier.ingester-label-names-with-matchers", false, "Use LabelNames ingester RPCs with match params.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") - f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.") f.StringVar(&cfg.ResponseCompression, "querier.response-compression", "gzip", "Use compression for metrics query API or instant and range query APIs. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") - f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. 
When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.StringVar(&cfg.ActiveQueryTrackerDir, "querier.active-query-tracker-dir", "./active-query-tracker", "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.") f.StringVar(&cfg.StoreGatewayAddresses, "querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).") f.BoolVar(&cfg.StoreGatewayQueryStatsEnabled, "querier.store-gateway-query-stats-enabled", true, "If enabled, store gateway query stats will be logged using `info` log level.") @@ -148,7 +139,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.Int64Var(&cfg.StoreGatewaySeriesBatchSize, "querier.store-gateway-series-batch-size", 1, "[Experimental] The maximum number of series to be batched in a single gRPC response message from Store Gateways. A value of 0 or 1 disables batching.") f.IntVar(&cfg.IngesterQueryMaxAttempts, "querier.ingester-query-max-attempts", 1, "The maximum number of times we attempt fetching data from ingesters for retryable errors (ex. 
partial data returned).") f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") - f.DurationVar(&cfg.ShuffleShardingIngestersLookbackPeriod, "querier.shuffle-sharding-ingesters-lookback-period", 0, "When distributor's sharding strategy is shuffle-sharding and this setting is > 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).") f.Int64Var(&cfg.MaxSubQuerySteps, "querier.max-subquery-steps", 0, "Max number of steps allowed for every subquery expression in query. Number of steps is calculated using subquery range / step. A value > 0 enables it.") f.BoolVar(&cfg.IgnoreMaxQueryLength, "querier.ignore-max-query-length", false, "If enabled, ignore max query length check at Querier select method. Users can choose to ignore it since the validation can be done before Querier evaluation like at Query Frontend or Ruler.") f.BoolVar(&cfg.EnablePromQLExperimentalFunctions, "querier.enable-promql-experimental-functions", false, "[Experimental] If true, experimental promQL functions are enabled.") @@ -162,23 +152,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Validate the config func (cfg *Config) Validate() error { - // Ensure the config won't create a situation where no queriers are returned. 
- if cfg.QueryIngestersWithin != 0 && cfg.QueryStoreAfter != 0 { - if cfg.QueryStoreAfter >= cfg.QueryIngestersWithin { - return errBadLookbackConfigs - } - } if cfg.ResponseCompression != "" && cfg.ResponseCompression != "gzip" && cfg.ResponseCompression != "snappy" && cfg.ResponseCompression != "zstd" { return errUnsupportedResponseCompression } - if cfg.ShuffleShardingIngestersLookbackPeriod > 0 { - if cfg.ShuffleShardingIngestersLookbackPeriod < cfg.QueryStoreAfter { - return errShuffleShardingLookbackLessThanQueryStoreAfter - } - } - if cfg.StoreGatewayConsistencyCheckMaxAttempts < 1 { return errInvalidConsistencyCheckAttempts } @@ -220,13 +198,13 @@ func getChunksIteratorFunction(_ Config) chunkIteratorFunc { func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, reg prometheus.Registerer, logger log.Logger, isPartialDataEnabled partialdata.IsCfgEnabledFunc) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, engine.QueryEngine) { iteratorFunc := getChunksIteratorFunction(cfg) - distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, iteratorFunc, cfg.QueryIngestersWithin, isPartialDataEnabled, cfg.IngesterQueryMaxAttempts) + distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, iteratorFunc, isPartialDataEnabled, cfg.IngesterQueryMaxAttempts, limits) ns := make([]QueryableWithFilter, len(stores)) for ix, s := range stores { ns[ix] = storeQueryable{ QueryableWithFilter: s, - QueryStoreAfter: cfg.QueryStoreAfter, + limits: limits, } } queryable := NewQueryable(distributorQueryable, ns, cfg, limits) @@ -296,7 +274,7 @@ type QueryableWithFilter interface { // UseQueryable returns true if this queryable should be used to satisfy the query for given time range. // Query min and max time are in milliseconds since epoch. 
- UseQueryable(now time.Time, queryMinT, queryMaxT int64) bool + UseQueryable(now time.Time, userID string, queryMinT, queryMaxT int64) bool } type limiterHolder struct { @@ -362,12 +340,12 @@ func (q querier) setupFromCtx(ctx context.Context) (context.Context, *querier_st metadataQuerier := dqr queriers := make([]storage.Querier, 0) - if q.distributor.UseQueryable(q.now, mint, maxt) { + if q.distributor.UseQueryable(q.now, userID, mint, maxt) { queriers = append(queriers, dqr) } for _, s := range q.stores { - if !s.UseQueryable(q.now, mint, maxt) { + if !s.UseQueryable(q.now, userID, mint, maxt) { continue } @@ -451,7 +429,7 @@ func (q querier) Select(ctx context.Context, sortSeries bool, sp *storage.Select // Reset projection hints if querying ingesters or projection is not included. // Projection can only be applied when not querying mixed sources (ingester + store). if q.honorProjectionHints { - if !sp.ProjectionInclude || q.distributor.UseQueryable(q.now, mint, maxt) { + if !sp.ProjectionInclude || q.distributor.UseQueryable(q.now, userID, mint, maxt) { sp.ProjectionLabels = nil sp.ProjectionInclude = false } @@ -625,22 +603,27 @@ func (querier) Close() error { type storeQueryable struct { QueryableWithFilter - QueryStoreAfter time.Duration + limits *validation.Overrides } -func (s storeQueryable) UseQueryable(now time.Time, queryMinT, queryMaxT int64) bool { +func (s storeQueryable) UseQueryable(now time.Time, userID string, queryMinT, queryMaxT int64) bool { + var queryStoreAfter time.Duration + if s.limits != nil { + queryStoreAfter = s.limits.QueryStoreAfter(userID) + } + // Include this store only if mint is within QueryStoreAfter w.r.t current time. 
- if s.QueryStoreAfter != 0 && queryMinT > util.TimeToMillis(now.Add(-s.QueryStoreAfter)) { + if queryStoreAfter != 0 && queryMinT > util.TimeToMillis(now.Add(-queryStoreAfter)) { return false } - return s.QueryableWithFilter.UseQueryable(now, queryMinT, queryMaxT) + return s.QueryableWithFilter.UseQueryable(now, userID, queryMinT, queryMaxT) } type alwaysTrueFilterQueryable struct { storage.Queryable } -func (alwaysTrueFilterQueryable) UseQueryable(_ time.Time, _, _ int64) bool { +func (alwaysTrueFilterQueryable) UseQueryable(_ time.Time, _ string, _, _ int64) bool { return true } @@ -654,7 +637,7 @@ type useBeforeTimestampQueryable struct { ts int64 // Timestamp in milliseconds } -func (u useBeforeTimestampQueryable) UseQueryable(_ time.Time, queryMinT, _ int64) bool { +func (u useBeforeTimestampQueryable) UseQueryable(_ time.Time, _ string, queryMinT, _ int64) bool { if u.ts == 0 { return true } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 4a13dae9aaf..e2f57c6cddb 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -296,7 +296,12 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { } distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unorderedResponse, nil) - distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, nil, 1) + + // Create limits with default QueryIngestersWithin + limits := DefaultLimitsConfig() + testOverrides := validation.NewOverrides(limits, nil) + + distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, nil, 1, testOverrides) tCases := []struct { name string @@ -356,7 +361,7 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { for _, queryable := range append(wQueriables, 
wDistributorQueriable) { var wQueryable = queryable.(*wrappedSampleAndChunkQueryable) - if wQueryable.UseQueryable(time.Now(), start.Unix()*1000, end.Unix()*1000) { + if wQueryable.UseQueryable(time.Now(), "0", start.Unix()*1000, end.Unix()*1000) { require.Equal(t, tc.sorted, wQueryable.queriers[0].selectCallsArgs[0][0]) } } @@ -441,7 +446,11 @@ func TestLimits(t *testing.T) { response: &streamResponse, } - distributorQueryableStreaming := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, nil, 1) + // Create limits with default QueryIngestersWithin + limits := DefaultLimitsConfig() + testOverrides := validation.NewOverrides(limits, nil) + + distributorQueryableStreaming := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, nil, 1, testOverrides) tCases := []struct { name string @@ -659,7 +668,6 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { for _, thanosEngine := range []bool{true, false} { for _, encoding := range encodings { for _, c := range testCases { - cfg.QueryIngestersWithin = c.queryIngestersWithin t.Run(fmt.Sprintf("thanosEngine=%t,encoding=%s,queryIngestersWithin=%v, test=%s", thanosEngine, encoding.String(), c.queryIngestersWithin, c.name), func(t *testing.T) { var queryEngine promql.QueryEngine if thanosEngine { @@ -674,7 +682,10 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { chunkStore, _ := makeMockChunkStore(t, 24, encoding) distributor := &errDistributor{} - overrides := validation.NewOverrides(DefaultLimitsConfig(), nil) + // Create limits with QueryIngestersWithin from test case + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(c.queryIngestersWithin) + overrides := validation.NewOverrides(limits, nil) ctx := user.InjectOrgID(context.Background(), "0") queryable, _, _ := New(cfg, overrides, distributor, 
[]QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))}, nil, log.NewNopLogger(), nil) @@ -1558,14 +1569,16 @@ func TestShortTermQueryToLTS(t *testing.T) { cfg.ActiveQueryTrackerDir = "" for _, c := range testCases { - cfg.QueryIngestersWithin = c.queryIngestersWithin - cfg.QueryStoreAfter = c.queryStoreAfter t.Run(c.name, func(t *testing.T) { //parallel testing causes data race chunkStore := &emptyChunkStore{} distributor := &errDistributor{} - overrides := validation.NewOverrides(DefaultLimitsConfig(), nil) + // Create limits with QueryIngestersWithin and QueryStoreAfter from test case + limits := DefaultLimitsConfig() + limits.QueryIngestersWithin = model.Duration(c.queryIngestersWithin) + limits.QueryStoreAfter = model.Duration(c.queryStoreAfter) + overrides := validation.NewOverrides(limits, nil) queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))}, nil, log.NewNopLogger(), nil) ctx := user.InjectOrgID(context.Background(), "0") @@ -1597,7 +1610,7 @@ func TestUseAlwaysQueryable(t *testing.T) { m := &mockQueryableWithFilter{} qwf := UseAlwaysQueryable(m) - require.True(t, qwf.UseQueryable(time.Now(), 0, 0)) + require.True(t, qwf.UseQueryable(time.Now(), "test", 0, 0)) require.False(t, m.useQueryableCalled) } @@ -1607,13 +1620,13 @@ func TestUseBeforeTimestamp(t *testing.T) { now := time.Now() qwf := UseBeforeTimestampQueryable(m, now.Add(-1*time.Hour)) - require.False(t, qwf.UseQueryable(now, util.TimeToMillis(now.Add(-5*time.Minute)), util.TimeToMillis(now))) + require.False(t, qwf.UseQueryable(now, "test", util.TimeToMillis(now.Add(-5*time.Minute)), util.TimeToMillis(now))) require.False(t, m.useQueryableCalled) - require.False(t, qwf.UseQueryable(now, util.TimeToMillis(now.Add(-1*time.Hour)), util.TimeToMillis(now))) + require.False(t, qwf.UseQueryable(now, "test", util.TimeToMillis(now.Add(-1*time.Hour)), util.TimeToMillis(now))) require.False(t, 
m.useQueryableCalled) - require.True(t, qwf.UseQueryable(now, util.TimeToMillis(now.Add(-1*time.Hour).Add(-time.Millisecond)), util.TimeToMillis(now))) + require.True(t, qwf.UseQueryable(now, "test", util.TimeToMillis(now.Add(-1*time.Hour).Add(-time.Millisecond)), util.TimeToMillis(now))) require.False(t, m.useQueryableCalled) // UseBeforeTimestampQueryable wraps Queryable, and not QueryableWithFilter. } @@ -1621,15 +1634,21 @@ func TestStoreQueryable(t *testing.T) { t.Parallel() m := &mockQueryableWithFilter{} now := time.Now() - sq := storeQueryable{m, time.Hour} - require.False(t, sq.UseQueryable(now, util.TimeToMillis(now.Add(-5*time.Minute)), util.TimeToMillis(now))) + // Create limits with QueryStoreAfter set to 1 hour + limits := DefaultLimitsConfig() + limits.QueryStoreAfter = model.Duration(time.Hour) + overrides := validation.NewOverrides(limits, nil) + + sq := storeQueryable{m, overrides} + + require.False(t, sq.UseQueryable(now, "test", util.TimeToMillis(now.Add(-5*time.Minute)), util.TimeToMillis(now))) require.False(t, m.useQueryableCalled) - require.False(t, sq.UseQueryable(now, util.TimeToMillis(now.Add(-1*time.Hour).Add(time.Millisecond)), util.TimeToMillis(now))) + require.False(t, sq.UseQueryable(now, "test", util.TimeToMillis(now.Add(-1*time.Hour).Add(time.Millisecond)), util.TimeToMillis(now))) require.False(t, m.useQueryableCalled) - require.True(t, sq.UseQueryable(now, util.TimeToMillis(now.Add(-1*time.Hour)), util.TimeToMillis(now))) + require.True(t, sq.UseQueryable(now, "test", util.TimeToMillis(now.Add(-1*time.Hour)), util.TimeToMillis(now))) require.True(t, m.useQueryableCalled) // storeQueryable wraps QueryableWithFilter, so it must call its UseQueryable method. 
} @@ -1642,24 +1661,6 @@ func TestConfig_Validate(t *testing.T) { "should pass with default config": { setup: func(cfg *Config) {}, }, - "should pass if 'query store after' is enabled and shuffle-sharding is disabled": { - setup: func(cfg *Config) { - cfg.QueryStoreAfter = time.Hour - }, - }, - "should pass if 'query store after' is enabled and shuffle-sharding is enabled with greater value": { - setup: func(cfg *Config) { - cfg.QueryStoreAfter = time.Hour - cfg.ShuffleShardingIngestersLookbackPeriod = 2 * time.Hour - }, - }, - "should fail if 'query store after' is enabled and shuffle-sharding is enabled with lesser value": { - setup: func(cfg *Config) { - cfg.QueryStoreAfter = time.Hour - cfg.ShuffleShardingIngestersLookbackPeriod = time.Minute - }, - expected: errShuffleShardingLookbackLessThanQueryStoreAfter, - }, "should fail if invalid parquet queryable default block store": { setup: func(cfg *Config) { cfg.EnableParquetQueryable = true @@ -1734,7 +1735,7 @@ func (m *mockQueryableWithFilter) Querier(_, _ int64) (storage.Querier, error) { return nil, nil } -func (m *mockQueryableWithFilter) UseQueryable(_ time.Time, _, _ int64) bool { +func (m *mockQueryableWithFilter) UseQueryable(_ time.Time, _ string, _, _ int64) bool { m.useQueryableCalled = true return true } @@ -1815,15 +1816,19 @@ func TestQuerier_ProjectionHints(t *testing.T) { distributor := &MockDistributor{} distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&client.QueryStreamResponse{}, nil) + // Create limits with default QueryIngestersWithin + limits := DefaultLimitsConfig() + testOverrides := validation.NewOverrides(limits, nil) + // Create distributor queryable that can be controlled to be used or not var distributorQueryable QueryableWithFilter if testData.queryIngesters { // Ingesters will be queried - distributorQueryable = newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, 
batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, nil, 1) + distributorQueryable = newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, nil, 1, testOverrides) } else { // Ingesters will not be queried (time range is too old) distributorQueryable = UseBeforeTimestampQueryable( - newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin, nil, 1), + newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, batch.NewChunkMergeIterator, nil, 1, testOverrides), start.Add(-1*time.Hour), ) } @@ -1851,7 +1856,7 @@ func TestQuerier_ProjectionHints(t *testing.T) { var receivedHints *storage.SelectHints for _, queryable := range append([]QueryableWithFilter{storeQueryable}, wDistributorQueryable) { wQueryable := queryable.(*wrappedSampleAndChunkQueryable) - if wQueryable.UseQueryable(time.Now(), util.TimeToMillis(start), util.TimeToMillis(end)) { + if wQueryable.UseQueryable(time.Now(), "0", util.TimeToMillis(start), util.TimeToMillis(end)) { require.Len(t, wQueryable.queriers, 1) require.Len(t, wQueryable.queriers[0].selectCallsArgs, 1) receivedHints = wQueryable.queriers[0].selectCallsArgs[0][1].(*storage.SelectHints) diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go index b635a54cee9..3a61817dcc9 100644 --- a/pkg/ring/kv/memberlist/memberlist_client.go +++ b/pkg/ring/kv/memberlist/memberlist_client.go @@ -152,6 +152,9 @@ type KVConfig struct { AdvertiseAddr string `yaml:"advertise_addr"` AdvertisePort int `yaml:"advertise_port"` + ClusterLabel string `yaml:"cluster_label"` + ClusterLabelVerificationDisabled bool `yaml:"cluster_label_verification_disabled"` + // List of members to join JoinMembers flagext.StringSlice `yaml:"join_members"` MinJoinBackoff time.Duration `yaml:"min_join_backoff"` @@ 
-209,6 +212,8 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.BoolVar(&cfg.EnableCompression, prefix+"memberlist.compression-enabled", mlDefaults.EnableCompression, "Enable message compression. This can be used to reduce bandwidth usage at the cost of slightly more CPU utilization.") f.StringVar(&cfg.AdvertiseAddr, prefix+"memberlist.advertise-addr", mlDefaults.AdvertiseAddr, "Gossip address to advertise to other members in the cluster. Used for NAT traversal.") f.IntVar(&cfg.AdvertisePort, prefix+"memberlist.advertise-port", mlDefaults.AdvertisePort, "Gossip port to advertise to other members in the cluster. Used for NAT traversal.") + f.StringVar(&cfg.ClusterLabel, prefix+"memberlist.cluster-label", mlDefaults.Label, "The cluster label is an optional string to include in outbound packets and gossip streams. Other members in the memberlist cluster will discard any message whose label doesn't match the configured one, unless the 'cluster-label-verification-disabled' configuration option is set to true.") + f.BoolVar(&cfg.ClusterLabelVerificationDisabled, prefix+"memberlist.cluster-label-verification-disabled", mlDefaults.SkipInboundLabelCheck, "When true, memberlist doesn't verify that inbound packets and gossip streams have the cluster label matching the configured one. 
This verification should be disabled while rolling out the change to the configured cluster label in a live memberlist cluster.") cfg.TCPTransport.RegisterFlagsWithPrefix(f, prefix) } @@ -406,6 +411,12 @@ func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { mlCfg.AdvertiseAddr = m.cfg.AdvertiseAddr mlCfg.AdvertisePort = m.cfg.AdvertisePort + mlCfg.Label = m.cfg.ClusterLabel + mlCfg.SkipInboundLabelCheck = m.cfg.ClusterLabelVerificationDisabled + + if mlCfg.Label == "" && mlCfg.SkipInboundLabelCheck { + level.Warn(m.logger).Log("msg", "cluster label verification is disabled but no cluster label is configured; this weakens isolation without benefit") + } if m.cfg.NodeName != "" { mlCfg.Name = m.cfg.NodeName @@ -415,6 +426,10 @@ func (m *KV) buildMemberlistConfig() (*memberlist.Config, error) { level.Info(m.logger).Log("msg", "Using memberlist cluster node name", "name", mlCfg.Name) } + if mlCfg.Label != "" { + level.Info(m.logger).Log("msg", "Using memberlist cluster label", "cluster_label", mlCfg.Label, "skip_inbound_label_check", mlCfg.SkipInboundLabelCheck) + } + mlCfg.LogOutput = newMemberlistLoggerAdapter(m.logger, false) mlCfg.Transport = tr diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go index 002ccb340f5..601e7438675 100644 --- a/pkg/ring/kv/memberlist/memberlist_client_test.go +++ b/pkg/ring/kv/memberlist/memberlist_client_test.go @@ -530,33 +530,156 @@ func TestMultipleCAS(t *testing.T) { } func TestMultipleClients(t *testing.T) { - c := dataCodec{} + t.Parallel() + + err := testMultipleClientsWithConfigGenerator(t, 10, defaultMultipleClientsKVConfig) + require.NoError(t, err) +} + +func TestMultipleClientsWithMixedClusterLabelsAndExpectFailure(t *testing.T) { + t.Parallel() + + memberLabels := []string{"", "label1", "label2", "label3", "label4"} + + configGen := func(memberID int) KVConfig { + cfg := defaultMultipleClientsKVConfig(memberID) + cfg.ClusterLabel = 
memberLabels[memberID] + return cfg + } + + err := testMultipleClientsWithConfigGenerator(t, len(memberLabels), configGen) + require.Error(t, err) + require.Contains(t, err.Error(), "expected to see at least 2 members, got 1") +} + +func TestMultipleClientsWithMixedClusterLabelsAndVerificationDisabled(t *testing.T) { + t.Parallel() + + memberLabels := []string{"", "label1", "label2"} + + configGen := func(memberID int) KVConfig { + cfg := defaultMultipleClientsKVConfig(memberID) + cfg.ClusterLabel = memberLabels[memberID] + cfg.ClusterLabelVerificationDisabled = true + return cfg + } + + err := testMultipleClientsWithConfigGenerator(t, len(memberLabels), configGen) + require.NoError(t, err) +} + +func TestMultipleClientsWithSameClusterLabel(t *testing.T) { + t.Parallel() const members = 10 - const key = "ring" + const clusterLabel = "test-cluster" - var clients []*Client + configGen := func(memberID int) KVConfig { + cfg := defaultMultipleClientsKVConfig(memberID) + cfg.ClusterLabel = clusterLabel + return cfg + } - stop := make(chan struct{}) - start := make(chan struct{}) + err := testMultipleClientsWithConfigGenerator(t, members, configGen) + require.NoError(t, err) +} + +func TestBuildMemberlistConfigClusterLabelOptions(t *testing.T) { + t.Parallel() + tests := []struct { + name string + clusterLabel string + clusterLabelVerificationDisabled bool + }{ + { + name: "empty label keeps verification enabled by default", + }, + { + name: "configured label can disable verification", + clusterLabel: "cluster-a", + clusterLabelVerificationDisabled: true, + }, + { + name: "configured label with verification enabled", + clusterLabel: "cluster-a", + clusterLabelVerificationDisabled: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var cfg KVConfig + flagext.DefaultValues(&cfg) + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: 0, + } + cfg.ClusterLabel = tc.clusterLabel + 
cfg.ClusterLabelVerificationDisabled = tc.clusterLabelVerificationDisabled + + kv := NewKV(cfg, log.NewNopLogger(), &dnsProviderMock{}, prometheus.NewPedanticRegistry()) + + mlCfg, err := kv.buildMemberlistConfig() + require.NoError(t, err) + require.Equal(t, tc.clusterLabel, mlCfg.Label) + require.Equal(t, tc.clusterLabelVerificationDisabled, mlCfg.SkipInboundLabelCheck) + + transport, ok := mlCfg.Transport.(*TCPTransport) + require.True(t, ok) + require.NoError(t, transport.Shutdown()) + }) + } +} + +func defaultMultipleClientsKVConfig(memberID int) KVConfig { + var cfg KVConfig + flagext.DefaultValues(&cfg) + + cfg.NodeName = fmt.Sprintf("Member-%d", memberID) + cfg.GossipInterval = 100 * time.Millisecond + cfg.GossipNodes = 3 + cfg.PushPullInterval = 5 * time.Second + cfg.TCPTransport = TCPTransportConfig{ + BindAddrs: []string{"localhost"}, + BindPort: 0, + } + + return cfg +} + +func testMultipleClientsWithConfigGenerator(t *testing.T, members int, configGen func(memberID int) KVConfig) error { + t.Helper() + + c := dataCodec{} + const key = "ring" + + clients := make([]*Client, 0, members) port := 0 + casInterval := time.Second - for i := range members { - id := fmt.Sprintf("Member-%d", i) - var cfg KVConfig - flagext.DefaultValues(&cfg) - cfg.NodeName = id + start := make(chan struct{}) + stop := make(chan struct{}) - cfg.GossipInterval = 100 * time.Millisecond - cfg.GossipNodes = 3 - cfg.PushPullInterval = 5 * time.Second + var clientWg sync.WaitGroup - cfg.TCPTransport = TCPTransportConfig{ - BindAddrs: []string{"localhost"}, - BindPort: 0, // randomize ports + clientErrCh := make(chan error, members) + getClientErr := func() error { + select { + case err := <-clientErrCh: + return err + default: + return nil } + } + defer func() { + close(stop) + clientWg.Wait() + }() + + for i := range members { + cfg := configGen(i) cfg.Codecs = []codec.Codec{c} mkv := NewKV(cfg, log.NewNopLogger(), &dnsProviderMock{}, prometheus.NewPedanticRegistry()) @@ -564,29 
+687,36 @@ func TestMultipleClients(t *testing.T) { kv, err := NewClient(mkv, c) require.NoError(t, err) - clients = append(clients, kv) - go runClient(t, kv, id, key, port, start, stop) + clientWg.Add(1) + go func(kv *Client, nodeName string, portToConnect int) { + defer clientWg.Done() + + if err := runClientWithErr(kv, nodeName, key, portToConnect, casInterval, start, stop); err != nil { + clientErrCh <- err + } + }(kv, cfg.NodeName, port) // next KV will connect to this one port = kv.kv.GetListeningPort() } - println("Waiting before start") + t.Log("Waiting before start") time.Sleep(2 * time.Second) close(start) - println("Observing ring ...") + t.Log("Observing ring ...") startTime := time.Now() - firstKv := clients[0] + firstKV := clients[0] ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - updates := 0 - firstKv.WatchKey(ctx, key, func(in any) bool { - updates++ + defer cancel() + joinedMembers := 0 + firstKV.WatchKey(ctx, key, func(in any) bool { r := in.(*data) + joinedMembers = len(r.Members) minTimestamp, maxTimestamp, avgTimestamp := getTimestamps(r.Members) @@ -595,64 +725,81 @@ func TestMultipleClients(t *testing.T) { "tokens, oldest timestamp:", now.Sub(time.Unix(minTimestamp, 0)).String(), "avg timestamp:", now.Sub(time.Unix(avgTimestamp, 0)).String(), "youngest timestamp:", now.Sub(time.Unix(maxTimestamp, 0)).String()) - return true // yes, keep watching + return true }) - cancel() // make linter happy - t.Logf("Ring updates observed: %d", updates) + if joinedMembers <= 1 { + return fmt.Errorf("expected to see at least 2 members, got %d", joinedMembers) + } - if updates < members { - // in general, at least one update from each node. (although that's not necessarily true... 
- // but typically we get more updates than that anyway) - t.Errorf("expected to see updates, got %d", updates) + if err := getClientErr(); err != nil { + return err } - // Let's check all the clients to see if they have relatively up-to-date information - // All of them should at least have all the clients - // And same tokens. - allTokens := []uint32(nil) + check := func() error { + allTokens := []uint32(nil) - for i := range members { - kv := clients[i] + for i, kv := range clients { + r := getData(t, kv, key) + t.Logf("KV %d: number of known members: %d", i, len(r.Members)) + if len(r.Members) != members { + return fmt.Errorf("member %d has only %d members in the ring", i, len(r.Members)) + } - r := getData(t, kv, key) - t.Logf("KV %d: number of known members: %d\n", i, len(r.Members)) - if len(r.Members) != members { - t.Errorf("Member %d has only %d members in the ring", i, len(r.Members)) - } + minTimestamp, maxTimestamp, avgTimestamp := getTimestamps(r.Members) + for n, ing := range r.Members { + if ing.State != ACTIVE { + stateStr := "UNKNOWN" + switch ing.State { + case JOINING: + stateStr = "JOINING" + case LEFT: + stateStr = "LEFT" + } + return fmt.Errorf("member %d: invalid state of member %s in the ring: %s (%v)", i, n, stateStr, ing.State) + } + } - minTimestamp, maxTimestamp, avgTimestamp := getTimestamps(r.Members) - for n, ing := range r.Members { - if ing.State != ACTIVE { - t.Errorf("Member %d: invalid state of member %s in the ring: %v ", i, n, ing.State) + now := time.Now() + t.Logf("Member %d: oldest: %v, avg: %v, youngest: %v", i, + now.Sub(time.Unix(minTimestamp, 0)).String(), + now.Sub(time.Unix(avgTimestamp, 0)).String(), + now.Sub(time.Unix(maxTimestamp, 0)).String()) + + tokens := r.getAllTokens() + if allTokens == nil { + allTokens = tokens + t.Logf("Found tokens: %d", len(allTokens)) + continue } - } - now := time.Now() - t.Logf("Member %d: oldest: %v, avg: %v, youngest: %v", i, - now.Sub(time.Unix(minTimestamp, 0)).String(), - 
now.Sub(time.Unix(avgTimestamp, 0)).String(), - now.Sub(time.Unix(maxTimestamp, 0)).String()) - - tokens := r.getAllTokens() - if allTokens == nil { - allTokens = tokens - t.Logf("Found tokens: %d", len(allTokens)) - } else { + if len(allTokens) != len(tokens) { - t.Errorf("Member %d: Expected %d tokens, got %d", i, len(allTokens), len(tokens)) - } else { - for ix, tok := range allTokens { - if tok != tokens[ix] { - t.Errorf("Member %d: Tokens at position %d differ: %v, %v", i, ix, tok, tokens[ix]) - break - } + return fmt.Errorf("member %d: expected %d tokens, got %d", i, len(allTokens), len(tokens)) + } + + for ix, tok := range allTokens { + if tok != tokens[ix] { + return fmt.Errorf("member %d: tokens at position %d differ: %v, %v", i, ix, tok, tokens[ix]) } } } + + return getClientErr() } - // We cannot shutdown the KV until now in order for Get() to work reliably. - close(stop) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + timeout := time.After(10 * time.Second) + for { + select { + case <-timeout: + return check() + case <-ticker.C: + if err := check(); err == nil { + return nil + } + } + } } func TestJoinMembersWithRetryBackoff(t *testing.T) { @@ -871,6 +1018,14 @@ func getTimestamps(members map[string]member) (min int64, max int64, avg int64) } func runClient(t *testing.T, kv *Client, name string, ringKey string, portToConnect int, start <-chan struct{}, stop <-chan struct{}) { + t.Helper() + + if err := runClientWithErr(kv, name, ringKey, portToConnect, time.Second, start, stop); err != nil { + t.Errorf("%v", err) + } +} + +func runClientWithErr(kv *Client, name string, ringKey string, portToConnect int, casInterval time.Duration, start <-chan struct{}, stop <-chan struct{}) error { // stop gossipping about the ring(s) defer services.StopAndAwaitTerminated(context.Background(), kv.kv) //nolint:errcheck @@ -883,14 +1038,28 @@ func runClient(t *testing.T, kv *Client, name string, ringKey string, portToConn if portToConnect > 0 { 
_, err := kv.kv.JoinMembers([]string{fmt.Sprintf("127.0.0.1:%d", portToConnect)}) if err != nil { - t.Errorf("%s failed to join the cluster: %v", name, err) - return + return fmt.Errorf("%s failed to join the cluster: %w", name, err) } } case <-stop: - return - case <-time.After(1 * time.Second): - cas(t, kv, ringKey, updateFn(name)) + return nil + case <-time.After(casInterval): + err := kv.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { + var d *data + if in != nil { + d = in.(*data) + } + + updated, retry, err := updateFn(name)(d) + if updated == nil { + return nil, retry, err + } + + return updated, retry, err + }) + if err != nil { + return fmt.Errorf("failed to CAS the ring: %w", err) + } } } } diff --git a/pkg/ring/kv/metrics.go b/pkg/ring/kv/metrics.go index 30ed8ff4aa3..3d2fa8928c8 100644 --- a/pkg/ring/kv/metrics.go +++ b/pkg/ring/kv/metrics.go @@ -2,6 +2,7 @@ package kv import ( "context" + "errors" "strconv" "time" @@ -33,7 +34,8 @@ func getCasErrorCode(err error) string { // If the error has been returned to abort the CAS operation, then we shouldn't // consider it an error when tracking metrics. - if casErr, ok := err.(interface{ IsOperationAborted() bool }); ok && casErr.IsOperationAborted() { + var casAborted interface{ IsOperationAborted() bool } + if errors.As(err, &casAborted) && casAborted.IsOperationAborted() { return "200" } diff --git a/pkg/ring/kv/metrics_test.go b/pkg/ring/kv/metrics_test.go new file mode 100644 index 00000000000..e7887aa3ec5 --- /dev/null +++ b/pkg/ring/kv/metrics_test.go @@ -0,0 +1,52 @@ +package kv + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// operationAbortedError is a local stub that mimics ha.ReplicasNotMatchError, +// implementing IsOperationAborted() to avoid an import cycle. 
+type operationAbortedError struct{} + +func (e operationAbortedError) Error() string { return "operation aborted" } +func (e operationAbortedError) IsOperationAborted() bool { return true } + +func TestGetCasErrorCode(t *testing.T) { + abortedErr := operationAbortedError{} + + tests := map[string]struct { + err error + expected string + }{ + "nil error": { + err: nil, + expected: "200", + }, + "operation aborted error (direct)": { + err: abortedErr, + expected: "200", + }, + "operation aborted error (single-wrapped by memberlist)": { + err: fmt.Errorf("fn returned error: %w", abortedErr), + expected: "200", + }, + "operation aborted error (double-wrapped by memberlist)": { + err: fmt.Errorf("failed to CAS-update key X: %w", + fmt.Errorf("fn returned error: %w", abortedErr)), + expected: "200", + }, + "generic error": { + err: fmt.Errorf("some real error"), + expected: "500", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.expected, getCasErrorCode(tc.err)) + }) + } +} diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index bbd5a3b7937..675785c6230 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -90,6 +90,7 @@ func Handler(remoteWrite2Enabled bool, acceptUnknownRemoteWriteContentType bool, handlePRW2 := func() { userID, err := users.TenantID(ctx) if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) return } @@ -112,7 +113,7 @@ func Handler(remoteWrite2Enabled bool, acceptUnknownRemoteWriteContentType bool, req.Source = cortexpb.API } - v1Req, err := convertV2RequestToV1(&req, overrides.EnableTypeAndUnitLabels(userID)) + v1Req, err := convertV2RequestToV1(&req, overrides.EnableTypeAndUnitLabels(userID), overrides.EnableStartTimestamp(userID)) if err != nil { level.Error(logger).Log("err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) @@ -208,11 +209,21 @@ func setPRW2RespHeader(w http.ResponseWriter, samples, histograms, exemplars int 
w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(exemplars, 10)) } -func convertV2RequestToV1(req *cortexpb.PreallocWriteRequestV2, enableTypeAndUnitLabels bool) (cortexpb.PreallocWriteRequest, error) { - var v1Req cortexpb.PreallocWriteRequest +func convertV2RequestToV1(req *cortexpb.PreallocWriteRequestV2, enableTypeAndUnitLabels bool, enableStartTimestamp bool) (v1Req cortexpb.PreallocWriteRequest, err error) { v1Timeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries)) var v1Metadata []*cortexpb.MetricMetadata + // Release any pulled TimeSeries back to the pool to prevent memory leaks in case of an error. + defer func() { + if err != nil { + for _, pts := range v1Timeseries { + if pts.TimeSeries != nil { + cortexpb.ReuseTimeseries(pts.TimeSeries) + } + } + } + }() + b := labels.NewScratchBuilder(0) symbols := req.Symbols for _, v2Ts := range req.Timeseries { @@ -246,27 +257,54 @@ func convertV2RequestToV1(req *cortexpb.PreallocWriteRequestV2, enableTypeAndUni lbs = slb.Labels() } - exemplars, err := convertV2ToV1Exemplars(&b, symbols, v2Ts.Exemplars) + ts := cortexpb.TimeseriesFromPool() + ts.Labels = cortexpb.FromLabelsToLabelAdapters(lbs) + ts.Samples = ts.Samples[:0] + for _, sample := range v2Ts.Samples { + if enableStartTimestamp { + // Use created_timestamp as a fallback for start_timestamp_ms when not set. + if sample.StartTimestampMs == 0 { + sample.StartTimestampMs = v2Ts.CreatedTimestamp + } + } else { + sample.StartTimestampMs = 0 + } + ts.Samples = append(ts.Samples, sample) + } + + ts.Exemplars, err = convertV2ToV1Exemplars(&b, symbols, v2Ts.Exemplars, ts.Exemplars[:0]) if err != nil { + // Current ts is not appended to the v1Timeseries, so we should call reuse here. + cortexpb.ReuseTimeseries(ts) return v1Req, err } - ts := cortexpb.TimeseriesFromPool() - ts.Labels = cortexpb.FromLabelsToLabelAdapters(lbs) - ts.Samples = append(ts.Samples, v2Ts.Samples...) 
- ts.Exemplars = exemplars - ts.Histograms = append(ts.Histograms, v2Ts.Histograms...) + ts.Histograms = ts.Histograms[:0] + for _, histogram := range v2Ts.Histograms { + if enableStartTimestamp { + // Use created_timestamp as a fallback for start_timestamp_ms when not set. + if histogram.StartTimestampMs == 0 { + histogram.StartTimestampMs = v2Ts.CreatedTimestamp + } + } else { + histogram.StartTimestampMs = 0 + } + ts.Histograms = append(ts.Histograms, histogram) + } v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{ TimeSeries: ts, }) if shouldConvertV2Metadata(v2Ts.Metadata) { - metricName, err := extract.MetricNameFromLabels(lbs) + var metricName string + metricName, err = extract.MetricNameFromLabels(lbs) if err != nil { return v1Req, err } - metadata, err := convertV2ToV1Metadata(metricName, symbols, v2Ts.Metadata) + + var metadata *cortexpb.MetricMetadata + metadata, err = convertV2ToV1Metadata(metricName, symbols, v2Ts.Metadata) if err != nil { return v1Req, err } @@ -319,12 +357,11 @@ func convertV2ToV1Metadata(name string, symbols []string, metadata cortexpb.Meta }, nil } -func convertV2ToV1Exemplars(b *labels.ScratchBuilder, symbols []string, v2Exemplars []cortexpb.ExemplarV2) ([]cortexpb.Exemplar, error) { - v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars)) +func convertV2ToV1Exemplars(b *labels.ScratchBuilder, symbols []string, v2Exemplars []cortexpb.ExemplarV2, v1Exemplars []cortexpb.Exemplar) ([]cortexpb.Exemplar, error) { for _, e := range v2Exemplars { lbs, err := e.ToLabels(b, symbols) if err != nil { - return nil, err + return v1Exemplars, err } v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{ Labels: cortexpb.FromLabelsToLabelAdapters(lbs), diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index c842fd4d48f..3021449c228 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -75,7 +75,7 @@ func makeV2ReqWithSeries(num int) *cortexpb.PreallocWriteRequestV2 { func 
createPRW1HTTPRequest(seriesNum int) (*http.Request, error) { series := makeV2ReqWithSeries(seriesNum) - v1Req, err := convertV2RequestToV1(series, false) + v1Req, err := convertV2RequestToV1(series, false, false) if err != nil { return nil, err } @@ -147,6 +147,9 @@ func Benchmark_Handler(b *testing.B) { req, err := createPRW2HTTPRequest(seriesNum) require.NoError(b, err) + ctx := user.InjectOrgID(req.Context(), "user") + req = req.WithContext(ctx) + b.ReportAllocs() for b.Loop() { @@ -168,7 +171,7 @@ func Benchmark_convertV2RequestToV1(b *testing.B) { b.ReportAllocs() for b.Loop() { - _, err := convertV2RequestToV1(series, false) + _, err := convertV2RequestToV1(series, false, false) require.NoError(b, err) } }) @@ -377,7 +380,7 @@ func Test_convertV2RequestToV1_WithEnableTypeAndUnitLabels(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - v1Req, err := convertV2RequestToV1(test.v2Req, test.enableTypeAndUnitLabels) + v1Req, err := convertV2RequestToV1(test.v2Req, test.enableTypeAndUnitLabels, false) for i := range v1Req.Timeseries { if len(v1Req.Timeseries[i].Samples) == 0 { @@ -441,7 +444,7 @@ func Test_convertV2RequestToV1(t *testing.T) { v2Req.Symbols = symbols v2Req.Timeseries = timeseries - v1Req, err := convertV2RequestToV1(&v2Req, false) + v1Req, err := convertV2RequestToV1(&v2Req, false, false) assert.NoError(t, err) expectedSamples := 3 expectedExemplars := 2 @@ -541,7 +544,7 @@ func Test_convertV2RequestToV1_InvalidSymbolRefs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := convertV2RequestToV1(tt.v2Req, false) + _, err := convertV2RequestToV1(tt.v2Req, false, false) if tt.expectedError == "" { assert.NoError(t, err) } else { @@ -1208,7 +1211,7 @@ func Test_convertV2RequestToV1_DeepCopy(t *testing.T) { }, } - v1Req, err := convertV2RequestToV1(v2Req, false) + v1Req, err := convertV2RequestToV1(v2Req, false, false) require.NoError(t, err) require.Len(t, v1Req.Timeseries, 1) @@ 
-1224,3 +1227,141 @@ func Test_convertV2RequestToV1_DeepCopy(t *testing.T) { require.True(t, len(v1Ts.Histograms) > 0 && len(v2Ts.Histograms) > 0) require.NotSame(t, &v1Ts.Histograms[0], &v2Ts.Histograms[0], "Histograms array must not share the same memory address") } + +func Test_convertV2RequestToV1_PreservesStartTimestamp(t *testing.T) { + v2Req := &cortexpb.PreallocWriteRequestV2{ + WriteRequestV2: cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_metric"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{ + { + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000, StartTimestampMs: 100}, + }, + Histograms: []cortexpb.Histogram{ + {TimestampMs: 2000, StartTimestampMs: 200}, + }, + }, + }, + }, + }, + } + + t.Run("enableStartTimestamp=true preserves ST", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, true) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(100), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(200), v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) + + t.Run("enableStartTimestamp=false clears ST", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, false) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) +} + +func Test_convertV2RequestToV1_UsesCreatedTimestampAsFallback(t *testing.T) { + v2Req := &cortexpb.PreallocWriteRequestV2{ + WriteRequestV2: cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_metric"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{ + { + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + 
CreatedTimestamp: 777, + Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1000}}, + Histograms: []cortexpb.Histogram{{TimestampMs: 2000}}, + }, + }, + }, + }, + } + + t.Run("enableStartTimestamp=true uses CT as fallback for ST", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, true) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(777), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(777), v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) + + t.Run("enableStartTimestamp=false ignores CT", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, false) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) +} + +func Test_convertV2RequestToV1_ExplicitStartTimestampTakesPrecedence(t *testing.T) { + v2Req := &cortexpb.PreallocWriteRequestV2{ + WriteRequestV2: cortexpb.WriteRequestV2{ + Symbols: []string{"", "__name__", "test_metric"}, + Timeseries: []cortexpb.PreallocTimeseriesV2{ + { + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: []uint32{1, 2}, + CreatedTimestamp: 777, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: 1000, StartTimestampMs: 100}, + }, + Histograms: []cortexpb.Histogram{ + {TimestampMs: 2000, StartTimestampMs: 200}, + }, + }, + }, + }, + }, + } + + t.Run("enableStartTimestamp=true: explicit ST takes precedence over CT", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, true) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(100), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(200), 
v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) + + t.Run("enableStartTimestamp=false: ST and CT are both ignored", func(t *testing.T) { + v1Req, err := convertV2RequestToV1(v2Req, false, false) + require.NoError(t, err) + require.Len(t, v1Req.Timeseries[0].Samples, 1) + require.Len(t, v1Req.Timeseries[0].Histograms, 1) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Samples[0].StartTimestampMs) + assert.Equal(t, int64(0), v1Req.Timeseries[0].Histograms[0].StartTimestampMs) + }) +} + +func TestHandler_remoteWriteV2_UnauthorizedWithoutTenantID(t *testing.T) { + var limits validation.Limits + flagext.DefaultValues(&limits) + overrides := validation.NewOverrides(limits, nil) + + pushCalled := false + pushFunc := func(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { + pushCalled = true + return &cortexpb.WriteResponse{}, nil + } + + handler := Handler(true, false, 100000, overrides, nil, pushFunc, nil) + + req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true) + + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + + assert.Equal(t, http.StatusUnauthorized, resp.Code) + assert.Contains(t, resp.Body.String(), user.ErrNoOrgID.Error()) + assert.False(t, pushCalled, "push function must not be called when tenant ID is missing") +} diff --git a/pkg/util/validation/exporter_test.go b/pkg/util/validation/exporter_test.go index 0b1ef21ce8b..fa24aa4a4f6 100644 --- a/pkg/util/validation/exporter_test.go +++ b/pkg/util/validation/exporter_test.go @@ -54,6 +54,7 @@ func TestOverridesExporter_withConfig(t *testing.T) { cortex_overrides{limit_name="compactor_tenant_shard_size",user="tenant-a"} 0 cortex_overrides{limit_name="creation_grace_period",user="tenant-a"} 600 cortex_overrides{limit_name="enable_native_histograms",user="tenant-a"} 0 + cortex_overrides{limit_name="enable_start_timestamp",user="tenant-a"} 0 cortex_overrides{limit_name="enable_type_and_unit_labels",user="tenant-a"} 0 
cortex_overrides{limit_name="enforce_metadata_metric_name",user="tenant-a"} 1 cortex_overrides{limit_name="enforce_metric_name",user="tenant-a"} 1 @@ -103,7 +104,9 @@ func TestOverridesExporter_withConfig(t *testing.T) { cortex_overrides{limit_name="parquet_max_fetched_chunk_bytes",user="tenant-a"} 0 cortex_overrides{limit_name="parquet_max_fetched_data_bytes",user="tenant-a"} 0 cortex_overrides{limit_name="parquet_max_fetched_row_count",user="tenant-a"} 0 + cortex_overrides{limit_name="query_ingesters_within",user="tenant-a"} 0 cortex_overrides{limit_name="query_partial_data",user="tenant-a"} 0 + cortex_overrides{limit_name="query_store_after",user="tenant-a"} 0 cortex_overrides{limit_name="query_vertical_shard_size",user="tenant-a"} 0 cortex_overrides{limit_name="reject_old_samples",user="tenant-a"} 0 cortex_overrides{limit_name="reject_old_samples_max_age",user="tenant-a"} 1.2096e+06 @@ -114,6 +117,7 @@ func TestOverridesExporter_withConfig(t *testing.T) { cortex_overrides{limit_name="ruler_query_offset",user="tenant-a"} 0 cortex_overrides{limit_name="ruler_tenant_shard_size",user="tenant-a"} 0 cortex_overrides{limit_name="rules_partial_data",user="tenant-a"} 0 + cortex_overrides{limit_name="shuffle_sharding_ingesters_lookback_period",user="tenant-a"} 0 cortex_overrides{limit_name="store_gateway_tenant_shard_size",user="tenant-a"} 0 `), "cortex_overrides")) } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 73f09fe3407..36284110a05 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -154,6 +154,7 @@ type Limits struct { MaxNativeHistogramBuckets int `yaml:"max_native_histogram_buckets" json:"max_native_histogram_buckets"` PromoteResourceAttributes []string `yaml:"promote_resource_attributes" json:"promote_resource_attributes"` EnableTypeAndUnitLabels bool `yaml:"enable_type_and_unit_labels" json:"enable_type_and_unit_labels"` + EnableStartTimestamp bool `yaml:"enable_start_timestamp" 
json:"enable_start_timestamp"` // Ingester enforced limits. // Series @@ -196,6 +197,12 @@ type Limits struct { MaxQueriersPerTenant float64 `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size"` QueryPartialData bool `yaml:"query_partial_data" json:"query_partial_data" doc:"nocli|description=Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.|default=false"` + QueryIngestersWithin model.Duration `yaml:"query_ingesters_within" json:"query_ingesters_within"` + + // If set, the querier manipulates the max time to not be greater than + // "now - queryStoreAfter" so that most recent blocks are not queried. + QueryStoreAfter model.Duration `yaml:"query_store_after" json:"query_store_after"` + ShuffleShardingIngestersLookbackPeriod model.Duration `yaml:"shuffle_sharding_ingesters_lookback_period" json:"shuffle_sharding_ingesters_lookback_period"` // Parquet Queryable enforced limits. ParquetMaxFetchedRowCount int `yaml:"parquet_max_fetched_row_count" json:"parquet_max_fetched_row_count"` @@ -274,6 +281,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Var((*flagext.StringSliceCSV)(&l.PromoteResourceAttributes), "distributor.promote-resource-attributes", "Comma separated list of resource attributes that should be converted to labels.") f.Var(&l.DropLabels, "distributor.drop-label", "This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.") f.BoolVar(&l.EnableTypeAndUnitLabels, "distributor.enable-type-and-unit-labels", false, "EXPERIMENTAL: If true, the __type__ and __unit__ labels are added to metrics. 
This applies to remote write v2 and OTLP requests.") + f.BoolVar(&l.EnableStartTimestamp, "distributor.enable-start-timestamp", false, "EXPERIMENTAL: If true, StartTimestampMs (ST) is handled for remote write v2 samples and histograms. CreatedTimestamp (CT) is used as a fallback when ST is not set.") f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names") f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name") f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") @@ -312,6 +320,16 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable") f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "Deprecated (use max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") f.IntVar(&l.MaxFetchedDataBytesPerQuery, "querier.max-fetched-data-bytes-per-query", 0, "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.") + + _ = l.QueryIngestersWithin.Set("0") + f.Var(&l.QueryIngestersWithin, "limits.query-ingesters-within", "Maximum lookback duration for querying data from ingesters. Queries for data older than this will only query the long-term storage. This is a per-tenant limit that can be overridden in the runtime configuration. 
Should be less than close-idle-tsdb-timeout.") + + _ = l.QueryStoreAfter.Set("0") + f.Var(&l.QueryStoreAfter, "limits.query-store-after", "Minimum age of data before querying the long-term storage. Queries for data younger than this will only query ingesters. This is a per-tenant limit that can be overridden in the runtime configuration.") + + _ = l.ShuffleShardingIngestersLookbackPeriod.Set("0") + f.Var(&l.ShuffleShardingIngestersLookbackPeriod, "limits.shuffle-sharding-ingesters-lookback-period", "Lookback period for shuffle sharding of ingesters. This is a per-tenant limit that can be overridden in the runtime configuration. Should be greater than or equal to query-ingesters-within.") + f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time of range query parameter and max - min of data fetched time range). This limit is enforced in the query-frontend and ruler (on the received query). 0 to disable.") f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 
0 to disable.") f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of split queries will be scheduled in parallel by the frontend.") @@ -419,6 +437,28 @@ func (l *Limits) Validate(nameValidationScheme model.ValidationScheme, shardByAl return nil } +func (l *Limits) ValidateQueryLimits(userID string, closeIdleTSDBTimeout time.Duration) error { + queryIngestersWithin := time.Duration(l.QueryIngestersWithin) + queryStoreAfter := time.Duration(l.QueryStoreAfter) + shuffleShardingLookback := time.Duration(l.ShuffleShardingIngestersLookbackPeriod) + + if queryIngestersWithin > 0 && closeIdleTSDBTimeout > 0 && queryIngestersWithin >= closeIdleTSDBTimeout { + return fmt.Errorf("tenant %s: query_ingesters_within (%s) must be less than close_idle_tsdb_timeout (%s)", + userID, queryIngestersWithin, closeIdleTSDBTimeout) + } + + if queryIngestersWithin > 0 && queryStoreAfter > 0 && queryStoreAfter >= queryIngestersWithin { + return fmt.Errorf("tenant %s: query_store_after (%s) must be less than query_ingesters_within (%s)", + userID, queryStoreAfter, queryIngestersWithin) + } + + if queryStoreAfter > 0 && shuffleShardingLookback > 0 && shuffleShardingLookback < queryStoreAfter { + return fmt.Errorf("tenant %s: shuffle_sharding_ingesters_lookback_period (%s) is less than query_store_after (%s)", + userID, shuffleShardingLookback, queryStoreAfter) + } + + return nil +} // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (l *Limits) UnmarshalYAML(unmarshal func(any) error) error { @@ -1137,6 +1177,10 @@ func (o *Overrides) EnableTypeAndUnitLabels(userID string) bool { return o.GetOverridesForUser(userID).EnableTypeAndUnitLabels } +func (o *Overrides) EnableStartTimestamp(userID string) bool { + return o.GetOverridesForUser(userID).EnableStartTimestamp +} + func (o *Overrides) DisabledRuleGroups(userID string) DisabledRuleGroups { if o.tenantLimits != nil { l := o.tenantLimits.ByUserID(userID) @@ -1179,6 +1223,18 @@ func (o *Overrides) MaxTotalLabelValueLengthForUnoptimizedRegex(userID string) i return o.GetOverridesForUser(userID).MaxTotalLabelValueLengthForUnoptimizedRegex } +func (o *Overrides) QueryIngestersWithin(userID string) time.Duration { + return time.Duration(o.GetOverridesForUser(userID).QueryIngestersWithin) +} + +func (o *Overrides) QueryStoreAfter(userID string) time.Duration { + return time.Duration(o.GetOverridesForUser(userID).QueryStoreAfter) +} + +func (o *Overrides) ShuffleShardingIngestersLookbackPeriod(userID string) time.Duration { + return time.Duration(o.GetOverridesForUser(userID).ShuffleShardingIngestersLookbackPeriod) +} + // GetOverridesForUser returns the per-tenant limits with overrides. 
func (o *Overrides) GetOverridesForUser(userID string) *Limits { if o.tenantLimits != nil { diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index 807b3d8e2b4..a0b0b90e3c1 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -996,3 +996,274 @@ func TestIsLimitError(t *testing.T) { assert.False(t, IsLimitError(fmt.Errorf("test error"))) assert.True(t, IsLimitError(LimitError("test error"))) } +func TestLimits_ValidateQueryLimits(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + queryIngestersWithin time.Duration + queryStoreAfter time.Duration + shuffleShardingIngestersLookbackPeriod time.Duration + closeIdleTSDBTimeout time.Duration + expectedError string + }{ + "all limits disabled (zero values) should be valid": { + queryIngestersWithin: 0, + queryStoreAfter: 0, + shuffleShardingIngestersLookbackPeriod: 0, + closeIdleTSDBTimeout: 0, + expectedError: "", + }, + "valid configuration with all limits enabled": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "", + }, + "valid configuration with overlap for safety": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 23 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 26 * time.Hour, + closeIdleTSDBTimeout: 30 * time.Hour, + expectedError: "", + }, + "valid configuration with only queryIngestersWithin enabled": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 0, + shuffleShardingIngestersLookbackPeriod: 0, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "", + }, + "valid configuration with only queryStoreAfter enabled": { + queryIngestersWithin: 0, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 0, + expectedError: "", + }, + "invalid: queryIngestersWithin >= closeIdleTSDBTimeout": { + 
queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 25 * time.Hour, + expectedError: "query_ingesters_within (25h0m0s) must be less than close_idle_tsdb_timeout (25h0m0s)", + }, + "invalid: queryIngestersWithin > closeIdleTSDBTimeout": { + queryIngestersWithin: 26 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 26 * time.Hour, + closeIdleTSDBTimeout: 25 * time.Hour, + expectedError: "query_ingesters_within (26h0m0s) must be less than close_idle_tsdb_timeout (25h0m0s)", + }, + "invalid: queryStoreAfter >= queryIngestersWithin": { + queryIngestersWithin: 24 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "query_store_after (24h0m0s) must be less than query_ingesters_within (24h0m0s)", + }, + "invalid: queryStoreAfter > queryIngestersWithin": { + queryIngestersWithin: 24 * time.Hour, + queryStoreAfter: 25 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 26 * time.Hour, + closeIdleTSDBTimeout: 27 * time.Hour, + expectedError: "query_store_after (25h0m0s) must be less than query_ingesters_within (24h0m0s)", + }, + "invalid: shuffleShardingLookback < queryStoreAfter": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 20 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "shuffle_sharding_ingesters_lookback_period (20h0m0s) is less than query_store_after (24h0m0s)", + }, + "valid: shuffleShardingLookback between queryStoreAfter and queryIngestersWithin": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 20 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 22 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "", + }, + "boundary: queryIngestersWithin exactly 1ms less than 
closeIdleTSDBTimeout": { + queryIngestersWithin: 25*time.Hour - time.Millisecond, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 25 * time.Hour, + expectedError: "", + }, + "boundary: queryStoreAfter exactly 1ms less than queryIngestersWithin": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 25*time.Hour - time.Millisecond, + shuffleShardingIngestersLookbackPeriod: 25 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "", + }, + "boundary: shuffleShardingLookback exactly equal to queryStoreAfter": { + queryIngestersWithin: 25 * time.Hour, + queryStoreAfter: 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 24 * time.Hour, + closeIdleTSDBTimeout: 26 * time.Hour, + expectedError: "", + }, + "edge case: very large values": { + queryIngestersWithin: 365 * 24 * time.Hour, // 1 year + queryStoreAfter: 364 * 24 * time.Hour, + shuffleShardingIngestersLookbackPeriod: 365 * 24 * time.Hour, + closeIdleTSDBTimeout: 366 * 24 * time.Hour, + expectedError: "", + }, + "edge case: very small values": { + queryIngestersWithin: 2 * time.Second, + queryStoreAfter: 1 * time.Second, + shuffleShardingIngestersLookbackPeriod: 2 * time.Second, + closeIdleTSDBTimeout: 3 * time.Second, + expectedError: "", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + limits := Limits{ + QueryIngestersWithin: model.Duration(testData.queryIngestersWithin), + QueryStoreAfter: model.Duration(testData.queryStoreAfter), + ShuffleShardingIngestersLookbackPeriod: model.Duration(testData.shuffleShardingIngestersLookbackPeriod), + } + + err := limits.ValidateQueryLimits("test-tenant", testData.closeIdleTSDBTimeout) + + if testData.expectedError == "" { + assert.NoError(t, err, "expected no error but got: %v", err) + } else { + assert.Error(t, err, "expected error but got none") + if err != nil { + assert.Contains(t, err.Error(), testData.expectedError, "error 
message mismatch") + } + } + }) + } +} + +func TestQueryLimits_TenantOverrides(t *testing.T) { + t.Parallel() + + // Setup: Create three tenants with different query limit configurations + tenantLimits := map[string]*Limits{ + "tenant-a": { + QueryIngestersWithin: model.Duration(1 * time.Hour), + QueryStoreAfter: model.Duration(30 * time.Minute), + ShuffleShardingIngestersLookbackPeriod: model.Duration(1 * time.Hour), + }, + "tenant-b": { + QueryIngestersWithin: model.Duration(2 * time.Hour), + QueryStoreAfter: model.Duration(1 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(2 * time.Hour), + }, + "tenant-c": { + // Uses defaults (all zeros - disabled) + QueryIngestersWithin: 0, + QueryStoreAfter: 0, + ShuffleShardingIngestersLookbackPeriod: 0, + }, + } + + defaults := Limits{ + QueryIngestersWithin: model.Duration(25 * time.Hour), + QueryStoreAfter: model.Duration(24 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(25 * time.Hour), + } + + ov := NewOverrides(defaults, newMockTenantLimits(tenantLimits)) + + // Verify tenant-a gets their specific limits + assert.Equal(t, 1*time.Hour, ov.QueryIngestersWithin("tenant-a")) + assert.Equal(t, 30*time.Minute, ov.QueryStoreAfter("tenant-a")) + assert.Equal(t, 1*time.Hour, ov.ShuffleShardingIngestersLookbackPeriod("tenant-a")) + + // Verify tenant-b gets their specific limits + assert.Equal(t, 2*time.Hour, ov.QueryIngestersWithin("tenant-b")) + assert.Equal(t, 1*time.Hour, ov.QueryStoreAfter("tenant-b")) + assert.Equal(t, 2*time.Hour, ov.ShuffleShardingIngestersLookbackPeriod("tenant-b")) + + // Verify tenant-c gets their specific limits (zeros) + assert.Equal(t, time.Duration(0), ov.QueryIngestersWithin("tenant-c")) + assert.Equal(t, time.Duration(0), ov.QueryStoreAfter("tenant-c")) + assert.Equal(t, time.Duration(0), ov.ShuffleShardingIngestersLookbackPeriod("tenant-c")) + + // Verify unknown tenant gets defaults + assert.Equal(t, 25*time.Hour, 
ov.QueryIngestersWithin("tenant-unknown")) + assert.Equal(t, 24*time.Hour, ov.QueryStoreAfter("tenant-unknown")) + assert.Equal(t, 25*time.Hour, ov.ShuffleShardingIngestersLookbackPeriod("tenant-unknown")) +} + +func TestQueryLimits_TenantOverridesValidation(t *testing.T) { + t.Parallel() + + closeIdleTSDBTimeout := 26 * time.Hour + + tests := map[string]struct { + tenantLimits map[string]*Limits + tenantID string + expectedError string + }{ + "valid tenant configuration": { + tenantLimits: map[string]*Limits{ + "valid-tenant": { + QueryIngestersWithin: model.Duration(25 * time.Hour), + QueryStoreAfter: model.Duration(24 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(25 * time.Hour), + }, + }, + tenantID: "valid-tenant", + expectedError: "", + }, + "invalid tenant: queryStoreAfter >= queryIngestersWithin": { + tenantLimits: map[string]*Limits{ + "invalid-tenant": { + QueryIngestersWithin: model.Duration(24 * time.Hour), + QueryStoreAfter: model.Duration(25 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(26 * time.Hour), + }, + }, + tenantID: "invalid-tenant", + expectedError: "query_store_after (25h0m0s) must be less than query_ingesters_within (24h0m0s)", + }, + "invalid tenant: queryIngestersWithin >= closeIdleTSDBTimeout": { + tenantLimits: map[string]*Limits{ + "invalid-tenant": { + QueryIngestersWithin: model.Duration(26 * time.Hour), + QueryStoreAfter: model.Duration(24 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(26 * time.Hour), + }, + }, + tenantID: "invalid-tenant", + expectedError: "query_ingesters_within (26h0m0s) must be less than close_idle_tsdb_timeout (26h0m0s)", + }, + "invalid tenant: shuffleShardingLookback < queryStoreAfter": { + tenantLimits: map[string]*Limits{ + "invalid-tenant": { + QueryIngestersWithin: model.Duration(25 * time.Hour), + QueryStoreAfter: model.Duration(24 * time.Hour), + ShuffleShardingIngestersLookbackPeriod: model.Duration(20 * time.Hour), + }, + }, + 
tenantID: "invalid-tenant", + expectedError: "shuffle_sharding_ingesters_lookback_period (20h0m0s) is less than query_store_after (24h0m0s)", + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + limits := testData.tenantLimits[testData.tenantID] + err := limits.ValidateQueryLimits(testData.tenantID, closeIdleTSDBTimeout) + + if testData.expectedError == "" { + assert.NoError(t, err, "expected no error but got: %v", err) + } else { + assert.Error(t, err, "expected error but got none") + if err != nil { + assert.Contains(t, err.Error(), testData.expectedError, "error message mismatch") + assert.Contains(t, err.Error(), testData.tenantID, "error should contain tenant ID") + } + } + }) + } +} diff --git a/schemas/cortex-config-schema.json b/schemas/cortex-config-schema.json index 20cf970c35b..18566971cb8 100644 --- a/schemas/cortex-config-schema.json +++ b/schemas/cortex-config-schema.json @@ -5077,6 +5077,12 @@ "type": "boolean", "x-cli-flag": "blocks-storage.tsdb.enable-native-histograms" }, + "enable_start_timestamp": { + "default": false, + "description": "EXPERIMENTAL: If true, StartTimestampMs (ST) is handled for remote write v2 samples and histograms. CreatedTimestamp (CT) is used as a fallback when ST is not set.", + "type": "boolean", + "x-cli-flag": "distributor.enable-start-timestamp" + }, "enable_type_and_unit_labels": { "default": false, "description": "EXPERIMENTAL: If true, the __type__ and __unit__ labels are added to metrics. This applies to remote write v2 and OTLP requests.", @@ -5424,6 +5430,13 @@ "type": "array", "x-cli-flag": "distributor.promote-resource-attributes" }, + "query_ingesters_within": { + "default": "0s", + "description": "Maximum lookback duration for querying data from ingesters. Queries for data older than this will only query the long-term storage. This is a per-tenant limit that can be overridden in the runtime configuration. 
Should be less than or equal to close-idle-tsdb-timeout.", + "type": "string", + "x-cli-flag": "limits.query-ingesters-within", + "x-format": "duration" + }, "query_partial_data": { "default": false, "description": "Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.", @@ -5475,6 +5488,13 @@ }, "type": "object" }, + "query_store_after": { + "default": "0s", + "description": "Minimum age of data before querying the long-term storage. Queries for data younger than this will only query ingesters. This is a per-tenant limit that can be overridden in the runtime configuration.", + "type": "string", + "x-cli-flag": "limits.query-store-after", + "x-format": "duration" + }, "query_vertical_shard_size": { "default": 0, "description": "[Experimental] Number of shards to use when distributing shardable PromQL queries.", @@ -5556,6 +5576,13 @@ "description": "S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used.", "type": "string" }, + "shuffle_sharding_ingesters_lookback_period": { + "default": "0s", + "description": "Lookback period for shuffle sharding of ingesters. This is a per-tenant limit that can be overridden in the runtime configuration. Should be greater than or equal to query-ingesters-within.", + "type": "string", + "x-cli-flag": "limits.shuffle-sharding-ingesters-lookback-period", + "x-format": "duration" + }, "store_gateway_tenant_shard_size": { "default": 0, "description": "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. 
If the value is \u003c 1 the shard size will be a percentage of the total store-gateways.", @@ -5600,6 +5627,17 @@ "type": "number", "x-cli-flag": "memberlist.bind-port" }, + "cluster_label": { + "description": "The cluster label is an optional string to include in outbound packets and gossip streams. Other members in the memberlist cluster will discard any message whose label doesn't match the configured one, unless the 'cluster-label-verification-disabled' configuration option is set to true.", + "type": "string", + "x-cli-flag": "memberlist.cluster-label" + }, + "cluster_label_verification_disabled": { + "default": false, + "description": "When true, memberlist doesn't verify that inbound packets and gossip streams have the cluster label matching the configured one. This verification should be disabled while rolling out the change to the configured cluster label in a live memberlist cluster.", + "type": "boolean", + "x-cli-flag": "memberlist.cluster-label-verification-disabled" + }, "compression_enabled": { "default": true, "description": "Enable message compression. This can be used to reduce bandwidth usage at the cost of slightly more CPU utilization.", @@ -5996,33 +6034,12 @@ "type": "boolean", "x-cli-flag": "querier.per-step-stats-enabled" }, - "query_ingesters_within": { - "default": "0s", - "description": "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.", - "type": "string", - "x-cli-flag": "querier.query-ingesters-within", - "x-format": "duration" - }, - "query_store_after": { - "default": "0s", - "description": "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. 
When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.", - "type": "string", - "x-cli-flag": "querier.query-store-after", - "x-format": "duration" - }, "response_compression": { "default": "gzip", "description": "Use compression for metrics query API or instant and range query APIs. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)", "type": "string", "x-cli-flag": "querier.response-compression" }, - "shuffle_sharding_ingesters_lookback_period": { - "default": "0s", - "description": "When distributor's sharding strategy is shuffle-sharding and this setting is \u003e 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", - "type": "string", - "x-cli-flag": "querier.shuffle-sharding-ingesters-lookback-period", - "x-format": "duration" - }, "store_gateway_addresses": { "description": "Comma separated list of store-gateway addresses in DNS Service Discovery format. 
This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).", "type": "string", diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go index c6bbec019a5..1c75d7c4942 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/remote_engine.go @@ -26,15 +26,11 @@ import ( "github.com/thanos-io/thanos/pkg/api/query/querypb" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/server/http/middleware" - "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/storepb/prompb" grpc_tracing "github.com/thanos-io/thanos/pkg/tracing/tracing_middleware" ) -// defaultResponseBatchSize is the default number of timeseries to batch per gRPC response message. -// This value provides a good balance between reducing per-message overhead and keeping message sizes reasonable. -const defaultResponseBatchSize = 64 - type RemoteEndpointsCreator func( replicaLabels []string, partialResponse bool, @@ -121,15 +117,14 @@ func (r remoteEndpoints) Engines() []api.RemoteEngine { type remoteEngine struct { opts Opts logger log.Logger - client Client - mintOnce sync.Once - mint int64 - maxtOnce sync.Once - maxt int64 - labelSetsOnce sync.Once - labelSets []labels.Labels + initOnce sync.Once + + mint int64 + maxt int64 + labelSets []labels.Labels + partitionLabelSets []labels.Labels } func NewRemoteEngine(logger log.Logger, queryClient Client, opts Opts) *remoteEngine { @@ -140,100 +135,109 @@ func NewRemoteEngine(logger log.Logger, queryClient Client, opts Opts) *remoteEn } } -// MinT returns the minimum timestamp that is safe to query in the remote engine. 
-// In order to calculate it, we find the highest min time for each label set, and we return -// the lowest of those values. -// Calculating the MinT this way makes remote queries resilient to cases where one tsdb replica would delete -// a block due to retention before other replicas did the same. -// See https://github.com/thanos-io/promql-engine/issues/187. -func (r *remoteEngine) MinT() int64 { +func (r *remoteEngine) init() { + r.initOnce.Do(func() { + replicaLabelSet := make(map[string]struct{}) + for _, lbl := range r.opts.ReplicaLabels { + replicaLabelSet[lbl] = struct{}{} + } + partitionLabelsSet := make(map[string]struct{}) + for _, lbl := range r.opts.PartitionLabels { + partitionLabelsSet[lbl] = struct{}{} + } + + // strip out replica labels and scopes the remaining labels + // onto the partition labels if they are set. - r.mintOnce.Do(func() { + // partitionLabelSets are used to compute how to push down, they are the minimum set of labels + // that form a partition of the remote engines. + // labelSets are all labelsets of the remote engine, they are used for fan-out pruning on labels + // that dont meaningfully contribute to the partitioning but are still useful. 
var ( hashBuf = make([]byte, 0, 128) highestMintByLabelSet = make(map[uint64]int64) + + labelSetsBuilder labels.ScratchBuilder + partitionLabelSetsBuilder labels.ScratchBuilder + + labelSets = make([]labels.Labels, 0, len(r.client.tsdbInfos)) + partitionLabelSets = make([]labels.Labels, 0, len(r.client.tsdbInfos)) ) - for _, lset := range r.adjustedInfos() { - key, _ := labelpb.ZLabelsToPromLabels(lset.Labels.Labels).HashWithoutLabels(hashBuf) + for _, info := range r.client.tsdbInfos { + labelSetsBuilder.Reset() + partitionLabelSetsBuilder.Reset() + for _, lbl := range info.Labels.Labels { + if _, ok := replicaLabelSet[lbl.Name]; ok { + continue + } + labelSetsBuilder.Add(lbl.Name, lbl.Value) + if _, ok := partitionLabelsSet[lbl.Name]; !ok && len(partitionLabelsSet) > 0 { + continue + } + partitionLabelSetsBuilder.Add(lbl.Name, lbl.Value) + } + + partitionLabelSet := partitionLabelSetsBuilder.Labels() + labelSet := labelSetsBuilder.Labels() + labelSets = append(labelSets, labelSet) + partitionLabelSets = append(partitionLabelSets, partitionLabelSet) + + key, _ := partitionLabelSet.HashWithoutLabels(hashBuf) lsetMinT, ok := highestMintByLabelSet[key] if !ok { - highestMintByLabelSet[key] = lset.MinTime + highestMintByLabelSet[key] = info.MinTime continue } // If we are querying with overlapping intervals, we want to find the first available timestamp // otherwise we want to find the last available timestamp. 
- if r.opts.QueryDistributedWithOverlappingInterval && lset.MinTime < lsetMinT { - highestMintByLabelSet[key] = lset.MinTime - } else if !r.opts.QueryDistributedWithOverlappingInterval && lset.MinTime > lsetMinT { - highestMintByLabelSet[key] = lset.MinTime + if r.opts.QueryDistributedWithOverlappingInterval && info.MinTime < lsetMinT { + highestMintByLabelSet[key] = info.MinTime + } else if !r.opts.QueryDistributedWithOverlappingInterval && info.MinTime > lsetMinT { + highestMintByLabelSet[key] = info.MinTime } } - var mint int64 = math.MaxInt64 + + // mint is the minimum timestamp that is safe to query in the remote engine. + // In order to calculate it, we find the highest min time for each label set, and we return + // the lowest of those values. + // Calculating the MinT this way makes remote queries resilient to cases where one tsdb replica would delete + // a block due to retention before other replicas did the same. + // See https://github.com/thanos-io/promql-engine/issues/187. + var ( + mint = int64(math.MaxInt64) + maxt = r.client.tsdbInfos.MaxT() + ) for _, m := range highestMintByLabelSet { if m < mint { mint = m } } + r.mint = mint + r.maxt = maxt + r.labelSets = labelSets + r.partitionLabelSets = partitionLabelSets }) +} +func (r *remoteEngine) MinT() int64 { + r.init() return r.mint } func (r *remoteEngine) MaxT() int64 { - r.maxtOnce.Do(func() { - r.maxt = r.client.tsdbInfos.MaxT() - }) + r.init() return r.maxt } -func (r *remoteEngine) PartitionLabelSets() []labels.Labels { - r.labelSetsOnce.Do(func() { - r.labelSets = r.adjustedInfos().LabelSets() - }) - return r.labelSets -} - func (r *remoteEngine) LabelSets() []labels.Labels { - r.labelSetsOnce.Do(func() { - r.labelSets = r.adjustedInfos().LabelSets() - }) + r.init() return r.labelSets } -// adjustedInfos strips out replica labels and scopes the remaining labels -// onto the partition labels if they are set. 
-func (r *remoteEngine) adjustedInfos() infopb.TSDBInfos { - replicaLabelSet := make(map[string]struct{}) - for _, lbl := range r.opts.ReplicaLabels { - replicaLabelSet[lbl] = struct{}{} - } - partitionLabelsSet := make(map[string]struct{}) - for _, lbl := range r.opts.PartitionLabels { - partitionLabelsSet[lbl] = struct{}{} - } - - // Strip replica labels from the result. - infos := make(infopb.TSDBInfos, 0, len(r.client.tsdbInfos)) - var builder labels.ScratchBuilder - for _, info := range r.client.tsdbInfos { - builder.Reset() - for _, lbl := range info.Labels.Labels { - if _, ok := replicaLabelSet[lbl.Name]; ok { - continue - } - if _, ok := partitionLabelsSet[lbl.Name]; !ok && len(partitionLabelsSet) > 0 { - continue - } - builder.Add(lbl.Name, lbl.Value) - } - infos = append(infos, infopb.NewTSDBInfo( - info.MinTime, - info.MaxTime, - labelpb.ZLabelsFromPromLabels(builder.Labels())), - ) - } - return infos +func (r *remoteEngine) PartitionLabelSets() []labels.Labels { + r.init() + return r.partitionLabelSets } func (r *remoteEngine) NewRangeQuery(_ context.Context, _ promql.QueryOpts, plan api.RemoteQuery, start, end time.Time, interval time.Duration) (promql.Query, error) { @@ -345,7 +349,7 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result { ReplicaLabels: r.opts.ReplicaLabels, MaxResolutionSeconds: maxResolution, EnableDedup: true, - ResponseBatchSize: defaultResponseBatchSize, + ResponseBatchSize: store.DefaultResponseBatchSize, } qry, err := r.client.Query(qctx, request) @@ -424,7 +428,7 @@ func (r *remoteQuery) Exec(ctx context.Context) *promql.Result { ReplicaLabels: r.opts.ReplicaLabels, MaxResolutionSeconds: maxResolution, EnableDedup: true, - ResponseBatchSize: defaultResponseBatchSize, + ResponseBatchSize: store.DefaultResponseBatchSize, } qry, err := r.client.QueryRange(qctx, request) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/batchable.go b/vendor/github.com/thanos-io/thanos/pkg/store/batchable.go 
index 9a399a51f83..555c4e481b8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/batchable.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/batchable.go @@ -66,3 +66,7 @@ func (b *batchableServer) Send(response *storepb.SeriesResponse) error { return nil } + +// DefaultResponseBatchSize is the default number of timeseries to batch per gRPC response message. +// This value provides a good balance between reducing per-message overhead and keeping message sizes reasonable. +const DefaultResponseBatchSize = 64 diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d4872287593..db1f55101ce 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -17,6 +17,7 @@ linters: - ineffassign - misspell - modernize + - noctx - perfsprint - revive - staticcheck @@ -88,6 +89,16 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + semconv: + list-mode: lax + files: + - "!**/semconv/**" + - "!**/exporters/zipkin/**" + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: "Use go.opentelemetry.io/otel/semconv/v1.40.0 instead. If a newer semconv version has been released, update the depguard rule." 
+ allow: + - go.opentelemetry.io/otel/semconv/v1.40.0 gocritic: disabled-checks: - appendAssign @@ -194,6 +205,7 @@ linters: arguments: - ["ID"] # AllowList - ["Otel", "Aws", "Gcp"] # DenyList + - - skip-package-name-collision-with-go-std: true - name: waitgroup-by-value testifylint: enable-all: true diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index e725282bec2..20edda4418c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,90 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.43.0/0.65.0/0.19.0] 2026-04-02 + +### Added + +- Add `IsRandom` and `WithRandom` on `TraceFlags`, and `IsRandom` on `SpanContext` in `go.opentelemetry.io/otel/trace` for [W3C Trace Context Level 2 Random Trace ID Flag](https://www.w3.org/TR/trace-context-2/#random-trace-id-flag) support. (#8012) +- Add service detection with `WithService` in `go.opentelemetry.io/otel/sdk/resource`. (#7642) +- Add `DefaultWithContext` and `EnvironmentWithContext` in `go.opentelemetry.io/otel/sdk/resource` to support plumbing `context.Context` through default and environment detectors. (#8051) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8038) +- Add support for per-series start time tracking for cumulative metrics in `go.opentelemetry.io/otel/sdk/metric`. + Set `OTEL_GO_X_PER_SERIES_START_TIMESTAMPS=true` to enable. (#8060) +- Add `WithCardinalityLimitSelector` for metric reader for configuring cardinality limits specific to the instrument kind. (#7855) + +### Changed + +- Introduce the `EMPTY` Type in `go.opentelemetry.io/otel/attribute` to reflect that an empty value is now a valid value, with `INVALID` remaining as a deprecated alias of `EMPTY`. (#8038) +- Improve slice handling in `go.opentelemetry.io/otel/attribute` to optimize short slice values with fixed-size fast paths. (#8039) +- Improve performance of span metric recording in `go.opentelemetry.io/otel/sdk/trace` by returning early if self-observability is not enabled. (#8067) +- Improve formatting of metric data diffs in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8073) + +### Deprecated + +- Deprecate `INVALID` in `go.opentelemetry.io/otel/attribute`. Use `EMPTY` instead. (#8038) + +### Fixed + +- Return spec-compliant `TraceIdRatioBased` description. This is a breaking behavioral change, but it is necessary to + make the implementation [spec-compliant](https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased). (#8027) +- Fix a race condition in `go.opentelemetry.io/otel/sdk/metric` where the lastvalue aggregation could collect the value 0 even when no zero-value measurements were recorded. (#8056) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. 
+ Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `kenv` command on BSD. (#8113) +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to correctly handle HTTP2 GOAWAY frame. (#8096) + +## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06 + +### Added + +- Add `go.opentelemetry.io/otel/semconv/v1.40.0` package. + The package contains semantic conventions from the `v1.40.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.40.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.39.0`. (#7985) +- Add `Err` and `SetErr` on `Record` in `go.opentelemetry.io/otel/log` to attach an error and set record exception attributes in `go.opentelemetry.io/otel/log/sdk`. (#7924) + +### Changed + +- `TracerProvider.ForceFlush` in `go.opentelemetry.io/otel/sdk/trace` joins errors together and continues iteration through SpanProcessors as opposed to returning the first encountered error without attempting exports on subsequent SpanProcessors. (#7856) + +### Fixed + +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to correctly handle HTTP2 GOAWAY frame. (#7931) +- Fix semconv v1.39.0 generated metric helpers skipping required attributes when extra attributes were empty. 
(#7964) +- Preserve W3C TraceFlags bitmask (including the random Trace ID flag) during trace context extraction and injection in `go.opentelemetry.io/otel/propagation`. (#7834) + +### Removed + +- Drop support for [Go 1.24]. (#7984) + +## [1.41.0/0.63.0/0.17.0/0.0.15] 2026-03-02 + +This release is the last to support [Go 1.24]. +The next release will require at least [Go 1.25]. + +### Added + +- Support testing of [Go 1.26]. (#7902) + +### Fixed + +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` and `Parse` and `New` in `go.opentelemetry.io/otel/baggage` to comply with W3C Baggage specification limits. + `New` and `Parse` now return partial baggage along with an error when limits are exceeded. + Errors from baggage extraction are reported to the global error handler. (#7880) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7914) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7914) +- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7914) + ## [1.40.0/0.62.0/0.16.0] 2026-02-02 ### Added @@ -3535,7 +3619,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.40.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.43.0...HEAD +[1.43.0/0.65.0/0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.43.0 +[1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0 +[1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0 [1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0 [1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 @@ -3635,6 +3722,7 @@ It contains api and sdk for trace and meter. +[Go 1.26]: https://go.dev/doc/go1.26 [Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 38dede93296..12de3607a34 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -746,8 +746,8 @@ Encapsulate setup in constructor functions, ensuring clear ownership and scope: import ( "errors" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) type SDKComponent struct { @@ -1039,7 +1039,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). 
-Use the metric semantic conventions convenience package [otelconv](./semconv/v1.39.0/otelconv/metric.go). +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.40.0/otelconv/metric.go). ##### Component Identification diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 44870248c32..42466f2d6a1 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -38,10 +38,14 @@ CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit +SEMCONVKIT_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/semconvkit -type f)) $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +$(TOOLS)/semconvkit: $(SEMCONVKIT_FILES) VERIFYREADMES = $(TOOLS)/verifyreadmes +VERIFYREADMES_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/verifyreadmes -type f)) $(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes +$(TOOLS)/verifyreadmes: $(VERIFYREADMES_FILES) GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint @@ -185,11 +189,10 @@ test-coverage: $(GOCOVMERGE) .PHONY: benchmark benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ - && cd $* \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. + cd $* && $(GO) test -run='^$$' -bench=. $(ARGS) ./... 
+ +print-sharded-benchmarks: + @echo $(OTEL_GO_MOD_DIRS) | jq -cR 'split(" ")' .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -215,7 +218,7 @@ go-mod-tidy/%: crosslink && $(GO) mod tidy -compat=1.21 .PHONY: lint -lint: misspell go-mod-tidy golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint .PHONY: vanity-import-check vanity-import-check: $(PORTO) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index c6335954311..16a72004c08 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -53,20 +53,20 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.26 | amd64 | | Ubuntu | 1.25 | amd64 | -| Ubuntu | 1.24 | amd64 | +| Ubuntu | 1.26 | 386 | | Ubuntu | 1.25 | 386 | -| Ubuntu | 1.24 | 386 | +| Ubuntu | 1.26 | arm64 | | Ubuntu | 1.25 | arm64 | -| Ubuntu | 1.24 | arm64 | +| macOS | 1.26 | amd64 | | macOS | 1.25 | amd64 | -| macOS | 1.24 | amd64 | +| macOS | 1.26 | arm64 | | macOS | 1.25 | arm64 | -| macOS | 1.24 | arm64 | +| Windows | 1.26 | amd64 | | Windows | 1.25 | amd64 | -| Windows | 1.24 | amd64 | +| Windows | 1.26 | 386 | | Windows | 1.25 | 386 | -| Windows | 1.24 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 861756fd745..6aff7548c9f 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -4,7 +4,9 @@ Create a `Version Release` issue to track the release process. -## Semantic Convention Generation +## Semantic Convention Upgrade + +### Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. 
The `semconv-generate` make target is used for this. @@ -22,6 +24,43 @@ make semconv-generate # Uses the exported TAG. This should create a new sub-package of [`semconv`](./semconv). Ensure things look correct before submitting a pull request to include the addition. +The `CHANGELOG.md` should also be updated to reflect the new changes: + +```md +- The `go.opentelemetry.io/otel/semconv/` package. The package contains semantic conventions from the `` version of the OpenTelemetry Semantic Conventions. See the [migration documentation](./semconv//MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/`. (#PR_NUMBER) +``` + +> **Tip:** Change to the release and prior version to match the changes + +### Update semconv imports + +Once the new semconv module has been generated, update all semconv imports throughout the codebase to reference the new version: + +```go +// Before +semconv "go.opentelemetry.io/otel/semconv/v1.37.0" +"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + + +// After +semconv "go.opentelemetry.io/otel/semconv/v1.39.0" +"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" +``` + +Once complete, run `make` to check for any compilation or test failures. + +#### Handling attribute changes + +Some semconv releases might add new attributes or impact attributes that are currently being used. Changes could stem from a simple renaming, to more complex changes like merging attributes and property values being changed. + +One should update the code to the new attributes that supersede the impacted ones, hence sticking to the semantic conventions. However, legacy attributes might still be emitted in accordance to the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable. + +For an example on how such migration might have to be tracked and performed, see issue [#7806](https://github.com/open-telemetry/opentelemetry-go/issues/7806). 
+ +### Go contrib linter update + +Update [.golangci.yml](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/.golangci.yml) in [opentelemetry-go-contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/) to mandate the new semconv version. + ## Breaking changes validation You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6cc1a1655cf..771dd69c55e 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -53,7 +53,7 @@ var ( _ Encoder = &defaultAttrEncoder{} // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 + encoderIDCounter atomic.Uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() @@ -64,7 +64,7 @@ var ( // once per each type of attribute encoder. Preferably in init() or in var // definition. func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} + return EncoderID{value: encoderIDCounter.Add(1)} } // DefaultEncoder returns an attribute encoder that encodes attributes in such diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go index 6aa69aeaecf..b09caaa6d7c 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/hash.go +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -27,6 +27,7 @@ const ( int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) + emptyID uint64 = 7305809155345288421 // "__empty_" (little endian) ) // hashKVs returns a new xxHash64 hash of kvs. 
@@ -80,7 +81,8 @@ func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { for i := 0; i < rv.Len(); i++ { h = h.String(rv.Index(i).String()) } - case INVALID: + case EMPTY: + h = h.Uint64(emptyID) default: // Logging is an alternative, but using the internal logger here // causes an import cycle so it is not done. diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 7f5eae877da..d9f51fa2d7f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -11,80 +11,63 @@ import ( "reflect" ) -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() +// sliceElem is the exact set of element types stored in attribute slice values. +// Using a closed set prevents accidental instantiations for unsupported types. +type sliceElem interface { + bool | int64 | float64 | string } -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} +// SliceValue converts a slice into an array with the same elements. +func SliceValue[T sliceElem](v []T) any { + // Keep only the common tiny-slice cases out of reflection. 
Extending this + // much further increases code size for diminishing benefit while larger + // slices still need the generic reflective path to preserve comparability. + // This matches the short lengths that show up most often in local + // benchmarks and semantic convention examples while leaving larger, less + // predictable slices on the generic reflective path. + switch len(v) { + case 0: + return [0]T{} + case 1: + return [1]T{v[0]} + case 2: + return [2]T{v[0], v[1]} + case 3: + return [3]T{v[0], v[1], v[2]} + } -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() + return sliceValueReflect(v) } -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v any) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil +// AsSlice converts an array into a slice with the same elements. +func AsSlice[T sliceElem](v any) []T { + // Mirror the small fixed-array fast path used by SliceValue. + switch a := v.(type) { + case [0]T: + return []T{} + case [1]T: + return []T{a[0]} + case [2]T: + return []T{a[0], a[1]} + case [3]T: + return []T{a[0], a[1], a[2]} } - cpy := make([]bool, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v any) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]int64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy + return asSliceReflect[T](v) } -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
-func AsFloat64Slice(v any) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]float64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy +func sliceValueReflect[T sliceElem](v []T) any { + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[T]())).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } -// AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v any) []string { +func asSliceReflect[T sliceElem](v any) []T { rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { + if !rv.IsValid() || rv.Kind() != reflect.Array || rv.Type().Elem() != reflect.TypeFor[T]() { return nil } - cpy := make([]string, rv.Len()) + cpy := make([]T, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 8c6928ca79b..0cc368018be 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -15,7 +15,7 @@ type KeyValue struct { // Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID + return kv.Key.Defined() } // Bool creates a KeyValue with a BOOL Value type. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index 24f1fa37dbe..6c04448d6f0 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[INVALID-0] + _ = x[EMPTY-0] _ = x[BOOL-1] _ = x[INT64-2] _ = x[FLOAT64-3] @@ -19,9 +19,9 @@ func _() { _ = x[STRINGSLICE-8] } -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" +const _Type_name = "EMPTYBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} +var _Type_index = [...]uint8{0, 5, 9, 14, 21, 27, 36, 46, 58, 69} func (i Type) String() string { idx := int(i) - 0 diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 5931e71291a..db04b1326c3 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -6,7 +6,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" - "reflect" "strconv" attribute "go.opentelemetry.io/otel/attribute/internal" @@ -18,6 +17,8 @@ import ( type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. +// +// Note that the zero value is a valid empty value. type Value struct { vtype Type numeric uint64 @@ -26,8 +27,8 @@ type Value struct { } const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota + // EMPTY is used for a Value with no value set. + EMPTY Type = iota // BOOL is a boolean Type Value. BOOL // INT64 is a 64-bit signed integral Type Value. @@ -44,6 +45,10 @@ const ( FLOAT64SLICE // STRINGSLICE is a slice of strings Type Value. STRINGSLICE + // INVALID is used for a Value with no value set. + // + // Deprecated: Use EMPTY instead as an empty value is a valid value. + INVALID = EMPTY ) // BoolValue creates a BOOL Value. @@ -56,7 +61,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. 
func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -64,16 +69,30 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } -// IntSliceValue creates an INTSLICE Value. +// IntSliceValue creates an INT64SLICE Value. func IntSliceValue(v []int) Value { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), + val := Value{vtype: INT64SLICE} + + // Avoid the common tiny-slice cases from allocating a new slice. + switch len(v) { + case 0: + val.slice = [0]int64{} + case 1: + val.slice = [1]int64{int64(v[0])} + case 2: + val.slice = [2]int64{int64(v[0]), int64(v[1])} + case 3: + val.slice = [3]int64{int64(v[0]), int64(v[1]), int64(v[2])} + default: + // Fallback to a new slice for larger slices. + cp := make([]int64, len(v)) + for i, val := range v { + cp[i] = int64(val) + } + val.slice = attribute.SliceValue(cp) } + + return val } // Int64Value creates an INT64 Value. @@ -86,7 +105,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -99,7 +118,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -112,7 +131,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. 
func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -136,7 +155,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -155,7 +174,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -174,7 +193,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -193,7 +212,7 @@ func (v Value) AsStringSlice() []string { } func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -217,6 +236,8 @@ func (v Value) AsInterface() any { return v.stringly case STRINGSLICE: return v.asStringSlice() + case EMPTY: + return nil } return unknownValueType{} } @@ -252,6 +273,8 @@ func (v Value) Emit() string { return string(j) case STRING: return v.stringly + case EMPTY: + return "" default: return "unknown" } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c4093e49ae5..878ffbe43a5 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -14,8 +14,7 @@ import ( ) const ( - maxMembers = 180 - maxBytesPerMembers = 4096 + maxMembers = 64 maxBytesPerBaggageString = 8192 listDelimiter = "," @@ -29,7 +28,6 @@ var ( 
errInvalidProperty = errors.New("invalid baggage list-member property") errInvalidMember = errors.New("invalid baggage list-member") errMemberNumber = errors.New("too many list-members in baggage-string") - errMemberBytes = errors.New("list-member too large") errBaggageBytes = errors.New("baggage-string too large") ) @@ -309,10 +307,6 @@ func newInvalidMember() Member { // an error if the input is invalid according to the W3C Baggage // specification. func parseMember(member string) (Member, error) { - if n := len(member); n > maxBytesPerMembers { - return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) - } - var props properties keyValue, properties, found := strings.Cut(member, propertyDelimiter) if found { @@ -430,6 +424,10 @@ type Baggage struct { //nolint:golint // New returns a new valid Baggage. It returns an error if it results in a // Baggage exceeding limits set in that specification. // +// If the resulting Baggage exceeds the maximum allowed members or bytes, +// members are dropped until the limits are satisfied and an error is returned +// along with the partial result. +// // It expects all the provided members to have already been validated. func New(members ...Member) (Baggage, error) { if len(members) == 0 { @@ -441,7 +439,6 @@ func New(members ...Member) (Baggage, error) { if !m.hasData { return Baggage{}, errInvalidMember } - // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, @@ -449,17 +446,42 @@ func New(members ...Member) (Baggage, error) { } } - // Check member numbers after deduplication. + var truncateErr error + + // Check member count after deduplication. 
if len(b) > maxMembers { - return Baggage{}, errMemberNumber + truncateErr = errors.Join(truncateErr, errMemberNumber) + for k := range b { + if len(b) <= maxMembers { + break + } + delete(b, k) + } } - bag := Baggage{b} - if n := len(bag.String()); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + // Check byte size and drop members if necessary. + totalBytes := 0 + first := true + for k := range b { + m := Member{ + key: k, + value: b[k].Value, + properties: fromInternalProperties(b[k].Properties), + } + memberSize := len(m.String()) + if !first { + memberSize++ // comma separator + } + if totalBytes+memberSize > maxBytesPerBaggageString { + truncateErr = errors.Join(truncateErr, fmt.Errorf("%w: %d", errBaggageBytes, totalBytes+memberSize)) + delete(b, k) + continue + } + totalBytes += memberSize + first = false } - return bag, nil + return Baggage{b}, truncateErr } // Parse attempts to decode a baggage-string from the passed string. It @@ -470,36 +492,71 @@ func New(members ...Member) (Baggage, error) { // defined (reading left-to-right) will be the only one kept. This diverges // from the W3C Baggage specification which allows duplicate list-members, but // conforms to the OpenTelemetry Baggage specification. +// +// If the baggage-string exceeds the maximum allowed members (64) or bytes +// (8192), members are dropped until the limits are satisfied and an error is +// returned along with the partial result. +// +// Invalid members are skipped and the error is returned along with the +// partial result containing the valid members. 
func Parse(bStr string) (Baggage, error) { if bStr == "" { return Baggage{}, nil } - if n := len(bStr); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - b := make(baggage.List) + sizes := make(map[string]int) // Track per-key byte sizes + var totalBytes int + var truncateErr error for memberStr := range strings.SplitSeq(bStr, listDelimiter) { + // Check member count limit. + if len(b) >= maxMembers { + truncateErr = errors.Join(truncateErr, errMemberNumber) + break + } + m, err := parseMember(memberStr) if err != nil { - return Baggage{}, err + truncateErr = errors.Join(truncateErr, err) + continue // skip invalid member, keep processing } + + // Check byte size limit. + // Account for comma separator between members. + memberBytes := len(m.String()) + _, existingKey := b[m.key] + if !existingKey && len(b) > 0 { + memberBytes++ // comma separator only for new keys + } + + // Calculate new totalBytes if we add/overwrite this key + var newTotalBytes int + if oldSize, exists := sizes[m.key]; exists { + // Overwriting existing key: subtract old size, add new size + newTotalBytes = totalBytes - oldSize + memberBytes + } else { + // New key + newTotalBytes = totalBytes + memberBytes + } + + if newTotalBytes > maxBytesPerBaggageString { + truncateErr = errors.Join(truncateErr, errBaggageBytes) + break + } + // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, Properties: m.properties.asInternal(), } + sizes[m.key] = memberBytes + totalBytes = newTotalBytes } - // OpenTelemetry does not allow for duplicate list-members, but the W3C - // specification does. Now that we have deduplicated, ensure the baggage - // does not exceed list-member limits. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber + if len(b) == 0 { + return Baggage{}, truncateErr } - - return Baggage{b}, nil + return Baggage{b}, truncateErr } // Member returns the baggage list-member identified by key. 
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 676e79116d1..7a9b3c05590 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.20.0@sha256:fa4f1c6954ecea78ab1a4e865bd6f5b4aaba80c1896f9f4a11e2c361d04e197e AS weaver +FROM otel/weaver:v0.22.1@sha256:33ae522ae4b71c1c562563c1d81f46aa0f79f088a0873199143a1f11ac30e5c9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go b/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go new file mode 100644 index 00000000000..3f0ab313123 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package errorhandler provides the global error handler for OpenTelemetry. +// +// This package has no OTel dependencies, allowing it to be imported by any +// package in the module without creating import cycles. +package errorhandler // import "go.opentelemetry.io/otel/internal/errorhandler" + +import ( + "errors" + "log" + "sync" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. 
+var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +type errorHandlerHolder struct { + eh ErrorHandler +} + +var ( + globalErrorHandler = defaultErrorHandler() + delegateErrorHandlerOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. 
+ log.Print(errors.New("no ErrorHandler delegate configured"), " ErrorHandler remains its current value.") + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 2e47b2964c8..77d0425f54e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -5,33 +5,13 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( - "log" - "sync/atomic" + "go.opentelemetry.io/otel/internal/errorhandler" ) -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) -} +// ErrorHandler is an alias for errorhandler.ErrorHandler, kept for backward +// compatibility with existing callers of internal/global. +type ErrorHandler = errorhandler.ErrorHandler -type ErrDelegator struct { - delegate atomic.Pointer[ErrorHandler] -} - -// Compile-time check that delegator implements ErrorHandler. -var _ ErrorHandler = (*ErrDelegator)(nil) - -func (d *ErrDelegator) Handle(err error) { - if eh := d.delegate.Load(); eh != nil { - (*eh).Handle(err) - return - } - log.Print(err) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *ErrDelegator) setDelegate(eh ErrorHandler) { - d.delegate.Store(&eh) -} +// ErrDelegator is an alias for errorhandler.ErrDelegator, kept for backward +// compatibility with existing callers of internal/global. 
+type ErrDelegator = errorhandler.ErrDelegator diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 204ea142a50..225c9e50155 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -8,16 +8,13 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel/internal/errorhandler" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) type ( - errorHandlerHolder struct { - eh ErrorHandler - } - tracerProviderHolder struct { tp trace.TracerProvider } @@ -32,12 +29,10 @@ type ( ) var ( - globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() - delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once @@ -53,7 +48,7 @@ var ( // Subsequent calls to SetErrorHandler after the first will not forward errors // to the new ErrorHandler for prior returned instances. func GetErrorHandler() ErrorHandler { - return globalErrorHandler.Load().(errorHandlerHolder).eh + return errorhandler.GetErrorHandler() } // SetErrorHandler sets the global ErrorHandler to h. @@ -63,26 +58,7 @@ func GetErrorHandler() ErrorHandler { // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not // delegate errors to h. func SetErrorHandler(h ErrorHandler) { - current := GetErrorHandler() - - if _, cOk := current.(*ErrDelegator); cOk { - if _, ehOk := h.(*ErrDelegator); ehOk && current == h { - // Do not assign to the delegate of the default ErrDelegator to be - // itself. 
- Error( - errors.New("no ErrorHandler delegate configured"), - "ErrorHandler remains its current value.", - ) - return - } - } - - delegateErrorHandlerOnce.Do(func() { - if def, ok := current.(*ErrDelegator); ok { - def.setDelegate(h) - } - }) - globalErrorHandler.Store(errorHandlerHolder{eh: h}) + errorhandler.SetErrorHandler(h) } // TracerProvider is the internal implementation for global.TracerProvider. @@ -174,12 +150,6 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } -func defaultErrorHandler() *atomic.Value { - v := &atomic.Value{} - v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) - return v -} - func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index eb4f5961fd1..466812d3435 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -211,6 +211,9 @@ type Float64Observer interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Observe(value float64, options ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 1dfc4b0f25c..66c971bd8a9 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -210,6 +210,9 @@ type Int64Observer interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Observe(value int64, options ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index a16c4c0a14e..5606ec4bd9e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -30,6 +30,9 @@ type MeterProvider interface { // // If the name is empty, then an implementation defined default name will // be used instead. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Meter(name string, opts ...MeterOption) Meter } @@ -51,6 +54,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument @@ -61,6 +67,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by @@ -71,6 +80,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) // Int64Gauge returns a new Int64Gauge instrument identified by name and @@ -80,6 +92,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified @@ -95,6 +110,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter @@ -110,6 +128,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Int64ObservableUpDownCounter( name string, options ...Int64ObservableUpDownCounterOption, @@ -128,6 +149,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by @@ -148,6 +172,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by @@ -158,6 +185,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) // Float64Gauge returns a new Float64Gauge instrument identified by name and @@ -167,6 +197,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter @@ -182,6 +215,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new @@ -197,6 +233,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64ObservableUpDownCounter( name string, options ...Float64ObservableUpDownCounterOption, @@ -215,6 +254,9 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a @@ -229,6 +271,9 @@ type Meter interface { // If no instruments are passed, f should not be registered nor called // during collection. // + // Implementations of this method need to be safe for a user to call + // concurrently. + // // The function f needs to be concurrent safe. RegisterCallback(f Callback, instruments ...Observable) (Registration, error) } @@ -263,9 +308,15 @@ type Observer interface { embedded.Observer // ObserveFloat64 records the float64 value for obsrv. + // + // Implementations of this method need to be safe for a user to call + // concurrently. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) // ObserveInt64 records the int64 value for obsrv. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } @@ -283,6 +334,7 @@ type Registration interface { // Unregister removes the callback registration from a Meter. // - // This method needs to be idempotent and concurrent safe. + // Implementations of this method need to be idempotent and safe for a user + // to call concurrently. Unregister() error } diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 57a74c5e657..abb3051d7fc 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -24,12 +24,18 @@ type Float64Counter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr float64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -83,12 +89,18 @@ type Float64UpDownCounter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr float64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. 
+ // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -142,12 +154,18 @@ type Float64Histogram interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, incr float64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -206,12 +224,18 @@ type Float64Gauge interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, value float64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index ac2d033ea6f..5bbfaf0397e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -24,12 +24,18 @@ type Int64Counter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. 
+ // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr int64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -83,12 +89,18 @@ type Int64UpDownCounter interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Add(ctx context.Context, incr int64, options ...AddOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } @@ -142,12 +154,18 @@ type Int64Histogram interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, incr int64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. 
Enabled(context.Context) bool } @@ -206,12 +224,18 @@ type Int64Gauge interface { // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Record(ctx context.Context, value int64, options ...RecordOption) // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. + // + // Implementations of this method need to be safe for a user to call + // concurrently. Enabled(context.Context) bool } diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 0518826020e..2ecca3fed1e 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -7,9 +7,16 @@ import ( "context" "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/internal/errorhandler" ) -const baggageHeader = "baggage" +const ( + baggageHeader = "baggage" + + // W3C Baggage specification limits. + // https://www.w3.org/TR/baggage/#limits + maxMembers = 64 +) // Baggage is a propagator that supports the W3C Baggage format. 
// @@ -50,6 +57,9 @@ func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) contex bag, err := baggage.Parse(bStr) if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if bag.Len() == 0 { return parent } return baggage.ContextWithBaggage(parent, bag) @@ -60,17 +70,27 @@ func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.C if len(bVals) == 0 { return parent } + var members []baggage.Member for _, bStr := range bVals { currBag, err := baggage.Parse(bStr) if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if currBag.Len() == 0 { continue } members = append(members, currBag.Members()...) + if len(members) >= maxMembers { + break + } } b, err := baggage.New(members...) - if err != nil || b.Len() == 0 { + if err != nil { + errorhandler.GetErrorHandler().Handle(err) + } + if b.Len() == 0 { return parent } return baggage.ContextWithBaggage(parent, b) diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 271ab71f1ae..11f404deb73 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -46,8 +46,8 @@ func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { carrier.Set(tracestateHeader, ts) } - // Clear all flags other than the trace-context supported sampling bit. - flags := sc.TraceFlags() & trace.FlagsSampled + // Preserve only the spec-defined flags: sampled (0x01) and random (0x02). 
+ flags := sc.TraceFlags() & (trace.FlagsSampled | trace.FlagsRandom) var sb strings.Builder sb.Grow(2 + 32 + 16 + 2 + 3) @@ -104,14 +104,13 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { if !extractPart(opts[:], &h, 2) { return trace.SpanContext{} } - if version == 0 && (h != "" || opts[0] > 2) { - // version 0 not allow extra - // version 0 not allow other flag + if version == 0 && (h != "" || opts[0] > 3) { + // version 0 does not allow extra fields or reserved flag bits. return trace.SpanContext{} } - // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. + scc.TraceFlags = trace.TraceFlags(opts[0]) & //nolint:gosec // slice size already checked. + (trace.FlagsSampled | trace.FlagsRandom) // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index 1bb55fb1cc5..7c541dee79e 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.4.1 +codespell==2.4.2 diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go index bfeb73e811b..694b64a3180 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -37,3 +37,18 @@ var Observability = newFeature( return "", false }, ) + +// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK +// uses the new Start Timestamps specification. +// +// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable +// to the case-insensitive string value of "true". 
+var PerSeriesStartTimestamps = newFeature( + []string{"PER_SERIES_START_TIMESTAMPS"}, + func(v string) (bool, bool) { + if strings.EqualFold(v, "true") { + return true, true + } + return false, false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index c6440a1346c..306e5e3cdce 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -160,12 +160,14 @@ func WithExemplarFilter(filter exemplar.Filter) Option { }) } -// WithCardinalityLimit sets the cardinality limit for the MeterProvider. +// WithCardinalityLimit sets the global cardinality limit for the MeterProvider. // // The cardinality limit is the hard limit on the number of metric datapoints // that can be collected for a single instrument in a single collect cycle. // // Setting this to a zero or negative value means no limit is applied. +// This value applies to all instrument kinds, but can be overridden per kind by +// the reader's cardinality limit selector (see [WithCardinalityLimitSelector]). func WithCardinalityLimit(limit int) Option { // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT` // can also be used to set this value. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index 2aeba437894..312d73c4575 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -8,10 +8,12 @@ import ( "errors" "math" "sync" + "sync/atomic" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -20,11 +22,6 @@ const ( expoMinScale = -10 smallestNonZeroNormalFloat64 = 0x1p-1022 - - // These redefine the Math constants with a type, so the compiler won't coerce - // them into an int on 32 bit platforms. - maxInt64 int64 = math.MaxInt64 - minInt64 int64 = math.MinInt64 ) // expoHistogramDataPoint is a single data point in an exponential histogram. 
@@ -32,19 +29,19 @@ type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set res FilteredExemplarReservoir[N] - min N - max N - sum N + minMax atomicMinMax[N] + sum atomicCounter[N] maxSize int noMinMax bool noSum bool - scale int32 + scale atomic.Int32 posBuckets expoBuckets negBuckets expoBuckets - zeroCount uint64 + zeroCount atomic.Uint64 + startTime time.Time } func newExpoHistogramDataPoint[N int64 | float64]( @@ -53,42 +50,30 @@ func newExpoHistogramDataPoint[N int64 | float64]( maxScale int32, noMinMax, noSum bool, ) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag - f := math.MaxFloat64 - ma := N(f) // if N is int64, max will overflow to -9223372036854775808 - mi := N(-f) - if N(maxInt64) > N(f) { - ma = N(maxInt64) - mi = N(minInt64) - } - return &expoHistogramDataPoint[N]{ - attrs: attrs, - min: ma, - max: mi, - maxSize: maxSize, - noMinMax: noMinMax, - noSum: noSum, - scale: maxScale, + dp := &expoHistogramDataPoint[N]{ + attrs: attrs, + maxSize: maxSize, + noMinMax: noMinMax, + noSum: noSum, + startTime: now(), } + dp.scale.Store(maxScale) + return dp } // record adds a new measurement to the histogram. It will rescale the buckets if needed. func (p *expoHistogramDataPoint[N]) record(v N) { if !p.noMinMax { - if v < p.min { - p.min = v - } - if v > p.max { - p.max = v - } + p.minMax.Update(v) } if !p.noSum { - p.sum += v + p.sum.add(v) } absV := math.Abs(float64(v)) if float64(absV) == 0.0 { - p.zeroCount++ + p.zeroCount.Add(1) return } @@ -102,14 +87,15 @@ func (p *expoHistogramDataPoint[N]) record(v N) { // If the new bin would make the counts larger than maxScale, we need to // downscale current measurements. if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 { - if p.scale-scaleDelta < expoMinScale { + currentScale := p.scale.Load() + if currentScale-scaleDelta < expoMinScale { // With a scale of -10 there is only two buckets for the whole range of float64 values. 
// This can only happen if there is a max size of 1. otel.Handle(errors.New("exponential histogram scale underflow")) return } // Downscale - p.scale -= scaleDelta + p.scale.Add(-scaleDelta) p.posBuckets.downscale(scaleDelta) p.negBuckets.downscale(scaleDelta) @@ -124,7 +110,8 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { frac, expInt := math.Frexp(v) // 11-bit exponential. exp := int32(expInt) // nolint: gosec - if p.scale <= 0 { + scale := p.scale.Load() + if scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. var correction int32 = 1 if frac == .5 { @@ -132,9 +119,9 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { // will be one higher than we want. correction = 2 } - return (exp - correction) >> (-p.scale) + return (exp - correction) >> (-scale) } - return exp<= b.startBin && int(bin) <= endBin { - b.counts[bin-b.startBin]++ + b.counts[bin-b.startBin].Add(1) return } // if the new bin is before the current start add spaces to the counts @@ -223,16 +211,22 @@ func (b *expoBuckets) record(bin int32) { shift := b.startBin - bin if newLength > cap(b.counts) { - b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) + b.counts = append(b.counts, make([]atomic.Uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+int(shift)], b.counts) b.counts = b.counts[:newLength] + + // Shift existing elements to the right. Go's copy() doesn't work for + // structs like atomic.Uint64. 
+ for i := origLen - 1; i >= 0; i-- { + b.counts[i+int(shift)].Store(b.counts[i].Load()) + } + for i := 1; i < int(shift); i++ { - b.counts[i] = 0 + b.counts[i].Store(0) } b.startBin = bin - b.counts[0] = 1 + b.counts[0].Store(1) return } // if the new is after the end add spaces to the end @@ -240,15 +234,15 @@ func (b *expoBuckets) record(bin int32) { if int(bin-b.startBin) < cap(b.counts) { b.counts = b.counts[:bin-b.startBin+1] for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { - b.counts[i] = 0 + b.counts[i].Store(0) } - b.counts[bin-b.startBin] = 1 + b.counts[bin-b.startBin].Store(1) return } - end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) + end := make([]atomic.Uint64, int(bin-b.startBin)-len(b.counts)+1) b.counts = append(b.counts, end...) - b.counts[bin-b.startBin] = 1 + b.counts[bin-b.startBin].Store(1) } } @@ -275,10 +269,10 @@ func (b *expoBuckets) downscale(delta int32) { for i := 1; i < len(b.counts); i++ { idx := i + int(offset) if idx%int(steps) == 0 { - b.counts[idx/int(steps)] = b.counts[i] + b.counts[idx/int(steps)].Store(b.counts[i].Load()) continue } - b.counts[idx/int(steps)] += b.counts[i] + b.counts[idx/int(steps)].Add(b.counts[i].Load()) } lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) @@ -288,8 +282,8 @@ func (b *expoBuckets) downscale(delta int32) { func (b *expoBuckets) count() uint64 { var total uint64 - for _, count := range b.counts { - total += count + for i := range b.counts { + total += b.counts[i].Load() } return total } @@ -386,8 +380,8 @@ func (e *expoHistogram[N]) delta( hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count() - hDPts[i].Scale = val.scale - hDPts[i].ZeroCount = val.zeroCount + hDPts[i].Scale = val.scale.Load() + hDPts[i].ZeroCount = val.zeroCount.Load() hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin @@ -396,7 +390,9 @@ func (e *expoHistogram[N]) delta( len(val.posBuckets.counts), len(val.posBuckets.counts), ) - 
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + for j := range val.posBuckets.counts { + hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load() + } hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset( @@ -404,14 +400,18 @@ func (e *expoHistogram[N]) delta( len(val.negBuckets.counts), len(val.negBuckets.counts), ) - copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + for j := range val.negBuckets.counts { + hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load() + } if !e.noSum { - hDPts[i].Sum = val.sum + hDPts[i].Sum = val.sum.load() } if !e.noMinMax { - hDPts[i].Min = metricdata.NewExtrema(val.min) - hDPts[i].Max = metricdata.NewExtrema(val.max) + if val.minMax.set.Load() { + hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load()) + hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load()) + } } collectExemplars(&hDPts[i].Exemplars, val.res.Collect) @@ -443,14 +443,21 @@ func (e *expoHistogram[N]) cumulative( n := len(e.values) hDPts := reset(h.DataPoints, n, n) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int for _, val := range e.values { hDPts[i].Attributes = val.attrs - hDPts[i].StartTime = e.start + + startTime := e.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } + hDPts[i].StartTime = startTime hDPts[i].Time = t hDPts[i].Count = val.count() - hDPts[i].Scale = val.scale - hDPts[i].ZeroCount = val.zeroCount + hDPts[i].Scale = val.scale.Load() + hDPts[i].ZeroCount = val.zeroCount.Load() hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin @@ -459,7 +466,9 @@ func (e *expoHistogram[N]) cumulative( len(val.posBuckets.counts), len(val.posBuckets.counts), ) - copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + for j := range val.posBuckets.counts { + hDPts[i].PositiveBucket.Counts[j] = val.posBuckets.counts[j].Load() + } hDPts[i].NegativeBucket.Offset = 
val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset( @@ -467,14 +476,18 @@ func (e *expoHistogram[N]) cumulative( len(val.negBuckets.counts), len(val.negBuckets.counts), ) - copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + for j := range val.negBuckets.counts { + hDPts[i].NegativeBucket.Counts[j] = val.negBuckets.counts[j].Load() + } if !e.noSum { - hDPts[i].Sum = val.sum + hDPts[i].Sum = val.sum.load() } if !e.noMinMax { - hDPts[i].Min = metricdata.NewExtrema(val.min) - hDPts[i].Max = metricdata.NewExtrema(val.max) + if val.minMax.set.Load() { + hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load()) + hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load()) + } } collectExemplars(&hDPts[i].Exemplars, val.res.Collect) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 421325fb728..83582c670cd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -11,6 +11,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -27,8 +28,9 @@ type hotColdHistogramPoint[N int64 | float64] struct { hcwg hotColdWaitGroup hotColdPoint [2]histogramPointCounters[N] - attrs attribute.Set - res FilteredExemplarReservoir[N] + attrs attribute.Set + res FilteredExemplarReservoir[N] + startTime time.Time } // histogramPointCounters contains only the atomic counter data, and is used by @@ -298,6 +300,7 @@ func (s *cumulativeHistogram[N]) measure( counts: make([]atomic.Uint64, len(s.bounds)+1), }, }, + startTime: now(), } return hPt }).(*hotColdHistogramPoint[N]) @@ -339,16 +342,23 @@ func (s *cumulativeHistogram[N]) collect( // current length for capacity. 
hDPts := reset(h.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { val := value.(*hotColdHistogramPoint[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } // swap, observe, and clear the point readIdx := val.hcwg.swapHotAndWait() var bucketCounts []uint64 count := val.hotColdPoint[readIdx].loadCountsInto(&bucketCounts) newPt := metricdata.HistogramDataPoint[N]{ Attributes: val.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Count: count, Bounds: bounds, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 4924d732cb3..4c004bc99d8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -8,14 +8,16 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // lastValuePoint is timestamped measurement data. type lastValuePoint[N int64 | float64] struct { - attrs attribute.Set - value atomicN[N] - res FilteredExemplarReservoir[N] + attrs attribute.Set + value atomicN[N] + res FilteredExemplarReservoir[N] + startTime time.Time } // lastValueMap summarizes a set of measurements as the last one made. @@ -31,10 +33,13 @@ func (s *lastValueMap[N]) measure( droppedAttr []attribute.KeyValue, ) { lv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { - return &lastValuePoint[N]{ - res: s.newRes(attr), - attrs: attr, + p := &lastValuePoint[N]{ + res: s.newRes(attr), + attrs: attr, + startTime: now(), } + p.value.Store(value) + return p }).(*lastValuePoint[N]) lv.value.Store(value) @@ -156,12 +161,19 @@ func (s *cumulativeLastValue[N]) collect( // current length for capacity. 
dPts := reset(gData.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { v := value.(*lastValuePoint[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = v.startTime + } newPt := metricdata.DataPoint[N]{ Attributes: v.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Value: v.value.Load(), } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 66cb68085fd..3fe7c7cf046 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -8,13 +8,15 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { - n atomicCounter[N] - res FilteredExemplarReservoir[N] - attrs attribute.Set + n atomicCounter[N] + res FilteredExemplarReservoir[N] + attrs attribute.Set + startTime time.Time } type sumValueMap[N int64 | float64] struct { @@ -30,8 +32,9 @@ func (s *sumValueMap[N]) measure( ) { sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { return &sumValue[N]{ - res: s.newRes(attr), - attrs: attr, + res: s.newRes(attr), + attrs: attr, + startTime: now(), } }).(*sumValue[N]) sv.n.add(value) @@ -160,12 +163,19 @@ func (s *cumulativeSum[N]) collect( // current length for capacity. 
dPts := reset(sData.DataPoints, 0, s.values.Len()) + perSeriesStartTimeEnabled := x.PerSeriesStartTimestamps.Enabled() + var i int s.values.Range(func(_, value any) bool { val := value.(*sumValue[N]) + + startTime := s.start + if perSeriesStartTimeEnabled { + startTime = val.startTime + } newPt := metricdata.DataPoint[N]{ Attributes: val.attrs, - StartTime: s.start, + StartTime: startTime, Time: t, Value: val.n.load(), } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go index 66788c9e967..2d2b987c5dd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go @@ -16,8 +16,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 5b0630207b5..0357afd455f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -32,8 +32,9 @@ type ManualReader struct { isShutdown bool externalProducers atomic.Value - temporalitySelector TemporalitySelector - aggregationSelector AggregationSelector + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + cardinalityLimitSelector CardinalityLimitSelector inst *observ.Instrumentation } @@ -45,8 +46,9 @@ var _ = map[Reader]struct{}{&ManualReader{}: {}} func NewManualReader(opts ...ManualReaderOption) *ManualReader { cfg := newManualReaderConfig(opts) r := &ManualReader{ - 
temporalitySelector: cfg.temporalitySelector, - aggregationSelector: cfg.aggregationSelector, + temporalitySelector: cfg.temporalitySelector, + aggregationSelector: cfg.aggregationSelector, + cardinalityLimitSelector: cfg.cardinalityLimitSelector, } r.externalProducers.Store(cfg.producers) @@ -89,6 +91,11 @@ func (mr *ManualReader) aggregation( return mr.aggregationSelector(kind) } +// cardinalityLimit returns the cardinality limit for kind. +func (mr *ManualReader) cardinalityLimit(kind InstrumentKind) (int, bool) { + return mr.cardinalityLimitSelector(kind) +} + // Shutdown closes any connections and frees any resources used by the reader. // // This method is safe to call concurrently. @@ -179,16 +186,18 @@ func (r *ManualReader) MarshalLog() any { // manualReaderConfig contains configuration options for a ManualReader. type manualReaderConfig struct { - temporalitySelector TemporalitySelector - aggregationSelector AggregationSelector - producers []Producer + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + cardinalityLimitSelector CardinalityLimitSelector + producers []Producer } // newManualReaderConfig returns a manualReaderConfig configured with options. 
func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig { cfg := manualReaderConfig{ - temporalitySelector: DefaultTemporalitySelector, - aggregationSelector: DefaultAggregationSelector, + temporalitySelector: DefaultTemporalitySelector, + aggregationSelector: DefaultAggregationSelector, + cardinalityLimitSelector: defaultCardinalityLimitSelector, } for _, opt := range opts { cfg = opt.applyManual(cfg) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index ef40ef29a34..d1efc9f374a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) // Default periodic reader timing. @@ -26,17 +26,19 @@ const ( // periodicReaderConfig contains configuration options for a PeriodicReader. type periodicReaderConfig struct { - interval time.Duration - timeout time.Duration - producers []Producer + interval time.Duration + timeout time.Duration + producers []Producer + cardinalityLimitSelector CardinalityLimitSelector } // newPeriodicReaderConfig returns a periodicReaderConfig configured with // options. 
func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { c := periodicReaderConfig{ - interval: envDuration(envInterval, defaultInterval), - timeout: envDuration(envTimeout, defaultTimeout), + interval: envDuration(envInterval, defaultInterval), + timeout: envDuration(envTimeout, defaultTimeout), + cardinalityLimitSelector: defaultCardinalityLimitSelector, } for _, o := range options { c = o.applyPeriodic(c) @@ -107,14 +109,17 @@ func WithInterval(d time.Duration) PeriodicReaderOption { // exporter. That is left to the user to accomplish. func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { conf := newPeriodicReaderConfig(options) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel( //nolint:gosec // cancel called during PeriodicReader shutdown. + context.Background(), + ) r := &PeriodicReader{ - interval: conf.interval, - timeout: conf.timeout, - exporter: exporter, - flushCh: make(chan chan error), - cancel: cancel, - done: make(chan struct{}), + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + cardinalityLimitSelector: conf.cardinalityLimitSelector, rmPool: sync.Pool{ New: func() any { return &metricdata.ResourceMetrics{} @@ -168,6 +173,8 @@ type PeriodicReader struct { rmPool sync.Pool + cardinalityLimitSelector CardinalityLimitSelector + inst *observ.Instrumentation } @@ -220,6 +227,11 @@ func (r *PeriodicReader) aggregation( return r.exporter.Aggregation(kind) } +// cardinalityLimit returns the cardinality limit for kind. +func (r *PeriodicReader) cardinalityLimit(kind InstrumentKind) (int, bool) { + return r.cardinalityLimitSelector(kind) +} + // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. 
func (r *PeriodicReader) collectAndExport(ctx context.Context) error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index ab269cdfd69..34300a786ca 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -301,7 +301,7 @@ func (i *inserter[N]) addCallback(cback func(context.Context) error) { i.pipeline.callbacks = append(i.pipeline.callbacks, cback) } -var aggIDCount uint64 +var aggIDCount atomic.Uint64 // aggVal is the cached value in an aggregators cache. type aggVal[N int64 | float64] struct { @@ -395,9 +395,7 @@ func (i *inserter[N]) cachedAggregator( b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation // limits for the builder (an all the created aggregates). - // cardinalityLimit will be 0 by default if unset (or - // unrecognized input). Use that value directly. - b.AggregationLimit = i.pipeline.cardinalityLimit + b.AggregationLimit = i.getCardinalityLimit(kind) in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} @@ -413,12 +411,24 @@ func (i *inserter[N]) cachedAggregator( unit: stream.Unit, compAgg: out, }) - id := atomic.AddUint64(&aggIDCount, 1) + id := aggIDCount.Add(1) return aggVal[N]{id, in, err} }) return cv.Measure, cv.ID, cv.Err } +// getCardinalityLimit returns the cardinality limit for the given instrument kind. +// When the reader's selector returns fallback = true, the pipeline's global +// limit is used, then the default if global is unset. When fallback is false, +// the selector's limit is used (0 or less means unlimited). 
+func (i *inserter[N]) getCardinalityLimit(kind InstrumentKind) int { + limit, fallback := i.pipeline.reader.cardinalityLimit(kind) + if fallback { + return i.pipeline.cardinalityLimit + } + return limit +} + // logConflict validates if an instrument with the same case-insensitive name // as id has already been created. If that instrument conflicts with id, a // warning is logged. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 7b205c736c2..99079dd2783 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -59,6 +59,15 @@ type Reader interface { // Reader methods. aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. + // cardinalityLimit returns the cardinality limit for an instrument kind. + // When fallback is true, the pipeline falls back to the provider's global limit. + // When fallback is false, limit is used: 0 or less means no limit (unlimited), + // and a positive value is the limit for that kind. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. + cardinalityLimit(InstrumentKind) (limit int, fallback bool) + // Collect gathers and returns all metric data related to the Reader from // the SDK and stores it in rm. An error is returned if this is called // after Shutdown or if rm is nil. @@ -192,6 +201,25 @@ func DefaultAggregationSelector(ik InstrumentKind) Aggregation { panic("unknown instrument kind") } +// CardinalityLimitSelector selects the cardinality limit to use based on the +// InstrumentKind. The cardinality limit is the maximum number of distinct +// attribute sets that can be recorded for a single instrument. +// +// The selector returns (limit, fallback). When fallback is true, the pipeline +// falls back to the provider's global cardinality limit. 
+// When fallback is false, the limit is applied: a value of 0 or less means +// no limit, and a positive value is the limit for that kind. +// To avoid overriding the provider's global limit, return (0, true). +type CardinalityLimitSelector func(InstrumentKind) (limit int, fallback bool) + +// defaultCardinalityLimitSelector is the default CardinalityLimitSelector used +// if WithCardinalityLimitSelector is not provided. It returns (0, true) for all +// instrument kinds, allowing the pipeline to fall back to the provider's global +// limit. +func defaultCardinalityLimitSelector(_ InstrumentKind) (int, bool) { + return 0, true +} + // ReaderOption is an option which can be applied to manual or Periodic // readers. type ReaderOption interface { @@ -220,3 +248,33 @@ func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConf c.producers = append(c.producers, o.p) return c } + +// WithCardinalityLimitSelector sets the CardinalityLimitSelector a reader will +// use to determine the cardinality limit for an instrument based on its kind. +// If this option is not used, the reader will use the +// defaultCardinalityLimitSelector. +// +// The selector should return (limit, false) to set a positive limit, +// (0, false) to explicitly specify unlimited, or +// (0, true) to fall back to the provider's global limit. +// +// See [CardinalityLimitSelector] for more details. +func WithCardinalityLimitSelector(selector CardinalityLimitSelector) ReaderOption { + return cardinalityLimitSelectorOption{selector: selector} +} + +type cardinalityLimitSelectorOption struct { + selector CardinalityLimitSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (o cardinalityLimitSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.cardinalityLimitSelector = o.selector + return c +} + +// applyPeriodic returns a periodicReaderConfig with option applied. 
+func (o cardinalityLimitSelectorOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { + c.cardinalityLimitSelector = o.selector + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index ea9e076c7e9..26752be7d71 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 8a7bb330bf9..04f15fcd21f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index 0d6e213d924..a3d647d92c2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -193,3 +193,11 @@ func WithContainer() Option { func WithContainerID() Option { return WithDetectors(cgroupContainerIDDetector{}) } + +// WithService adds all the Service attributes to the configured Resource. 
+func WithService() Option { + return WithDetectors( + defaultServiceInstanceIDDetector{}, + defaultServiceNameDetector{}, + ) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index a19b39def83..e977ff1c48a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index c49157224e8..bc0e5c19e38 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 023621ba76b..755c0824274 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type hostIDProvider func() (string, error) @@ -31,19 +31,19 @@ type hostIDReaderBSD struct { readFile fileReader } -// read attempts to read the machine-id from /etc/hostid. If not found it will -// execute `kenv -q smbios.system.uuid`. If neither location yields an id an -// error will be returned. +// read attempts to read the machine-id from /etc/hostid. +// If not found it will execute: /bin/kenv -q smbios.system.uuid. 
+// If neither location yields an id an error will be returned. func (r *hostIDReaderBSD) read() (string, error) { if result, err := r.readFile("/etc/hostid"); err == nil { return strings.TrimSpace(result), nil } - if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil { return strings.TrimSpace(result), nil } - return "", errors.New("host id not found in: /etc/hostid or kenv") + return "", errors.New("host id not found in: /etc/hostid or /bin/kenv") } // hostIDReaderDarwin implements hostIDReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go index 6354b356022..c95d87685c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -8,7 +8,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os" func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) + b, err := os.ReadFile(filename) // nolint:gosec // false positive if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 534809e2178..f5682cad414 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index a1189553c71..99dce64f6d3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ 
b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 28e1e4f7ebd..f715be53eda 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -232,6 +232,15 @@ func Empty() *Resource { // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { + return DefaultWithContext(context.Background()) +} + +// DefaultWithContext returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +// +// If the default resource has already been initialized, the provided ctx +// is ignored and the cached resource is returned. +func DefaultWithContext(ctx context.Context) *Resource { defaultResourceOnce.Do(func() { var err error defaultDetectors := []Detector{ @@ -243,7 +252,7 @@ func Default() *Resource { defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) } defaultResource, err = Detect( - context.Background(), + ctx, defaultDetectors..., ) if err != nil { @@ -260,8 +269,14 @@ func Default() *Resource { // Environment returns an instance of Resource with attributes // extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. func Environment() *Resource { + return EnvironmentWithContext(context.Background()) +} + +// EnvironmentWithContext returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. 
+func EnvironmentWithContext(ctx context.Context) *Resource { detector := &fromEnv{} - resource, err := detector.Detect(context.Background()) + resource, err := detector.Detect(ctx) if err != nil { otel.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 7d15cbb9c0f..32854b14a35 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -68,7 +68,7 @@ type batchSpanProcessor struct { o BatchSpanProcessorOptions queue chan ReadOnlySpan - dropped uint32 + dropped atomic.Uint32 inst *observ.BSP @@ -123,12 +123,10 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO otel.Handle(err) } - bsp.stopWait.Add(1) - go func() { - defer bsp.stopWait.Done() + bsp.stopWait.Go(func() { bsp.processQueue() bsp.drainQueue() - }() + }) return bsp } @@ -295,7 +293,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", bsp.dropped.Load()) if bsp.inst != nil { bsp.inst.Processed(ctx, int64(l)) } @@ -423,7 +421,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) case bsp.queue <- sd: return true default: - atomic.AddUint32(&bsp.dropped, 1) + bsp.dropped.Add(1) if bsp.inst != nil { bsp.inst.ProcessedQueueFull(ctx, 1) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go index d9cfba0b45e..c31e03aa0a9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go +++ 
b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go index 8afd0526799..0e77cd95375 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) var measureAttrsPool = sync.Pool{ diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go index 13a2db2969b..560d316f2f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -55,6 +55,10 @@ func NewTracer() (Tracer, error) { func (t Tracer) Enabled() bool { return t.enabled } func (t Tracer) SpanStarted(ctx context.Context, psc 
trace.SpanContext, span trace.Span) { + if !t.started.Enabled(ctx) { + return + } + key := spanStartedKey{ parent: parentStateNoParent, sampling: samplingStateDrop, @@ -89,6 +93,10 @@ func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { } func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + if !t.live.Enabled(ctx) { + return + } + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} opts := spanLiveOpts[key] t.live.Add(ctx, value, opts...) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index d2cf4ebd3e7..cd40d299d6c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -262,6 +263,7 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } + var err error for _, sps := range spss { select { case <-ctx.Done(): @@ -269,11 +271,9 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { default: } - if err := sps.sp.ForceFlush(ctx); err != nil { - return err - } + err = errors.Join(err, sps.sp.ForceFlush(ctx)) } - return nil + return err } // Shutdown shuts down TracerProvider. 
All registered span processors are shut down @@ -303,14 +303,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { sps.state.Do(func() { err = sps.sp.Shutdown(ctx) }) - if err != nil { - if retErr == nil { - retErr = err - } else { - // Poor man's list of errors - retErr = fmt.Errorf("%w; %w", retErr, err) - } - } + retErr = errors.Join(retErr, err) } p.spanProcessors.Store(&spanProcessorStates{}) return retErr diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index 81c5060ad66..845e292c2bb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -69,17 +69,17 @@ type traceIDRatioSampler struct { } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) + state := trace.SpanContextFromContext(p.ParentContext).TraceState() x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, - Tracestate: psc.TraceState(), + Tracestate: state, } } return SamplingResult{ Decision: Drop, - Tracestate: psc.TraceState(), + Tracestate: state, } } @@ -94,12 +94,20 @@ func (ts traceIDRatioSampler) Description() string { // //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { + // Cannot use AlwaysSample() and NeverSample(), must return spec-compliant descriptions. + // See https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased. 
if fraction >= 1 { - return AlwaysSample() + return predeterminedSampler{ + description: "TraceIDRatioBased{1}", + decision: RecordAndSample, + } } if fraction <= 0 { - fraction = 0 + return predeterminedSampler{ + description: "TraceIDRatioBased{0}", + decision: Drop, + } } return &traceIDRatioSampler{ @@ -118,6 +126,7 @@ func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOnSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson return "AlwaysOnSampler" } @@ -139,6 +148,7 @@ func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOffSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwaysoff return "AlwaysOffSampler" } @@ -147,6 +157,22 @@ func NeverSample() Sampler { return alwaysOffSampler{} } +type predeterminedSampler struct { + description string + decision SamplingDecision +} + +func (s predeterminedSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: s.decision, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (s predeterminedSampler) Description() string { + return s.description +} + // ParentBased returns a sampler decorator which behaves differently, // based on the parent of the span. If the span has no parent, // the decorated sampler is used to make sampling decision. 
If the span has diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index d4666105916..7d55ce1dc2e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index b5497c2816b..766731dd25d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md deleted file mode 100644 index fed7013e6ff..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md +++ /dev/null @@ -1,78 +0,0 @@ - -# Migration from v1.38.0 to v1.39.0 - -The `go.opentelemetry.io/otel/semconv/v1.39.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.38.0` with the following exceptions. - -## Removed - -The following declarations have been removed. -Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. - -If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. -If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
- -- `LinuxMemorySlabStateKey` -- `LinuxMemorySlabStateReclaimable` -- `LinuxMemorySlabStateUnreclaimable` -- `PeerService` -- `PeerServiceKey` -- `RPCConnectRPCErrorCodeAborted` -- `RPCConnectRPCErrorCodeAlreadyExists` -- `RPCConnectRPCErrorCodeCancelled` -- `RPCConnectRPCErrorCodeDataLoss` -- `RPCConnectRPCErrorCodeDeadlineExceeded` -- `RPCConnectRPCErrorCodeFailedPrecondition` -- `RPCConnectRPCErrorCodeInternal` -- `RPCConnectRPCErrorCodeInvalidArgument` -- `RPCConnectRPCErrorCodeKey` -- `RPCConnectRPCErrorCodeNotFound` -- `RPCConnectRPCErrorCodeOutOfRange` -- `RPCConnectRPCErrorCodePermissionDenied` -- `RPCConnectRPCErrorCodeResourceExhausted` -- `RPCConnectRPCErrorCodeUnauthenticated` -- `RPCConnectRPCErrorCodeUnavailable` -- `RPCConnectRPCErrorCodeUnimplemented` -- `RPCConnectRPCErrorCodeUnknown` -- `RPCConnectRPCRequestMetadata` -- `RPCConnectRPCResponseMetadata` -- `RPCGRPCRequestMetadata` -- `RPCGRPCResponseMetadata` -- `RPCGRPCStatusCodeAborted` -- `RPCGRPCStatusCodeAlreadyExists` -- `RPCGRPCStatusCodeCancelled` -- `RPCGRPCStatusCodeDataLoss` -- `RPCGRPCStatusCodeDeadlineExceeded` -- `RPCGRPCStatusCodeFailedPrecondition` -- `RPCGRPCStatusCodeInternal` -- `RPCGRPCStatusCodeInvalidArgument` -- `RPCGRPCStatusCodeKey` -- `RPCGRPCStatusCodeNotFound` -- `RPCGRPCStatusCodeOk` -- `RPCGRPCStatusCodeOutOfRange` -- `RPCGRPCStatusCodePermissionDenied` -- `RPCGRPCStatusCodeResourceExhausted` -- `RPCGRPCStatusCodeUnauthenticated` -- `RPCGRPCStatusCodeUnavailable` -- `RPCGRPCStatusCodeUnimplemented` -- `RPCGRPCStatusCodeUnknown` -- `RPCJSONRPCErrorCode` -- `RPCJSONRPCErrorCodeKey` -- `RPCJSONRPCErrorMessage` -- `RPCJSONRPCErrorMessageKey` -- `RPCJSONRPCRequestID` -- `RPCJSONRPCRequestIDKey` -- `RPCJSONRPCVersion` -- `RPCJSONRPCVersionKey` -- `RPCService` -- `RPCServiceKey` -- `RPCSystemApacheDubbo` -- `RPCSystemConnectRPC` -- `RPCSystemDotnetWcf` -- `RPCSystemGRPC` -- `RPCSystemJSONRPC` -- `RPCSystemJavaRmi` -- `RPCSystemKey` -- `RPCSystemOncRPC` - -[OpenTelemetry 
Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions -[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md deleted file mode 100644 index 4b0e6f7f3eb..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.39.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.39.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.39.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md new file mode 100644 index 00000000000..e246b1692d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md @@ -0,0 +1,27 @@ + +# Migration from v1.39.0 to v1.40.0 + +The `go.opentelemetry.io/otel/semconv/v1.40.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.39.0` with the following exceptions. + +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
+ +- `ErrorMessage` +- `ErrorMessageKey` +- `RPCMessageCompressedSize` +- `RPCMessageCompressedSizeKey` +- `RPCMessageID` +- `RPCMessageIDKey` +- `RPCMessageTypeKey` +- `RPCMessageTypeReceived` +- `RPCMessageTypeSent` +- `RPCMessageUncompressedSize` +- `RPCMessageUncompressedSizeKey` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md new file mode 100644 index 00000000000..c51b7fb7b07 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.40.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.40.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.40.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go similarity index 95% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go index 080365fc196..ee6b1f79d62 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go @@ -3,7 +3,7 @@ // Code generated from semantic convention specification. DO NOT EDIT. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" import "go.opentelemetry.io/otel/attribute" @@ -3431,7 +3431,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "a3bf90e006b2" // @@ -3467,7 +3467,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "gcr.io/opentelemetry/operator" ContainerImageNameKey = attribute.Key("container.image.name") @@ -3478,7 +3478,7 @@ const ( // // Type: string[] // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", @@ -3497,7 +3497,7 @@ const ( // // Type: string[] // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "v1.27.1", "3.5.7-0" // @@ -3856,6 +3856,12 @@ const ( // [Generating query summary] // section. // + // For batch operations, if the individual operations are known to have the same + // query summary + // then that query summary SHOULD be used prepended by `BATCH `, + // otherwise `db.query.summary` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + // // [Generating query summary]: /docs/db/database-spans.md#generating-a-summary-of-the-query DBQuerySummaryKey = attribute.Key("db.query.summary") @@ -4623,27 +4629,6 @@ func EnduserPseudoID(val string) attribute.KeyValue { // Namespace: error const ( - // ErrorMessageKey is the attribute Key conforming to the "error.message" - // semantic conventions. It represents a message providing more detail about an - // error in human-readable form. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Unexpected input type: string", "The user has exceeded their - // storage quota" - // Note: `error.message` should provide additional context and detail about an - // error. - // It is NOT RECOMMENDED to duplicate the value of `error.type` in - // `error.message`. - // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in - // `error.message`. - // - // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded - // cardinality and overlap with span status. - ErrorMessageKey = attribute.Key("error.message") - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic // conventions. It represents the describes a class of error the operation ended // with. @@ -4683,13 +4668,6 @@ const ( ErrorTypeKey = attribute.Key("error.type") ) -// ErrorMessage returns an attribute KeyValue conforming to the "error.message" -// semantic conventions. It represents a message providing more detail about an -// error in human-readable form. -func ErrorMessage(val string) attribute.KeyValue { - return ErrorMessageKey.String(val) -} - // Enum values for error.type var ( // A fallback error value to be used when the instrumentation doesn't define a @@ -4710,6 +4688,9 @@ const ( // Stability: Stable // // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + // Note: > [!WARNING] + // + // > This attribute may contain sensitive information. ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the @@ -5165,6 +5146,19 @@ const ( // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + // FeatureFlagErrorMessageKey is the attribute Key conforming to the + // "feature_flag.error.message" semantic conventions. 
It represents a message + // providing more detail about an error that occurred during feature flag + // evaluation in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + FeatureFlagErrorMessageKey = attribute.Key("feature_flag.error.message") + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" // semantic conventions. It represents the lookup key of the feature flag. // @@ -5266,6 +5260,14 @@ func FeatureFlagContextID(val string) attribute.KeyValue { return FeatureFlagContextIDKey.String(val) } +// FeatureFlagErrorMessage returns an attribute KeyValue conforming to the +// "feature_flag.error.message" semantic conventions. It represents a message +// providing more detail about an error that occurred during feature flag +// evaluation in human-readable form. +func FeatureFlagErrorMessage(val string) attribute.KeyValue { + return FeatureFlagErrorMessageKey.String(val) +} + // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the lookup key of the // feature flag. @@ -5980,6 +5982,41 @@ const ( // // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") + + // GCPGCEInstanceGroupManagerNameKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.name" semantic conventions. It represents the + // name of the Instance Group Manager (IGM) that manages this VM, if any. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web-igm", "my-managed-group" + GCPGCEInstanceGroupManagerNameKey = attribute.Key("gcp.gce.instance_group_manager.name") + + // GCPGCEInstanceGroupManagerRegionKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.region" semantic conventions. It represents + // the region of a **regional** Instance Group Manager (e.g., `us-central1`). + // Set this **only** when the IGM is regional. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "europe-west1" + GCPGCEInstanceGroupManagerRegionKey = attribute.Key("gcp.gce.instance_group_manager.region") + + // GCPGCEInstanceGroupManagerZoneKey is the attribute Key conforming to the + // "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the + // zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this + // **only** when the IGM is zonal. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1-a", "europe-west1-b" + GCPGCEInstanceGroupManagerZoneKey = attribute.Key("gcp.gce.instance_group_manager.zone") ) // GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the @@ -6103,6 +6140,29 @@ func GCPGCEInstanceName(val string) attribute.KeyValue { return GCPGCEInstanceNameKey.String(val) } +// GCPGCEInstanceGroupManagerName returns an attribute KeyValue conforming to the +// "gcp.gce.instance_group_manager.name" semantic conventions. It represents the +// name of the Instance Group Manager (IGM) that manages this VM, if any. +func GCPGCEInstanceGroupManagerName(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerNameKey.String(val) +} + +// GCPGCEInstanceGroupManagerRegion returns an attribute KeyValue conforming to +// the "gcp.gce.instance_group_manager.region" semantic conventions. 
It +// represents the region of a **regional** Instance Group Manager (e.g., +// `us-central1`). Set this **only** when the IGM is regional. +func GCPGCEInstanceGroupManagerRegion(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerRegionKey.String(val) +} + +// GCPGCEInstanceGroupManagerZone returns an attribute KeyValue conforming to the +// "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the +// zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this +// **only** when the IGM is zonal. +func GCPGCEInstanceGroupManagerZone(val string) attribute.KeyValue { + return GCPGCEInstanceGroupManagerZoneKey.String(val) +} + // Enum values for gcp.apphub.service.criticality_type var ( // Mission critical service. @@ -6265,6 +6325,17 @@ const ( // Examples: "Math Tutor", "Fiction Writer" GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + // GenAIAgentVersionKey is the attribute Key conforming to the + // "gen_ai.agent.version" semantic conventions. It represents the version of the + // GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0", "2025-05-01" + GenAIAgentVersionKey = attribute.Key("gen_ai.agent.version") + // GenAIConversationIDKey is the attribute Key conforming to the // "gen_ai.conversation.id" semantic conventions. It represents the unique // identifier for a conversation (session, thread), used to store and correlate @@ -6663,6 +6734,44 @@ const ( // Examples: "gpt-4-0613" GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + // GenAIRetrievalDocumentsKey is the attribute Key conforming to the + // "gen_ai.retrieval.documents" semantic conventions. It represents the + // documents retrieved. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "id": "doc_123",\n "score": 0.95\n },\n {\n "id": + // "doc_456",\n "score": 0.87\n },\n {\n "id": "doc_789",\n "score": 0.82\n + // }\n]\n" + // Note: Instrumentations MUST follow [Retrieval documents JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Each document object SHOULD contain at least the following properties: + // `id` (string): A unique identifier for the document, `score` (double): The + // relevance score of the document + // + // [Retrieval documents JSON schema]: /docs/gen-ai/gen-ai-retrieval-documents.json + GenAIRetrievalDocumentsKey = attribute.Key("gen_ai.retrieval.documents") + + // GenAIRetrievalQueryTextKey is the attribute Key conforming to the + // "gen_ai.retrieval.query.text" semantic conventions. It represents the query + // text used for retrieval. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "What is the capital of France?", "weather in Paris" + // Note: > [!Warning] + // + // > This attribute may contain sensitive information. + GenAIRetrievalQueryTextKey = attribute.Key("gen_ai.retrieval.query.text") + // GenAISystemInstructionsKey is the attribute Key conforming to the // "gen_ai.system_instructions" semantic conventions. It represents the system // message or instructions provided to the GenAI model separately from the chat @@ -6837,6 +6946,30 @@ const ( // updates. GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + // GenAIUsageCacheCreationInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.cache_creation.input_tokens" semantic conventions. 
It + // represents the number of input tokens written to a provider-managed cache. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 25 + // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. + GenAIUsageCacheCreationInputTokensKey = attribute.Key("gen_ai.usage.cache_creation.input_tokens") + + // GenAIUsageCacheReadInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents + // the number of input tokens served from a provider-managed cache. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. + GenAIUsageCacheReadInputTokensKey = attribute.Key("gen_ai.usage.cache_read.input_tokens") + // GenAIUsageInputTokensKey is the attribute Key conforming to the // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of // tokens used in the GenAI input (prompt). @@ -6846,6 +6979,12 @@ const ( // Stability: Development // // Examples: 100 + // Note: This value SHOULD include all types of input tokens, including cached + // tokens. + // Instrumentations SHOULD make a best effort to populate this value, using a + // total + // provided by the provider when available or, depending on the provider API, + // by summing different token types parsed from the provider output. GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") // GenAIUsageOutputTokensKey is the attribute Key conforming to the @@ -6880,6 +7019,13 @@ func GenAIAgentName(val string) attribute.KeyValue { return GenAIAgentNameKey.String(val) } +// GenAIAgentVersion returns an attribute KeyValue conforming to the +// "gen_ai.agent.version" semantic conventions. It represents the version of the +// GenAI agent. 
+func GenAIAgentVersion(val string) attribute.KeyValue { + return GenAIAgentVersionKey.String(val) +} + // GenAIConversationID returns an attribute KeyValue conforming to the // "gen_ai.conversation.id" semantic conventions. It represents the unique // identifier for a conversation (session, thread), used to store and correlate @@ -7036,6 +7182,13 @@ func GenAIResponseModel(val string) attribute.KeyValue { return GenAIResponseModelKey.String(val) } +// GenAIRetrievalQueryText returns an attribute KeyValue conforming to the +// "gen_ai.retrieval.query.text" semantic conventions. It represents the query +// text used for retrieval. +func GenAIRetrievalQueryText(val string) attribute.KeyValue { + return GenAIRetrievalQueryTextKey.String(val) +} + // GenAIToolCallID returns an attribute KeyValue conforming to the // "gen_ai.tool.call.id" semantic conventions. It represents the tool call // identifier. @@ -7064,6 +7217,20 @@ func GenAIToolType(val string) attribute.KeyValue { return GenAIToolTypeKey.String(val) } +// GenAIUsageCacheCreationInputTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.cache_creation.input_tokens" semantic conventions. It +// represents the number of input tokens written to a provider-managed cache. +func GenAIUsageCacheCreationInputTokens(val int) attribute.KeyValue { + return GenAIUsageCacheCreationInputTokensKey.Int(val) +} + +// GenAIUsageCacheReadInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents the +// number of input tokens served from a provider-managed cache. +func GenAIUsageCacheReadInputTokens(val int) attribute.KeyValue { + return GenAIUsageCacheReadInputTokensKey.Int(val) +} + // GenAIUsageInputTokens returns an attribute KeyValue conforming to the // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of // tokens used in the GenAI input (prompt). 
@@ -7100,6 +7267,11 @@ var ( // // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Retrieval operation such as [OpenAI Search Vector Store API] + // Stability: development + // + // [OpenAI Search Vector Store API]: https://platform.openai.com/docs/api-reference/vector-stores/search + GenAIOperationNameRetrieval = GenAIOperationNameKey.String("retrieval") // Create GenAI agent // Stability: development GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") @@ -7889,9 +8061,18 @@ const ( // the list of known HTTP methods. If this override is done via environment // variable, then the environment variable MUST be named // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is not a - // list of known methods in addition to the defaults). + // case-sensitive known HTTP methods. + // + // + // If this override is done via declarative configuration, then the list MUST be + // configurable via the `known_methods` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.http.client` and/or + // `.instrumentation/development.general.http.server`. + // + // In either case, this list MUST be a full override of the default known + // methods, + // it is not a list of known methods in addition to the defaults. // // HTTP method names are case-sensitive and `http.request.method` attribute // value MUST match a known HTTP method name exactly. 
@@ -8845,7 +9026,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry-cluster" K8SClusterNameKey = attribute.Key("k8s.cluster.name") @@ -8856,7 +9037,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever @@ -8892,7 +9073,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "redis" K8SContainerNameKey = attribute.Key("k8s.container.name") @@ -8904,7 +9085,7 @@ const ( // // Type: int // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") @@ -8955,7 +9136,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") @@ -8965,7 +9146,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") @@ -8976,7 +9157,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") @@ -8986,7 +9167,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") @@ -8997,7 +9178,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SDeploymentNameKey = 
attribute.Key("k8s.deployment.name") @@ -9008,7 +9189,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") @@ -9098,7 +9279,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SJobNameKey = attribute.Key("k8s.job.name") @@ -9108,7 +9289,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SJobUIDKey = attribute.Key("k8s.job.uid") @@ -9119,7 +9300,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "default" K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") @@ -9184,7 +9365,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "node-1" K8SNodeNameKey = attribute.Key("k8s.node.name") @@ -9194,7 +9375,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" K8SNodeUIDKey = attribute.Key("k8s.node.uid") @@ -9204,7 +9385,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "collector-gateway" // Note: The K8s Pod spec has an optional hostname field, which can be used to @@ -9224,7 +9405,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "172.18.0.2" // Note: This attribute aligns with the `podIP` field of the @@ -9238,7 +9419,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry-pod-autoconf" K8SPodNameKey = 
attribute.Key("k8s.pod.name") @@ -9249,7 +9430,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "2025-12-04T08:41:03Z" // Note: Date and time at which the object was acknowledged by the Kubelet. @@ -9293,7 +9474,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SPodUIDKey = attribute.Key("k8s.pod.uid") @@ -9304,7 +9485,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") @@ -9315,7 +9496,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") @@ -9383,13 +9564,152 @@ const ( // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + // K8SServiceEndpointAddressTypeKey is the attribute Key conforming to the + // "k8s.service.endpoint.address_type" semantic conventions. It represents the + // address type of the service endpoint. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "IPv4", "IPv6" + // Note: The network address family or type of the endpoint. + // This attribute aligns with the `addressType` field of the + // [K8s EndpointSlice]. + // It is used to differentiate metrics when a Service is backed by multiple + // address types + // (e.g., in dual-stack clusters). 
+ // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + K8SServiceEndpointAddressTypeKey = attribute.Key("k8s.service.endpoint.address_type") + + // K8SServiceEndpointConditionKey is the attribute Key conforming to the + // "k8s.service.endpoint.condition" semantic conventions. It represents the + // condition of the service endpoint. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ready", "serving", "terminating" + // Note: The current operational condition of the service endpoint. + // An endpoint can have multiple conditions set at once (e.g., both `serving` + // and `terminating` during rollout). + // This attribute aligns with the condition fields in the [K8s EndpointSlice]. + // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + K8SServiceEndpointConditionKey = attribute.Key("k8s.service.endpoint.condition") + + // K8SServiceEndpointZoneKey is the attribute Key conforming to the + // "k8s.service.endpoint.zone" semantic conventions. It represents the zone of + // the service endpoint. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1a", "us-west-2b", "zone-a", "" + // Note: The zone where the endpoint is located, typically corresponding to a + // failure domain. + // This attribute aligns with the `zone` field of endpoints in the + // [K8s EndpointSlice]. + // It enables zone-aware monitoring of service endpoint distribution and + // supports + // features like [Topology Aware Routing]. + // + // If the zone is not populated (e.g., nodes without the + // `topology.kubernetes.io/zone` label), + // the attribute value will be an empty string. 
+ // + // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ + // [Topology Aware Routing]: https://kubernetes.io/docs/concepts/services-networking/topology-aware-routing/ + K8SServiceEndpointZoneKey = attribute.Key("k8s.service.endpoint.zone") + + // K8SServiceNameKey is the attribute Key conforming to the "k8s.service.name" + // semantic conventions. It represents the name of the Service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + K8SServiceNameKey = attribute.Key("k8s.service.name") + + // K8SServicePublishNotReadyAddressesKey is the attribute Key conforming to the + // "k8s.service.publish_not_ready_addresses" semantic conventions. It represents + // the whether the Service publishes not-ready endpoints. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true, false + // Note: Whether the Service is configured to publish endpoints before the pods + // are ready. + // This attribute is typically used to indicate that a Service (such as a + // headless + // Service for a StatefulSet) allows peer discovery before pods pass their + // readiness probes. + // It aligns with the `publishNotReadyAddresses` field of the + // [K8s ServiceSpec]. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + K8SServicePublishNotReadyAddressesKey = attribute.Key("k8s.service.publish_not_ready_addresses") + + // K8SServiceTrafficDistributionKey is the attribute Key conforming to the + // "k8s.service.traffic_distribution" semantic conventions. It represents the + // traffic distribution policy for the Service. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PreferSameZone", "PreferSameNode" + // Note: Specifies how traffic is distributed to endpoints for this Service. + // This attribute aligns with the `trafficDistribution` field of the + // [K8s ServiceSpec]. + // Known values include `PreferSameZone` (prefer endpoints in the same zone as + // the client) and + // `PreferSameNode` (prefer endpoints on the same node, fallback to same zone, + // then cluster-wide). + // If this field is not set on the Service, the attribute SHOULD NOT be emitted. + // When not set, Kubernetes distributes traffic evenly across all endpoints + // cluster-wide. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-distribution + K8SServiceTrafficDistributionKey = attribute.Key("k8s.service.traffic_distribution") + + // K8SServiceTypeKey is the attribute Key conforming to the "k8s.service.type" + // semantic conventions. It represents the type of the Kubernetes Service. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ClusterIP", "NodePort", "LoadBalancer" + // Note: This attribute aligns with the `type` field of the + // [K8s ServiceSpec]. + // + // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + K8SServiceTypeKey = attribute.Key("k8s.service.type") + + // K8SServiceUIDKey is the attribute Key conforming to the "k8s.service.uid" + // semantic conventions. It represents the UID of the Service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SServiceUIDKey = attribute.Key("k8s.service.uid") + // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. 
// // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "opentelemetry" K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") @@ -9400,7 +9720,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Alpha + // Stability: Beta // // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") @@ -9803,6 +10123,64 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue { return K8SResourceQuotaUIDKey.String(val) } +// K8SServiceAnnotation returns an attribute KeyValue conforming to the +// "k8s.service.annotation" semantic conventions. It represents the annotation +// placed on the Service, the `` being the annotation name, the value being +// the annotation value, even if the value is empty. +func K8SServiceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.annotation."+key, val) +} + +// K8SServiceEndpointZone returns an attribute KeyValue conforming to the +// "k8s.service.endpoint.zone" semantic conventions. It represents the zone of +// the service endpoint. +func K8SServiceEndpointZone(val string) attribute.KeyValue { + return K8SServiceEndpointZoneKey.String(val) +} + +// K8SServiceLabel returns an attribute KeyValue conforming to the +// "k8s.service.label" semantic conventions. It represents the label placed on +// the Service, the `` being the label name, the value being the label +// value, even if the value is empty. +func K8SServiceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.label."+key, val) +} + +// K8SServiceName returns an attribute KeyValue conforming to the +// "k8s.service.name" semantic conventions. It represents the name of the +// Service. 
+func K8SServiceName(val string) attribute.KeyValue { + return K8SServiceNameKey.String(val) +} + +// K8SServicePublishNotReadyAddresses returns an attribute KeyValue conforming to +// the "k8s.service.publish_not_ready_addresses" semantic conventions. It +// represents the whether the Service publishes not-ready endpoints. +func K8SServicePublishNotReadyAddresses(val bool) attribute.KeyValue { + return K8SServicePublishNotReadyAddressesKey.Bool(val) +} + +// K8SServiceSelector returns an attribute KeyValue conforming to the +// "k8s.service.selector" semantic conventions. It represents the selector +// key-value pair placed on the Service, the `` being the selector key, the +// value being the selector value. +func K8SServiceSelector(key string, val string) attribute.KeyValue { + return attribute.String("k8s.service.selector."+key, val) +} + +// K8SServiceTrafficDistribution returns an attribute KeyValue conforming to the +// "k8s.service.traffic_distribution" semantic conventions. It represents the +// traffic distribution policy for the Service. +func K8SServiceTrafficDistribution(val string) attribute.KeyValue { + return K8SServiceTrafficDistributionKey.String(val) +} + +// K8SServiceUID returns an attribute KeyValue conforming to the +// "k8s.service.uid" semantic conventions. It represents the UID of the Service. +func K8SServiceUID(val string) attribute.KeyValue { + return K8SServiceUIDKey.String(val) +} + // K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the // "k8s.statefulset.annotation" semantic conventions. 
It represents the // annotation placed on the StatefulSet, the `` being the annotation name, @@ -9995,6 +10373,48 @@ var ( K8SPodStatusReasonUnexpectedAdmissionError = K8SPodStatusReasonKey.String("UnexpectedAdmissionError") ) +// Enum values for k8s.service.endpoint.address_type +var ( + // IPv4 address type + // Stability: development + K8SServiceEndpointAddressTypeIPv4 = K8SServiceEndpointAddressTypeKey.String("IPv4") + // IPv6 address type + // Stability: development + K8SServiceEndpointAddressTypeIPv6 = K8SServiceEndpointAddressTypeKey.String("IPv6") + // FQDN address type + // Stability: development + K8SServiceEndpointAddressTypeFqdn = K8SServiceEndpointAddressTypeKey.String("FQDN") +) + +// Enum values for k8s.service.endpoint.condition +var ( + // The endpoint is ready to receive new connections. + // Stability: development + K8SServiceEndpointConditionReady = K8SServiceEndpointConditionKey.String("ready") + // The endpoint is currently handling traffic. + // Stability: development + K8SServiceEndpointConditionServing = K8SServiceEndpointConditionKey.String("serving") + // The endpoint is in the process of shutting down. 
+ // Stability: development + K8SServiceEndpointConditionTerminating = K8SServiceEndpointConditionKey.String("terminating") +) + +// Enum values for k8s.service.type +var ( + // ClusterIP service type + // Stability: development + K8SServiceTypeClusterIP = K8SServiceTypeKey.String("ClusterIP") + // NodePort service type + // Stability: development + K8SServiceTypeNodePort = K8SServiceTypeKey.String("NodePort") + // LoadBalancer service type + // Stability: development + K8SServiceTypeLoadBalancer = K8SServiceTypeKey.String("LoadBalancer") + // ExternalName service type + // Stability: development + K8SServiceTypeExternalName = K8SServiceTypeKey.String("ExternalName") +) + // Enum values for k8s.volume.type var ( // A [persistentVolumeClaim] volume @@ -11770,6 +12190,16 @@ func OncRPCVersion(val int) attribute.KeyValue { // Namespace: openai const ( + // OpenAIAPITypeKey is the attribute Key conforming to the "openai.api.type" + // semantic conventions. It represents the type of OpenAI API being used. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OpenAIAPITypeKey = attribute.Key("openai.api.type") + // OpenAIRequestServiceTierKey is the attribute Key conforming to the // "openai.request.service_tier" semantic conventions. It represents the service // tier requested. May be a specific tier, default, or auto. @@ -11818,6 +12248,20 @@ func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { return OpenAIResponseSystemFingerprintKey.String(val) } +// Enum values for openai.api.type +var ( + // The OpenAI [Chat Completions API]. + // Stability: development + // + // [Chat Completions API]: https://developers.openai.com/api/reference/chat-completions/overview + OpenAIAPITypeChatCompletions = OpenAIAPITypeKey.String("chat_completions") + // The OpenAI [Responses API]. 
+ // Stability: development + // + // [Responses API]: https://developers.openai.com/api/reference/responses/overview + OpenAIAPITypeResponses = OpenAIAPITypeKey.String("responses") +) + // Enum values for openai.request.service_tier var ( // The system will utilize scale tier credits until they are exhausted. @@ -11892,6 +12336,158 @@ var ( OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") ) +// Namespace: oracle +const ( + // OracleDBDomainKey is the attribute Key conforming to the "oracle.db.domain" + // semantic conventions. It represents the database domain associated with the + // connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "corp.internal", "prod.db.local" + // Note: This attribute SHOULD be set to the value of the `DB_DOMAIN` + // initialization parameter, + // as exposed in `v$parameter`. `DB_DOMAIN` defines the domain portion of the + // global + // database name and SHOULD be configured when a database is, or may become, + // part of a + // distributed environment. Its value consists of one or more valid identifiers + // (alphanumeric ASCII characters) separated by periods. + OracleDBDomainKey = attribute.Key("oracle.db.domain") + + // OracleDBInstanceNameKey is the attribute Key conforming to the + // "oracle.db.instance.name" semantic conventions. It represents the instance + // name associated with the connection in an Oracle Real Application Clusters + // environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ORCL1", "ORCL2", "ORCL3" + // Note: There can be multiple instances associated with a single database + // service. It indicates the + // unique instance name to which the connection is currently bound. For non-RAC + // databases, this value + // defaults to the `oracle.db.name`. 
+ OracleDBInstanceNameKey = attribute.Key("oracle.db.instance.name") + + // OracleDBNameKey is the attribute Key conforming to the "oracle.db.name" + // semantic conventions. It represents the database name associated with the + // connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ORCL1", "FREE" + // Note: This attribute SHOULD be set to the value of the parameter `DB_NAME` + // exposed in `v$parameter`. + OracleDBNameKey = attribute.Key("oracle.db.name") + + // OracleDBPdbKey is the attribute Key conforming to the "oracle.db.pdb" + // semantic conventions. It represents the pluggable database (PDB) name + // associated with the connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PDB1", "FREEPDB" + // Note: This attribute SHOULD reflect the PDB that the session is currently + // connected to. + // If instrumentation cannot reliably obtain the active PDB name for each + // operation + // without issuing an additional query (such as `SELECT SYS_CONTEXT`), it is + // RECOMMENDED to fall back to the PDB name specified at connection + // establishment. + OracleDBPdbKey = attribute.Key("oracle.db.pdb") + + // OracleDBServiceKey is the attribute Key conforming to the "oracle.db.service" + // semantic conventions. It represents the service name currently associated + // with the database connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "order-processing-service", "db_low.adb.oraclecloud.com", + // "db_high.adb.oraclecloud.com" + // Note: The effective service name for a connection can change during its + // lifetime, + // for example after executing sql, `ALTER SESSION`. 
If an instrumentation + // cannot reliably + // obtain the current service name for each operation without issuing an + // additional + // query (such as `SELECT SYS_CONTEXT`), it is RECOMMENDED to fall back to the + // service name originally provided at connection establishment. + OracleDBServiceKey = attribute.Key("oracle.db.service") +) + +// OracleDBDomain returns an attribute KeyValue conforming to the +// "oracle.db.domain" semantic conventions. It represents the database domain +// associated with the connection. +func OracleDBDomain(val string) attribute.KeyValue { + return OracleDBDomainKey.String(val) +} + +// OracleDBInstanceName returns an attribute KeyValue conforming to the +// "oracle.db.instance.name" semantic conventions. It represents the instance +// name associated with the connection in an Oracle Real Application Clusters +// environment. +func OracleDBInstanceName(val string) attribute.KeyValue { + return OracleDBInstanceNameKey.String(val) +} + +// OracleDBName returns an attribute KeyValue conforming to the "oracle.db.name" +// semantic conventions. It represents the database name associated with the +// connection. +func OracleDBName(val string) attribute.KeyValue { + return OracleDBNameKey.String(val) +} + +// OracleDBPdb returns an attribute KeyValue conforming to the "oracle.db.pdb" +// semantic conventions. It represents the pluggable database (PDB) name +// associated with the connection. +func OracleDBPdb(val string) attribute.KeyValue { + return OracleDBPdbKey.String(val) +} + +// OracleDBService returns an attribute KeyValue conforming to the +// "oracle.db.service" semantic conventions. It represents the service name +// currently associated with the database connection. +func OracleDBService(val string) attribute.KeyValue { + return OracleDBServiceKey.String(val) +} + +// Namespace: oracle_cloud +const ( + // OracleCloudRealmKey is the attribute Key conforming to the + // "oracle_cloud.realm" semantic conventions. 
It represents the OCI realm + // identifier that indicates the isolated partition in which the tenancy and its + // resources reside. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "oc1", "oc2" + // Note: See [OCI documentation on realms] + // + // [OCI documentation on realms]: https://docs.oracle.com/iaas/Content/General/Concepts/regions.htm + OracleCloudRealmKey = attribute.Key("oracle_cloud.realm") +) + +// OracleCloudRealm returns an attribute KeyValue conforming to the +// "oracle_cloud.realm" semantic conventions. It represents the OCI realm +// identifier that indicates the isolated partition in which the tenancy and its +// resources reside. +func OracleCloudRealm(val string) attribute.KeyValue { + return OracleCloudRealmKey.String(val) +} + // Namespace: os const ( // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic @@ -12423,6 +13019,33 @@ const ( // // Examples: "/bazinga/" PprofProfileKeepFramesKey = attribute.Key("pprof.profile.keep_frames") + + // PprofScopeDefaultSampleTypeKey is the attribute Key conforming to the + // "pprof.scope.default_sample_type" semantic conventions. It represents the + // records the pprof's default_sample_type in the original profile. Not set if + // the default sample type was missing. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu" + // Note: This attribute, if present, MUST be set at the scope level + // (resource_profiles[].scope_profiles[].scope.attributes[]). + PprofScopeDefaultSampleTypeKey = attribute.Key("pprof.scope.default_sample_type") + + // PprofScopeSampleTypeOrderKey is the attribute Key conforming to the + // "pprof.scope.sample_type_order" semantic conventions. It represents the + // records the indexes of the sample types in the original profile. 
+ // + // Type: int[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3, 0, 1, 2 + // Note: This attribute, if present, MUST be set at the scope level + // (resource_profiles[].scope_profiles[].scope.attributes[]). + PprofScopeSampleTypeOrderKey = attribute.Key("pprof.scope.sample_type_order") ) // PprofLocationIsFolded returns an attribute KeyValue conforming to the @@ -12494,6 +13117,21 @@ func PprofProfileKeepFrames(val string) attribute.KeyValue { return PprofProfileKeepFramesKey.String(val) } +// PprofScopeDefaultSampleType returns an attribute KeyValue conforming to the +// "pprof.scope.default_sample_type" semantic conventions. It represents the +// records the pprof's default_sample_type in the original profile. Not set if +// the default sample type was missing. +func PprofScopeDefaultSampleType(val string) attribute.KeyValue { + return PprofScopeDefaultSampleTypeKey.String(val) +} + +// PprofScopeSampleTypeOrder returns an attribute KeyValue conforming to the +// "pprof.scope.sample_type_order" semantic conventions. It represents the +// records the indexes of the sample types in the original profile. +func PprofScopeSampleTypeOrder(val ...int) attribute.KeyValue { + return PprofScopeSampleTypeOrderKey.IntSlice(val) +} + // Namespace: process const ( // ProcessArgsCountKey is the attribute Key conforming to the @@ -13258,59 +13896,13 @@ var ( // Namespace: rpc const ( - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. 
It MUST be calculated as two different counters - // starting from `1` one for sent messages and one for received message.. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" - // semantic conventions. It represents the whether this is a received or sent - // message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic // conventions. It represents the fully-qualified logical name of the method // from the RPC interface perspective. 
// // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "com.example.ExampleService/exampleMethod", "EchoService/Echo", // "_OTHER" @@ -13345,7 +13937,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "com.myservice.EchoService/catchAll", // "com.myservice.EchoService/unknownMethod", "InvalidMethod" @@ -13357,7 +13949,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: "OK", "DEADLINE_EXCEEDED", "-32602" // Note: Usually it represents an error code, but may also represent partial @@ -13373,7 +13965,7 @@ const ( // // Type: Enum // RequirementLevel: Recommended - // Stability: Development + // Stability: Release_Candidate // // Examples: // Note: The client and server RPC systems may differ for the same RPC @@ -13383,27 +13975,6 @@ const ( RPCSystemNameKey = attribute.Key("rpc.system.name") ) -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" -// semantic conventions. It MUST be calculated as two different counters starting -// from `1` one for sent messages and one for received message.. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. 
-func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the fully-qualified logical name of the // method from the RPC interface perspective. @@ -13441,25 +14012,15 @@ func RPCResponseStatusCode(val string) attribute.KeyValue { return RPCResponseStatusCodeKey.String(val) } -// Enum values for rpc.message.type -var ( - // sent - // Stability: development - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - // Stability: development - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - // Enum values for rpc.system.name var ( // [gRPC] - // Stability: development + // Stability: release_candidate // // [gRPC]: https://grpc.io/ RPCSystemNameGRPC = RPCSystemNameKey.String("grpc") // [Apache Dubbo] - // Stability: development + // Stability: release_candidate // // [Apache Dubbo]: https://dubbo.apache.org/ RPCSystemNameDubbo = RPCSystemNameKey.String("dubbo") @@ -13674,13 +14235,28 @@ func ServerPort(val int) attribute.KeyValue { // Namespace: service const ( + // ServiceCriticalityKey is the attribute Key conforming to the + // "service.criticality" semantic conventions. It represents the operational + // criticality of the service. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "critical", "high", "medium", "low" + // Note: Application developers are encouraged to set `service.criticality` to + // express the operational importance of their services. Telemetry consumers MAY + // use this attribute to optimize telemetry collection or improve user + // experience. + ServiceCriticalityKey = attribute.Key("service.criticality") + // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. 
// // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Stable // // Examples: "627cc493-f310-47de-96bd-71410b7dec09" // Note: MUST be unique for each instance of the same @@ -13754,7 +14330,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: Development + // Stability: Stable // // Examples: "Shop" // Note: A string value having a meaning that helps to distinguish a group of @@ -13856,6 +14432,29 @@ func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } +// Enum values for service.criticality +var ( + // Service is business-critical; downtime directly impacts revenue, user + // experience, or core functionality. + // + // Stability: development + ServiceCriticalityCritical = ServiceCriticalityKey.String("critical") + // Service is important but has degradation tolerance or fallback mechanisms. + // + // Stability: development + ServiceCriticalityHigh = ServiceCriticalityKey.String("high") + // Service provides supplementary functionality; degradation has limited user + // impact. + // + // Stability: development + ServiceCriticalityMedium = ServiceCriticalityKey.String("medium") + // Service is non-essential to core operations; used for background tasks or + // internal tools. + // + // Stability: development + ServiceCriticalityLow = ServiceCriticalityKey.String("low") +) + // Namespace: session const ( // SessionIDKey is the attribute Key conforming to the "session.id" semantic @@ -15175,6 +15774,18 @@ const ( // // This list is subject to change over time. // + // Matching of query parameter keys against the sensitive list SHOULD be + // case-sensitive. + // + // + // Instrumentation MAY provide a way to override this list via declarative + // configuration. + // If so, it SHOULD use the `sensitive_query_parameters` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.sanitization.url`. 
+ // This list is a full override of the default sensitive query parameter keys, + // it is not a list of keys in addition to the defaults. + // // When a query string value is redacted, the query string key SHOULD still be // preserved, e.g. // `https://www.example.com/path?color=blue&sig=REDACTED`. @@ -15250,6 +15861,17 @@ const ( // // This list is subject to change over time. // + // Matching of query parameter keys against the sensitive list SHOULD be + // case-sensitive. + // + // Instrumentation MAY provide a way to override this list via declarative + // configuration. + // If so, it SHOULD use the `sensitive_query_parameters` property + // (an array of case-sensitive strings with minimum items 0) under + // `.instrumentation/development.general.sanitization.url`. + // This list is a full override of the default sensitive query parameter keys, + // it is not a list of keys in addition to the defaults. + // // When a query string value is redacted, the query string key SHOULD still be // preserved, e.g. // `q=OpenTelemetry&sig=REDACTED`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go similarity index 80% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go index 852362ef770..c5c41e4d276 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.39.0 +// patterns for OpenTelemetry things. This package represents the v1.40.0 // version of the OpenTelemetry semantic conventions. 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go similarity index 62% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go index 84cf636a727..6d26e52821b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" import ( + "errors" "reflect" "go.opentelemetry.io/otel/attribute" @@ -14,12 +15,14 @@ import ( // If err is nil, the returned attribute has the default value // [ErrorTypeOther]. // -// If err's type has the method +// If err or one of the errors in its chain has the method // // ErrorType() string // -// then the returned attribute has the value of err.ErrorType(). Otherwise, the -// returned attribute has a value derived from the concrete type of err. +// the returned attribute has that method's return value. If multiple errors in +// the chain implement this method, the value from the first match found by +// [errors.As] is used. Otherwise, the returned attribute has a value derived +// from the concrete type of err. // // The key of the returned attribute is [ErrorTypeKey]. func ErrorType(err error) attribute.KeyValue { @@ -33,8 +36,15 @@ func ErrorType(err error) attribute.KeyValue { func errorType(err error) string { var s string if et, ok := err.(interface{ ErrorType() string }); ok { - // Prioritize the ErrorType method if available. + // Fast path: check the top-level error first. 
s = et.ErrorType() + } else { + // Fallback: search the error chain for an ErrorType method. + var et interface{ ErrorType() string } + if errors.As(err, &et) { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } } if s == "" { // Fallback to reflection if the ErrorType method is not supported or diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go similarity index 74% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go index 7b688ecc33d..6a26231a1af 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" const ( // ExceptionEventName is the name of the Span event representing an exception. 
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go similarity index 100% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go similarity index 71% rename from vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go index e1a199d89bf..a07ffa3361e 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.39.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index 604fdab446c..9316fd0ac45 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index ee6f4bcb2aa..e3d103c4b61 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -12,6 +12,11 @@ const ( // with the sampling bit set means the span is sampled. FlagsSampled = TraceFlags(0x01) + // FlagsRandom is a bitmask with the random trace ID flag set. When + // set, it signals that the trace ID was generated randomly with at + // least 56 bits of randomness (W3C Trace Context Level 2). + FlagsRandom = TraceFlags(0x02) + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" @@ -191,6 +196,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // return tf &^ FlagsSampled } +// IsRandom reports whether the random bit is set in the TraceFlags. +func (tf TraceFlags) IsRandom() bool { + return tf&FlagsRandom == FlagsRandom +} + +// WithRandom sets the random bit in a new copy of the TraceFlags. 
+func (tf TraceFlags) WithRandom(random bool) TraceFlags { // nolint:revive // random is not a control flag. + if random { + return tf | FlagsRandom + } + + return tf &^ FlagsRandom +} + // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { @@ -317,6 +336,11 @@ func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } +// IsRandom reports whether the random bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsRandom() bool { + return sc.traceFlags.IsRandom() +} + // WithTraceFlags returns a new SpanContext with the TraceFlags replaced. func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { return SpanContext{ diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 073adae2faa..e9cb3fd4d1f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -61,7 +61,10 @@ func checkValue(val string) bool { func checkKeyRemain(key string) bool { // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) for _, v := range key { - if isAlphaNum(byte(v)) { + if v > 127 { + return false + } + if isAlphaNumASCII(v) { continue } switch v { @@ -89,7 +92,7 @@ func checkKeyPart(key string, n int) bool { return ret && checkKeyRemain(key[1:]) } -func isAlphaNum(c byte) bool { +func isAlphaNumASCII[T rune | byte](c T) bool { if c >= 'a' && c <= 'z' { return true } @@ -105,7 +108,7 @@ func checkKeyTenant(key string, n int) bool { if key == "" { return false } - return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) + return isAlphaNumASCII(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) } // based on the W3C Trace Context specification diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7c8f50803f7..1db4f47e435 100644 --- 
a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 9daa2df9db0..bcc6ee78a48 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.40.0 + version: v1.43.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.62.0 + version: v0.65.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.16.0 + version: v0.19.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,7 +36,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.14 + version: v0.0.16 modules: - go.opentelemetry.io/otel/schema excluded-modules: @@ -64,3 +64,6 @@ modules: go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: version-refs: - ./internal/version.go + go.opentelemetry.io/otel/exporters/stdout/stdoutlog: + version-refs: + - ./internal/version.go diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s new file mode 100644 index 00000000000..e07fa75eb58 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin && arm64 && gc + +#include "textflag.h" + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index af2aa99f9f0..5fc09e2935d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -44,14 +44,11 @@ func initOptions() { } func archInit() { - switch runtime.GOOS { - case "freebsd": + if runtime.GOOS == "freebsd" { readARM64Registers() - case "linux", "netbsd", "openbsd": + } else { + // Most platforms don't seem to allow directly reading these registers. doinit() - default: - // Many platforms don't seem to allow reading these registers. - setMinimalFeatures() } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go new file mode 100644 index 00000000000..0b470744a0b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && arm64 && gc + +package cpu + +func doinit() { + setMinimalFeatures() + + // The feature flags are explained in [Instruction Set Detection]. + // There are some differences between MacOS versions: + // + // MacOS 11 and 12 do not have "hw.optional" sysctl values for some of the features. + // + // MacOS 13 changed some of the naming conventions to align with ARM Architecture Reference Manual. + // For example "hw.optional.armv8_2_sha512" became "hw.optional.arm.FEAT_SHA512". + // It currently checks both to stay compatible with MacOS 11 and 12. 
+ // The old names also work with MacOS 13, however it's not clear whether + // they will continue working with future OS releases. + // + // Once MacOS 12 is no longer supported the old names can be removed. + // + // [Instruction Set Detection]: https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics + + // Encryption, hashing and checksum capabilities + + // For the following flags there are no MacOS 11 sysctl flags. + ARM64.HasAES = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_AES\x00")) + ARM64.HasPMULL = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_PMULL\x00")) + ARM64.HasSHA1 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA1\x00")) + ARM64.HasSHA2 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA256\x00")) + + ARM64.HasSHA3 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha3\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA3\x00")) + ARM64.HasSHA512 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA512\x00")) + + ARM64.HasCRC32 = darwinSysctlEnabled([]byte("hw.optional.armv8_crc32\x00")) + + // Atomic and memory ordering + ARM64.HasATOMICS = darwinSysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LSE\x00")) + ARM64.HasLRCPC = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LRCPC\x00")) + + // SIMD and floating point capabilities + ARM64.HasFPHP = darwinSysctlEnabled([]byte("hw.optional.neon_fp16\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FP16\x00")) + ARM64.HasASIMDHP = darwinSysctlEnabled([]byte("hw.optional.neon_hpfp\x00")) || darwinSysctlEnabled([]byte("hw.optional.AdvSIMD_HPFPCvt\x00")) + ARM64.HasASIMDRDM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_RDM\x00")) + ARM64.HasASIMDDP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DotProd\x00")) + ARM64.HasASIMDFHM = 
darwinSysctlEnabled([]byte("hw.optional.armv8_2_fhm\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FHM\x00")) + ARM64.HasI8MM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_I8MM\x00")) + + ARM64.HasJSCVT = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_JSCVT\x00")) + ARM64.HasFCMA = darwinSysctlEnabled([]byte("hw.optional.armv8_3_compnum\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FCMA\x00")) + + // Miscellaneous + ARM64.HasDCPOP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DPB\x00")) + ARM64.HasEVTSTRM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_ECV\x00")) + ARM64.HasDIT = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DIT\x00")) + + // Not supported, but added for completeness + ARM64.HasCPUID = false + + ARM64.HasSM3 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM3\x00")) + ARM64.HasSM4 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM4\x00")) + ARM64.HasSVE = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE\x00")) + ARM64.HasSVE2 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE2\x00")) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go new file mode 100644 index 00000000000..4ee68e38d9b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go @@ -0,0 +1,29 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && arm64 && !gc + +package cpu + +func doinit() { + setMinimalFeatures() + + ARM64.HasASIMD = true + ARM64.HasFP = true + + // Go already assumes these to be available because they were on the M1 + // and these are supported on all Apple arm64 chips. 
+	ARM64.HasAES = true
+	ARM64.HasPMULL = true
+	ARM64.HasSHA1 = true
+	ARM64.HasSHA2 = true
+
+	if runtime.GOOS != "ios" {
+		// Apple A7 processors do not support these, however
+		// M-series SoCs are at least armv8.4-a
+		ARM64.HasCRC32 = true // armv8.1
+		ARM64.HasATOMICS = true // armv8.2
+		ARM64.HasJSCVT = true // armv8.3, if HasFP
+	}
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
index 7f1946780bd..05913081ec6 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
@@ -9,3 +9,4 @@ package cpu
 func getisar0() uint64 { return 0 }
 func getisar1() uint64 { return 0 }
 func getpfr0() uint64 { return 0 }
+func getzfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
index 5341e7f88d7..6c7c5bfd533 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !linux && !netbsd && !openbsd && arm64
+//go:build !darwin && !linux && !netbsd && !openbsd && !windows && arm64
 
 package cpu
 
-func doinit() {}
+func doinit() {
+	setMinimalFeatures()
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go
new file mode 100644
index 00000000000..d09e85a3619
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_windows_arm64.go
@@ -0,0 +1,41 @@
+// Copyright 2026 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"golang.org/x/sys/windows"
+)
+
+func doinit() {
+	// set HasASIMD and HasFP to true as per
+	// https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#base-requirements
+	//
+	// The ARM64 version of Windows always presupposes that it's running on an ARMv8 or later architecture.
+	// Both floating-point and NEON support are presumed to be present in hardware.
+	//
+	ARM64.HasASIMD = true
+	ARM64.HasFP = true
+
+	if windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) {
+		ARM64.HasAES = true
+		ARM64.HasPMULL = true
+		ARM64.HasSHA1 = true
+		ARM64.HasSHA2 = true
+	}
+	ARM64.HasSHA3 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE)
+	ARM64.HasCRC32 = windows.IsProcessorFeaturePresent(windows.PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)
+	ARM64.HasSHA512 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE)
+	ARM64.HasATOMICS = windows.IsProcessorFeaturePresent(windows.PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE)
+	if windows.IsProcessorFeaturePresent(windows.PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) {
+		ARM64.HasASIMDDP = true
+		ARM64.HasASIMDRDM = true
+	}
+	if windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE) {
+		ARM64.HasLRCPC = true
+	}
+	ARM64.HasSVE = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE_INSTRUCTIONS_AVAILABLE)
+	ARM64.HasSVE2 = windows.IsProcessorFeaturePresent(windows.PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE)
+	ARM64.HasJSCVT = windows.IsProcessorFeaturePresent(windows.PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE)
+}
diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go
new file mode 100644
index 00000000000..7b4e67ff9c9
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy from internal/cpu and runtime to make sysctl calls. + +//go:build darwin && arm64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type Errno = syscall.Errno + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 5b0759bd865..be0f3fba65e 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -6,9 +6,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. 
@@ -28,6 +26,13 @@ func IoctlSetPointerInt(fd int, req int, value int) error { return ioctlPtr(fd, req, unsafe.Pointer(&v)) } +// IoctlSetString performs an ioctl operation which sets a string value +// on fd, using the specified request number. +func IoctlSetString(fd int, req int, value string) error { + bs := append([]byte(value), 0) + return ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) +} + // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 20f470b9d09..f0c282136db 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -6,9 +6,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. @@ -28,6 +26,13 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctlPtr(fd, req, unsafe.Pointer(&v)) } +// IoctlSetString performs an ioctl operation which sets a string value +// on fd, using the specified request number. +func IoctlSetString(fd int, req uint, value string) error { + bs := append([]byte(value), 0) + return ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) +} + // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. 
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 18a3d9bdabc..a6a2ea0cc05 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1052,14 +1052,6 @@ func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req int, val string) error { - bs := make([]byte, len(val)+1) - copy(bs[:len(bs)-1], val) - err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) - runtime.KeepAlive(&bs[0]) - return err -} - // Lifreq Helpers func (l *Lifreq) SetName(name string) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 4e92e5aa406..de6fccf9aa1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -367,7 +367,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from iov[0].SetLen(len(p)) } var rsa RawSockaddrAny - n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) + if n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa); err != nil { + return + } // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { from, err = anyToSockaddr(fd, &rsa) @@ -389,8 +391,10 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in } } var rsa RawSockaddrAny - n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) - if err == nil && rsa.Addr.Family != AF_UNSPEC { + if n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa); err != nil { + return + } + if rsa.Addr.Family != AF_UNSPEC { from, err = anyToSockaddr(fd, &rsa) } return diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index c1a46701719..45476a73c61 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -593,110 +593,115 @@ const ( ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFAL_LABEL = 0x2 - IFAL_ADDRESS = 0x1 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - 
SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfAddrlblmsg = 0xc - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + 
RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + PREFIX_UNSPEC = 0x0 + PREFIX_ADDRESS = 0x1 + PREFIX_CACHEINFO = 0x2 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofPrefixmsg = 0xc + SizeofPrefixCacheinfo = 0x8 + SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -735,6 +740,22 @@ type IfInfomsg struct { Change uint32 } +type Prefixmsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + Type uint8 + Len uint8 + Flags uint8 + Pad3 uint8 +} + +type PrefixCacheinfo struct { + Preferred_time uint32 + Valid_time uint32 +} + type IfAddrmsg struct { Family uint8 Prefixlen uint8 diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index 16f90560a23..96317966e52 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -8,5 +8,6 @@ package windows import "syscall" +type Signal = syscall.Signal type Errno = syscall.Errno type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 39aeeb644f5..7cc6ff3afa0 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -198,7 +198,20 @@ type KeyInfo struct { // ModTime returns the key's last write time. 
func (ki *KeyInfo) ModTime() time.Time { - return time.Unix(0, ki.lastWriteTime.Nanoseconds()) + lastHigh, lastLow := ki.lastWriteTime.HighDateTime, ki.lastWriteTime.LowDateTime + // 100-nanosecond intervals since January 1, 1601 + hsec := uint64(lastHigh)<<32 + uint64(lastLow) + // Convert _before_ gauging; the nanosecond difference between Epoch (00:00:00 + // UTC, January 1, 1970) and Filetime's zero offset (January 1, 1601) is out + // of bounds for int64: -11644473600*1e7*1e2 < math.MinInt64 + sec := int64(hsec/1e7) - 11644473600 + nsec := int64(hsec%1e7) * 100 + return time.Unix(sec, nsec) +} + +// modTimeZero reports whether the key's last write time is zero. +func (ki *KeyInfo) modTimeZero() bool { + return ki.lastWriteTime.LowDateTime == 0 && ki.lastWriteTime.HighDateTime == 0 } // Stat retrieves information about the open key k. diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 69439df2a46..d766436587f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -900,6 +900,7 @@ const socket_error = uintptr(^uint32(0)) //sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 +//sys IsProcessorFeaturePresent(ProcessorFeature uint32) (ret bool) = kernel32.IsProcessorFeaturePresent // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
@@ -1489,20 +1490,6 @@ func Getgid() (gid int) { return -1 } func Getegid() (egid int) { return -1 } func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } -type Signal int - -func (s Signal) Signal() {} - -func (s Signal) String() string { - if 0 <= s && int(s) < len(signals) { - str := signals[s] - if str != "" { - return str - } - } - return "signal " + itoa(int(s)) -} - func LoadCreateSymbolicLink() error { return procCreateSymbolicLinkW.Find() } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 6e4f50eb483..d5658a138cf 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3938,3 +3938,88 @@ const ( MOUSE_EVENT = 0x0002 WINDOW_BUFFER_SIZE_EVENT = 0x0004 ) + +// The processor features to be tested for IsProcessorFeaturePresent, see +// https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-isprocessorfeaturepresent +const ( + PF_ARM_64BIT_LOADSTORE_ATOMIC = 25 + PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE = 24 + PF_ARM_EXTERNAL_CACHE_AVAILABLE = 26 + PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE = 27 + PF_ARM_VFP_32_REGISTERS_AVAILABLE = 18 + PF_3DNOW_INSTRUCTIONS_AVAILABLE = 7 + PF_CHANNELS_ENABLED = 16 + PF_COMPARE_EXCHANGE_DOUBLE = 2 + PF_COMPARE_EXCHANGE128 = 14 + PF_COMPARE64_EXCHANGE128 = 15 + PF_FASTFAIL_AVAILABLE = 23 + PF_FLOATING_POINT_EMULATED = 1 + PF_FLOATING_POINT_PRECISION_ERRATA = 0 + PF_MMX_INSTRUCTIONS_AVAILABLE = 3 + PF_NX_ENABLED = 12 + PF_PAE_ENABLED = 9 + PF_RDTSC_INSTRUCTION_AVAILABLE = 8 + PF_RDWRFSGSBASE_AVAILABLE = 22 + PF_SECOND_LEVEL_ADDRESS_TRANSLATION = 20 + PF_SSE3_INSTRUCTIONS_AVAILABLE = 13 + PF_SSSE3_INSTRUCTIONS_AVAILABLE = 36 + PF_SSE4_1_INSTRUCTIONS_AVAILABLE = 37 + PF_SSE4_2_INSTRUCTIONS_AVAILABLE = 38 + PF_AVX_INSTRUCTIONS_AVAILABLE = 39 + PF_AVX2_INSTRUCTIONS_AVAILABLE = 40 + PF_AVX512F_INSTRUCTIONS_AVAILABLE = 41 + PF_VIRT_FIRMWARE_ENABLED = 21 + 
PF_XMMI_INSTRUCTIONS_AVAILABLE = 6 + PF_XMMI64_INSTRUCTIONS_AVAILABLE = 10 + PF_XSAVE_ENABLED = 17 + PF_ARM_V8_INSTRUCTIONS_AVAILABLE = 29 + PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE = 30 + PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE = 31 + PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE = 34 + PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE = 43 + PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44 + PF_ARM_V83_LRCPC_INSTRUCTIONS_AVAILABLE = 45 + PF_ARM_SVE_INSTRUCTIONS_AVAILABLE = 46 + PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE = 47 + PF_ARM_SVE2_1_INSTRUCTIONS_AVAILABLE = 48 + PF_ARM_SVE_AES_INSTRUCTIONS_AVAILABLE = 49 + PF_ARM_SVE_PMULL128_INSTRUCTIONS_AVAILABLE = 50 + PF_ARM_SVE_BITPERM_INSTRUCTIONS_AVAILABLE = 51 + PF_ARM_SVE_BF16_INSTRUCTIONS_AVAILABLE = 52 + PF_ARM_SVE_EBF16_INSTRUCTIONS_AVAILABLE = 53 + PF_ARM_SVE_B16B16_INSTRUCTIONS_AVAILABLE = 54 + PF_ARM_SVE_SHA3_INSTRUCTIONS_AVAILABLE = 55 + PF_ARM_SVE_SM4_INSTRUCTIONS_AVAILABLE = 56 + PF_ARM_SVE_I8MM_INSTRUCTIONS_AVAILABLE = 57 + PF_ARM_SVE_F32MM_INSTRUCTIONS_AVAILABLE = 58 + PF_ARM_SVE_F64MM_INSTRUCTIONS_AVAILABLE = 59 + PF_BMI2_INSTRUCTIONS_AVAILABLE = 60 + PF_MOVDIR64B_INSTRUCTION_AVAILABLE = 61 + PF_ARM_LSE2_AVAILABLE = 62 + PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE = 64 + PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE = 65 + PF_ARM_V82_I8MM_INSTRUCTIONS_AVAILABLE = 66 + PF_ARM_V82_FP16_INSTRUCTIONS_AVAILABLE = 67 + PF_ARM_V86_BF16_INSTRUCTIONS_AVAILABLE = 68 + PF_ARM_V86_EBF16_INSTRUCTIONS_AVAILABLE = 69 + PF_ARM_SME_INSTRUCTIONS_AVAILABLE = 70 + PF_ARM_SME2_INSTRUCTIONS_AVAILABLE = 71 + PF_ARM_SME2_1_INSTRUCTIONS_AVAILABLE = 72 + PF_ARM_SME2_2_INSTRUCTIONS_AVAILABLE = 73 + PF_ARM_SME_AES_INSTRUCTIONS_AVAILABLE = 74 + PF_ARM_SME_SBITPERM_INSTRUCTIONS_AVAILABLE = 75 + PF_ARM_SME_SF8MM4_INSTRUCTIONS_AVAILABLE = 76 + PF_ARM_SME_SF8MM8_INSTRUCTIONS_AVAILABLE = 77 + PF_ARM_SME_SF8DP2_INSTRUCTIONS_AVAILABLE = 78 + PF_ARM_SME_SF8DP4_INSTRUCTIONS_AVAILABLE = 79 + PF_ARM_SME_SF8FMA_INSTRUCTIONS_AVAILABLE = 80 + PF_ARM_SME_F8F32_INSTRUCTIONS_AVAILABLE = 81 + 
PF_ARM_SME_F8F16_INSTRUCTIONS_AVAILABLE = 82 + PF_ARM_SME_F16F16_INSTRUCTIONS_AVAILABLE = 83 + PF_ARM_SME_B16B16_INSTRUCTIONS_AVAILABLE = 84 + PF_ARM_SME_F64F64_INSTRUCTIONS_AVAILABLE = 85 + PF_ARM_SME_I16I64_INSTRUCTIONS_AVAILABLE = 86 + PF_ARM_SME_LUTv2_INSTRUCTIONS_AVAILABLE = 87 + PF_ARM_SME_FA64_INSTRUCTIONS_AVAILABLE = 88 + PF_UMONITOR_INSTRUCTION_AVAILABLE = 89 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index f25b7308a1f..fe7a4ea1247 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -320,6 +320,7 @@ var ( procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") procInitializeProcThreadAttributeList = modkernel32.NewProc("InitializeProcThreadAttributeList") + procIsProcessorFeaturePresent = modkernel32.NewProc("IsProcessorFeaturePresent") procIsWow64Process = modkernel32.NewProc("IsWow64Process") procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2") procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") @@ -2786,6 +2787,12 @@ func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrco return } +func IsProcessorFeaturePresent(ProcessorFeature uint32) (ret bool) { + r0, _, _ := syscall.SyscallN(procIsProcessorFeaturePresent.Addr(), uintptr(ProcessorFeature)) + ret = r0 != 0 + return +} + func IsWow64Process(handle Handle, isWow64 *bool) (err error) { var _p0 uint32 if *isWow64 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 55933404c18..1f00426bfb2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1242,7 +1242,7 @@ github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus github.com/thanos-io/promql-engine/warnings -# github.com/thanos-io/thanos 
v0.40.1-0.20260204190131-802f43f3bc64 +# github.com/thanos-io/thanos v0.41.0 ## explicit; go 1.25.0 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block @@ -1492,8 +1492,8 @@ go.opentelemetry.io/contrib/propagators/jaeger # go.opentelemetry.io/contrib/propagators/ot v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/propagators/ot -# go.opentelemetry.io/otel v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute/internal @@ -1501,6 +1501,7 @@ go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/errorhandler go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 @@ -1510,8 +1511,8 @@ go.opentelemetry.io/otel/semconv/v1.30.0 go.opentelemetry.io/otel/semconv/v1.37.0 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv go.opentelemetry.io/otel/semconv/v1.37.0/otelconv -go.opentelemetry.io/otel/semconv/v1.39.0 -go.opentelemetry.io/otel/semconv/v1.39.0/otelconv +go.opentelemetry.io/otel/semconv/v1.40.0 +go.opentelemetry.io/otel/semconv/v1.40.0/otelconv # go.opentelemetry.io/otel/bridge/opentracing v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/bridge/opentracing @@ -1540,13 +1541,13 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x -# go.opentelemetry.io/otel/metric v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/metric v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/metric 
go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/x @@ -1555,8 +1556,8 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/internal/env go.opentelemetry.io/otel/sdk/trace/internal/observ go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/metric v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk/metric v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar go.opentelemetry.io/otel/sdk/metric/internal @@ -1564,8 +1565,8 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/observ go.opentelemetry.io/otel/sdk/metric/internal/reservoir go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/trace v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry @@ -1675,8 +1676,8 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.40.0 -## explicit; go 1.24.0 +# golang.org/x/sys v0.42.0 +## explicit; go 1.25.0 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows @@ -1756,7 +1757,7 @@ google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -# google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 ## explicit; go 1.24.0 
google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails diff --git a/website/package-lock.json b/website/package-lock.json index 879b18eb5ae..d0d16d3742e 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -795,10 +795,11 @@ } }, "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz", + "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==", + "dev": true, + "license": "MIT" }, "node_modules/log-symbols": { "version": "2.2.0", @@ -959,9 +960,9 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", "dev": true, "license": "MIT", "engines": {