diff --git a/.github/workflows/e2e-tests.yaml b/.github/workflows/e2e-tests.yaml
new file mode 100644
index 00000000..cfc09b59
--- /dev/null
+++ b/.github/workflows/e2e-tests.yaml
@@ -0,0 +1,81 @@
+name: E2E Tests
+
+on:
+  push:
+    branches:
+      - main
+      - master
+  pull_request:
+    branches:
+      - main
+      - master
+  workflow_dispatch: # Allow manual trigger
+
+# "checks: write" is required by EnricoMi/publish-unit-test-result-action to create check runs
+permissions:
+  contents: read
+  checks: write
+
+jobs:
+ e2e-tests:
+ name: Run E2E Tests
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ cache: true
+
+ - name: Install dependencies
+ run: |
+ # Install kubebuilder for CRD generation
+ curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/$(go env GOOS)/$(go env GOARCH)
+ chmod +x kubebuilder && sudo mv kubebuilder /usr/local/bin/
+
+ - name: Create kind cluster
+ uses: helm/kind-action@v1
+ with:
+ cluster_name: kind
+ config: scripts/kind-config-ci.yaml
+ wait: 300s
+
+ - name: Verify cluster
+ run: |
+ kubectl cluster-info
+ kubectl get nodes
+ kubectl get pods -A
+
+ - name: Build operator image
+ run: |
+ docker build -t example.com/vector-operator:v0.0.1 .
+
+ - name: Load image into kind
+ run: |
+ kind load docker-image example.com/vector-operator:v0.0.1 --name kind
+
+ - name: Run E2E tests
+ run: make test-e2e
+ env:
+ KUBECONFIG: /home/runner/.kube/config
+
+ - name: Upload test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: e2e-results-${{ github.run_number }}
+ path: test/e2e/results/
+ retention-days: 7
+
+ - name: Publish test results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v2
+ with:
+ files: test/e2e/results/run-*/reports/junit-report.xml
+ check_name: E2E Test Results
+        comment_mode: "off"  # quoted: bare `off` is a YAML 1.1 boolean, not the string the action expects
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
new file mode 100644
index 00000000..b3841489
--- /dev/null
+++ b/.github/workflows/lint.yaml
@@ -0,0 +1,67 @@
+name: Lint
+
+on:
+  push:
+    branches: [main, master]
+  pull_request:
+    branches: [main, master]
+
+permissions:
+ contents: read
+
+jobs:
+ golangci-lint:
+ name: golangci-lint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache: true
+
+ - name: Run golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: v1.64
+ args: --timeout=5m
+
+ go-fmt:
+ name: go fmt
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache: true
+
+ - name: Check formatting
+ run: |
+ if [ -n "$(gofmt -l .)" ]; then
+ echo "The following files are not formatted:"
+ gofmt -l .
+ exit 1
+ fi
+
+ go-vet:
+ name: go vet
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+ cache: true
+
+ - name: Run go vet
+ run: go vet ./...
diff --git a/.gitignore b/.gitignore
index 37afa32b..3e3a6cd5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,6 @@ testbin/*
__debug_bin
vendor
+
+# E2E test results and artifacts
+test/e2e/results/
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..11c66482
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,25 @@
+run:
+ timeout: 5m
+ modules-download-mode: readonly
+
+linters:
+ enable:
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+ - errcheck
+ - gofmt
+ - goimports
+
+linters-settings:
+ goimports:
+ local-prefixes: github.com/kaasops/vector-operator
+
+issues:
+ exclude-rules:
+ # Exclude some linters from running on tests files
+ - path: _test\.go
+ linters:
+ - errcheck
diff --git a/Makefile b/Makefile
index c05a018a..833ccf93 100644
--- a/Makefile
+++ b/Makefile
@@ -64,10 +64,79 @@ vet: ## Run go vet against code.
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out
-# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors.
-.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up.
-test-e2e:
- go test ./test/e2e/ -v -ginkgo.v
+# E2E test configuration
+E2E_FAIL_FAST ?= false
+E2E_RUN_DESCRIPTION ?=
+E2E_LABEL_FILTER ?=
+NAMESPACE ?= vector
+
+.PHONY: test-e2e # Run e2e tests with comprehensive reporting (JUnit XML + JSON + logs + artifacts)
+test-e2e: ginkgo
+ @TIMESTAMP=$$(date +%Y-%m-%d-%H%M%S); \
+ RUN_DIR="test/e2e/results/run-$$TIMESTAMP"; \
+ echo "==> Running e2e tests..."; \
+ echo "==> Results will be saved to: $$RUN_DIR"; \
+ mkdir -p "$$RUN_DIR/reports"; \
+ export E2E_ARTIFACTS_DIR="$$RUN_DIR/artifacts"; \
+ export E2E_ARTIFACTS_ENABLED=true; \
+ export E2E_GIT_COMMIT=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
+ export E2E_GIT_BRANCH=$$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown"); \
+ export E2E_GIT_DIRTY=$$(git diff --quiet 2>/dev/null || echo "dirty"; git diff --cached --quiet 2>/dev/null || echo "staged"); \
+ export E2E_RUN_DESCRIPTION="$(E2E_RUN_DESCRIPTION)"; \
+ echo "==> Git info: commit=$$E2E_GIT_COMMIT branch=$$E2E_GIT_BRANCH dirty=$$E2E_GIT_DIRTY"; \
+ if [ -n "$$E2E_RUN_DESCRIPTION" ]; then \
+ echo "==> Run description: $$E2E_RUN_DESCRIPTION"; \
+ fi; \
+ GINKGO_FLAGS="-v -timeout=30m"; \
+ if [ "$(E2E_FAIL_FAST)" = "true" ]; then \
+ echo "==> Fail-fast mode enabled (stop on first failure)"; \
+ GINKGO_FLAGS="$$GINKGO_FLAGS --fail-fast"; \
+ fi; \
+ if [ -n "$(E2E_LABEL_FILTER)" ]; then \
+ echo "==> Label filter: $(E2E_LABEL_FILTER)"; \
+		GINKGO_FLAGS="$$GINKGO_FLAGS --label-filter=$(E2E_LABEL_FILTER)"; \
+ fi; \
+	( cd test/e2e && $(GINKGO) $$GINKGO_FLAGS \
+		--junit-report="../../$$RUN_DIR/reports/junit-report.xml" \
+		--json-report="../../$$RUN_DIR/reports/report.json" \
+		> "../../$$RUN_DIR/reports/test-output.log" 2>&1 ); \
+	EXIT_CODE=$$?; cat "$$RUN_DIR/reports/test-output.log"; \
+ echo ""; \
+ echo "==> Test run complete!"; \
+ echo "==> All results in one place: $$RUN_DIR"; \
+ echo " Reports:"; \
+ echo " - JUnit XML: $$RUN_DIR/reports/junit-report.xml"; \
+ echo " - JSON: $$RUN_DIR/reports/report.json"; \
+ echo " - Logs: $$RUN_DIR/reports/test-output.log"; \
+ if [ -d "$$RUN_DIR/artifacts" ] && [ "$$(find $$RUN_DIR/artifacts -mindepth 1 -maxdepth 1 2>/dev/null | wc -l)" -gt 1 ]; then \
+ echo " Artifacts: $$RUN_DIR/artifacts/ (collected for failed tests)"; \
+ else \
+ echo " Artifacts: None (all tests passed)"; \
+ fi; \
+ echo ""; \
+ echo "Quick commands:"; \
+ echo " View summary: cat $$RUN_DIR/artifacts/metadata.json 2>/dev/null || echo 'All tests passed'"; \
+ echo " View failures: grep -A 5 'FAILED' $$RUN_DIR/reports/test-output.log 2>/dev/null || echo 'No failures'"; \
+ exit $$EXIT_CODE
+
+.PHONY: test-report
+test-report: ## Generate interactive HTML report from e2e test results
+ @echo "==> Generating test report..."
+ @cd test/e2e/results && python3 ../scripts/generate_report.py
+ @echo "==> Report generated: test/e2e/results/test_results_report.html"
+
+.PHONY: deploy-helm-e2e
+deploy-helm-e2e: manifests ## Deploy operator using Helm for e2e tests (use IMG and NAMESPACE variables)
+ @echo "==> Installing CRDs..."
+ $(KUBECTL) apply -f config/crd/bases
+ @echo "==> Creating namespace $(NAMESPACE)..."
+ $(KUBECTL) create namespace $(NAMESPACE) || true
+ @echo "==> Deploying operator via Helm to namespace $(NAMESPACE)..."
+ helm upgrade --install vector-operator ./helm/charts/vector-operator \
+ --namespace $(NAMESPACE) \
+		--set image.repository=$$(echo "$(IMG)" | sed 's|:[^:/]*$$||') \
+		--set image.tag=$$(echo "$(IMG)" | sed 's|.*:||') \
+ --wait --timeout 5m
.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
@@ -160,12 +229,14 @@ KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
+GINKGO ?= $(LOCALBIN)/ginkgo
## Tool Versions
KUSTOMIZE_VERSION ?= v5.4.3
CONTROLLER_TOOLS_VERSION ?= v0.16.1
ENVTEST_VERSION ?= release-0.19
-GOLANGCI_LINT_VERSION ?= v1.59.1
+GOLANGCI_LINT_VERSION ?= v1.64.8
+GINKGO_VERSION ?= v2.20.2
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
@@ -187,6 +258,11 @@ golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
+.PHONY: ginkgo
+ginkgo: $(GINKGO) ## Download ginkgo locally if necessary.
+$(GINKGO): $(LOCALBIN)
+ $(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo,$(GINKGO_VERSION))
+
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary
# $2 - package url which can be installed
diff --git a/api/v1alpha1/clustervectorpipeline.go b/api/v1alpha1/clustervectorpipeline.go
index da07cbe8..02a40016 100644
--- a/api/v1alpha1/clustervectorpipeline.go
+++ b/api/v1alpha1/clustervectorpipeline.go
@@ -2,10 +2,12 @@ package v1alpha1
import (
"context"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (vp *ClusterVectorPipeline) GetSpec() VectorPipelineSpec {
diff --git a/api/v1alpha1/vectorpipeline.go b/api/v1alpha1/vectorpipeline.go
index 5fa963c0..a21b295c 100644
--- a/api/v1alpha1/vectorpipeline.go
+++ b/api/v1alpha1/vectorpipeline.go
@@ -2,10 +2,12 @@ package v1alpha1
import (
"context"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
type VectorPipelineRole string
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 88f3346a..5b439271 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -21,7 +21,7 @@ limitations under the License.
package v1alpha1
import (
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
diff --git a/cmd/event_collector/main.go b/cmd/event_collector/main.go
index ae2f07f0..8d986c0c 100644
--- a/cmd/event_collector/main.go
+++ b/cmd/event_collector/main.go
@@ -5,20 +5,22 @@ import (
"errors"
"flag"
"fmt"
- "github.com/fsnotify/fsnotify"
- "github.com/kaasops/vector-operator/internal/buildinfo"
- "github.com/kaasops/vector-operator/internal/evcollector"
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/spf13/viper"
- "k8s.io/client-go/kubernetes"
"log/slog"
"net"
"net/http"
"os"
"os/signal"
- ctrl "sigs.k8s.io/controller-runtime"
"strings"
"syscall"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/spf13/viper"
+ "k8s.io/client-go/kubernetes"
+ ctrl "sigs.k8s.io/controller-runtime"
+
+ "github.com/kaasops/vector-operator/internal/buildinfo"
+ "github.com/kaasops/vector-operator/internal/evcollector"
)
func main() {
@@ -112,7 +114,7 @@ func main() {
http.Handle("/metrics", promhttp.Handler())
go func() {
- if err = http.ListenAndServe(net.JoinHostPort("", *port), nil); err != nil && !errors.Is(http.ErrServerClosed, err) {
+ if err = http.ListenAndServe(net.JoinHostPort("", *port), nil); err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Error("failed to start http server", "error", err)
os.Exit(1)
}
diff --git a/cmd/evgen/main.go b/cmd/evgen/main.go
index 21c50a48..41c0ef94 100644
--- a/cmd/evgen/main.go
+++ b/cmd/evgen/main.go
@@ -4,11 +4,12 @@ import (
"context"
"flag"
"fmt"
- "k8s.io/apimachinery/pkg/util/rand"
- ctrl "sigs.k8s.io/controller-runtime"
"sync"
"time"
+ "k8s.io/apimachinery/pkg/util/rand"
+ ctrl "sigs.k8s.io/controller-runtime"
+
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -34,8 +35,8 @@ func main() {
wg := sync.WaitGroup{}
for i := 0; i < *workers; i++ {
+ wg.Add(1)
go func() {
- wg.Add(1)
defer wg.Done()
clientset, err := kubernetes.NewForConfig(config)
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index 2f8ea057..61be4773 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -21,10 +21,11 @@ import (
"crypto/tls"
"flag"
"fmt"
- "github.com/kaasops/vector-operator/internal/buildinfo"
"os"
"time"
+ "github.com/kaasops/vector-operator/internal/buildinfo"
+
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/docs/ci-cd.md b/docs/ci-cd.md
new file mode 100644
index 00000000..5a4e3dbb
--- /dev/null
+++ b/docs/ci-cd.md
@@ -0,0 +1,174 @@
+# CI/CD Documentation
+
+## GitHub Actions Workflows
+
+### E2E Tests Workflow
+
+The E2E tests workflow automatically runs end-to-end tests for every pull request and push to the main branch.
+
+**Workflow File:** `.github/workflows/e2e-tests.yaml`
+
+#### Triggers
+
+- **Push to main/master**: Runs on every push to the main or master branch
+- **Pull Requests**: Runs on PRs targeting main/master
+- **Manual**: Can be triggered manually via GitHub Actions UI (workflow_dispatch)
+
+#### Workflow Steps
+
+1. **Checkout code**: Clones the repository
+2. **Set up Go**: Installs Go using version from `go.mod`
+3. **Install dependencies**: Installs kubebuilder for CRD generation
+4. **Create kind cluster**: Creates a single-node Kubernetes cluster using `scripts/kind-config-ci.yaml`
+5. **Verify cluster**: Checks cluster health and connectivity
+6. **Build image**: Builds operator Docker image
+7. **Load image**: Loads image into the kind cluster
+8. **Run E2E tests**: Executes `make test-e2e` with JUnit reporting
+9. **Upload test results**: Saves test results as artifacts (retained for 7 days)
+10. **Publish test results**: Publishes JUnit results as GitHub check
+
+#### Configuration
+
+**Kind Cluster (CI):** `scripts/kind-config-ci.yaml`
+- Single control-plane node
+- Control-plane allows scheduling workloads for faster execution
+- Port mappings for ingress (80, 443)
+
+#### Test Reports
+
+Test results are available in multiple formats:
+
+1. **JUnit XML**: `test/e2e/results/run-*/reports/junit-report.xml`
+ - Machine-readable format
+ - Used by GitHub Actions to display test results
+
+2. **JSON Report**: `test/e2e/results/run-*/reports/report.json`
+ - Detailed test execution data
+ - Suitable for programmatic analysis
+
+3. **Plain text log**: `test/e2e/results/run-*/reports/test-output.log`
+ - Human-readable test output
+ - Contains full test execution logs
+
+4. **HTML Report**: Generated via `make test-report`
+ - Interactive visualization
+ - Requires Python 3
+
+#### Artifacts
+
+**Test Results** (7 days retention):
+- JUnit XML report
+- JSON report
+- Plain text test output
+- Failure artifacts (pod logs, events, resource states)
+- Available for all workflow runs
+
+#### Viewing Results
+
+1. **GitHub UI**:
+ - Go to Actions tab → E2E Tests workflow
+ - Click on a specific run to view results
+
+2. **PR Checks**:
+ - Test results appear as a check on PRs
+ - Click "Details" to view full report
+
+#### Running E2E Tests Locally
+
+```bash
+# Run e2e tests with full reporting
+make test-e2e
+
+# Run with fail-fast (stop on first failure)
+make test-e2e E2E_FAIL_FAST=true
+
+# Run with label filter
+make test-e2e E2E_LABEL_FILTER="smoke"
+
+# Run with description
+make test-e2e E2E_RUN_DESCRIPTION="Testing new feature"
+
+# Generate HTML report from results
+make test-report
+```
+
+#### Troubleshooting
+
+**Tests fail in CI but pass locally:**
+- Check timing issues (CI may be slower)
+- Verify kind-config-ci.yaml configuration
+- Check resource limits in CI environment
+
+**Cluster creation timeout:**
+- Increase `wait` timeout in workflow
+- Check Docker daemon health in CI
+- Verify kind version compatibility
+
+**Image loading fails:**
+- Ensure Docker build succeeds
+- Check image names match between build and load steps
+- Verify kind cluster name is correct
+
+**Tests timeout:**
+- Default timeout is 30 minutes
+- Adjust `timeout-minutes` in workflow if needed
+- Check for hanging pods or resources
+
+#### Manual Trigger
+
+To manually trigger the E2E tests workflow:
+
+1. Go to Actions tab in GitHub
+2. Select "E2E Tests" workflow
+3. Click "Run workflow" button
+4. Select branch and click "Run workflow"
+
+#### Performance
+
+**Typical execution time:**
+- Cluster creation: ~1-2 minutes
+- Image build: ~2-3 minutes
+- Image load: ~30 seconds
+- E2E tests: ~10-15 minutes
+- **Total: ~15-20 minutes**
+
+### Lint Workflow
+
+**Workflow File:** `.github/workflows/lint.yaml`
+
+#### Jobs
+
+1. **golangci-lint**: Runs golangci-lint with project configuration
+2. **go fmt**: Checks code formatting
+3. **go vet**: Runs Go static analysis
+
+#### Configuration
+
+Linter configuration is defined in `.golangci.yml`:
+
+```yaml
+linters:
+ enable:
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+ - errcheck
+ - gofmt
+ - goimports
+
+linters-settings:
+ goimports:
+ local-prefixes: github.com/kaasops/vector-operator
+```
+
+#### Running Locally
+
+```bash
+# Run linter
+make lint
+
+# Run linter with auto-fix
+make lint-fix
+```
diff --git a/docs/specification.md b/docs/specification.md
index dea9e45c..5eb3000c 100644
--- a/docs/specification.md
+++ b/docs/specification.md
@@ -115,7 +115,7 @@
address
-
The network address to which the API should bind. If you’re running Vector in a Docker container, make sure to bind to 0.0.0.0. Otherwise the API will not be exposed outside the container. By default - 0.0.0.0:8686
+
The network address to which the API should bind. Uses dual-stack IPv6/IPv4 binding (::) by default, which accepts connections on both IPv4 and IPv6. By default - [::]:8686
enabled
diff --git a/go.mod b/go.mod
index 2e47d2db..c618e4a8 100644
--- a/go.mod
+++ b/go.mod
@@ -7,14 +7,14 @@ require (
github.com/fsnotify/fsnotify v1.7.0
github.com/go-logr/logr v1.4.2
github.com/mitchellh/mapstructure v1.5.0
- github.com/onsi/ginkgo/v2 v2.19.0
- github.com/onsi/gomega v1.33.1
+ github.com/onsi/ginkgo/v2 v2.20.2
+ github.com/onsi/gomega v1.34.1
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.60.1
github.com/prometheus/client_golang v1.19.1
github.com/spf13/viper v1.19.0
github.com/stoewer/go-strcase v1.2.0
github.com/stretchr/testify v1.9.0
- golang.org/x/sync v0.7.0
+ golang.org/x/sync v0.8.0
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
@@ -58,7 +58,7 @@ require (
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect
+ github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
@@ -109,14 +109,14 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
- golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
- golang.org/x/net v0.26.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+ golang.org/x/net v0.28.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/term v0.21.0 // indirect
- golang.org/x/text v0.16.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/term v0.23.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+ golang.org/x/tools v0.24.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
diff --git a/go.sum b/go.sum
index f749aa74..c6e55c2d 100644
--- a/go.sum
+++ b/go.sum
@@ -191,8 +191,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -261,10 +261,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -412,8 +412,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
-golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -467,8 +467,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -487,8 +487,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -528,19 +528,19 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -588,8 +588,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/helm/charts/vector-operator/Chart.yaml b/helm/charts/vector-operator/Chart.yaml
index a560dc3d..beafa686 100644
--- a/helm/charts/vector-operator/Chart.yaml
+++ b/helm/charts/vector-operator/Chart.yaml
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: "0.7.1"
+version: "0.7.2"
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v0.3.2"
+appVersion: "v0.3.3"
home: https://github.com/kaasops/vector-operator
sources:
diff --git a/helm/index.yaml b/helm/index.yaml
index 35cb2a4c..75d50fd5 100644
--- a/helm/index.yaml
+++ b/helm/index.yaml
@@ -1,9 +1,22 @@
apiVersion: v1
entries:
vector-operator:
+ - apiVersion: v2
+ appVersion: v0.3.3
+ created: "2025-11-17T12:42:54.486235+02:00"
+ description: A Helm chart to install Vector Operator
+ digest: d1e04fd4039e06ce24d89feb9707a7f4a65f3fd4b2bec6f4f0d937b4c9775c4f
+ home: https://github.com/kaasops/vector-operator
+ name: vector-operator
+ sources:
+ - https://github.com/kaasops/vector-operator
+ type: application
+ urls:
+ - https://kaasops.github.io/vector-operator/helm/packages/vector-operator-0.7.2.tgz
+ version: 0.7.2
- apiVersion: v2
appVersion: v0.3.2
- created: "2025-10-03T11:33:29.251489168+03:00"
+ created: "2025-11-17T12:42:54.484496+02:00"
description: A Helm chart to install Vector Operator
digest: 94e6f3d7ad7f41a8edf03e72ffe2f2586f9d43d0762899025a274b1c2329088c
home: https://github.com/kaasops/vector-operator
@@ -16,7 +29,7 @@ entries:
version: 0.7.1
- apiVersion: v2
appVersion: v0.3.2
- created: "2025-10-03T11:33:29.253969117+03:00"
+ created: "2025-11-17T12:42:54.487982+02:00"
description: A Helm chart to install Vector Operator
digest: 67fbdd5181070c542bc7b52457ff15962d6b1dcefe495939f076703f71cd0bde
home: https://github.com/kaasops/vector-operator
@@ -29,7 +42,7 @@ entries:
version: "0.7"
- apiVersion: v2
appVersion: v0.3.0
- created: "2025-10-03T11:33:29.249246225+03:00"
+ created: "2025-11-17T12:42:54.482541+02:00"
description: A Helm chart to install Vector Operator
digest: 69262a286e22bfbf7571297f05d17dfc8f19e6215faa42f4dbdabea8a5610586
home: https://github.com/kaasops/vector-operator
@@ -42,7 +55,7 @@ entries:
version: "0.6"
- apiVersion: v2
appVersion: v0.2.0
- created: "2025-10-03T11:33:29.24645919+03:00"
+ created: "2025-11-17T12:42:54.480921+02:00"
description: A Helm chart to install Vector Operator
digest: f0e89cc2f3b641588e603107ba4aedc5f1ec585452c88bac46784226e56751e2
home: https://github.com/kaasops/vector-operator
@@ -55,7 +68,7 @@ entries:
version: "0.5"
- apiVersion: v2
appVersion: v0.1.2
- created: "2025-10-03T11:33:29.244146612+03:00"
+ created: "2025-11-17T12:42:54.479082+02:00"
description: A Helm chart to install Vector Operator
digest: e1fe0e96c146c7c275c181e727c8a60f21898cabe90629851a2920d2915f84b7
home: https://github.com/kaasops/vector-operator
@@ -68,7 +81,7 @@ entries:
version: "0.4"
- apiVersion: v2
appVersion: v0.1.1
- created: "2025-10-03T11:33:29.241579734+03:00"
+ created: "2025-11-17T12:42:54.477278+02:00"
description: A Helm chart to install Vector Operator
digest: a916c9e9f81bdbf9f734073fb453a6b67d7a724ed7ff4326d7884b136c103ce5
home: https://github.com/kaasops/vector-operator
@@ -81,7 +94,7 @@ entries:
version: "0.3"
- apiVersion: v2
appVersion: v0.1.1
- created: "2025-10-03T11:33:29.23920403+03:00"
+ created: "2025-11-17T12:42:54.475585+02:00"
description: A Helm chart to install Vector Operator
digest: 582d95c6f63134f6cd815bcb85adce5770e179de21a979ec76f91ab7d8531b45
home: https://github.com/kaasops/vector-operator
@@ -94,7 +107,7 @@ entries:
version: "0.2"
- apiVersion: v2
appVersion: v0.1.1
- created: "2025-10-03T11:33:29.237032712+03:00"
+ created: "2025-11-17T12:42:54.473719+02:00"
description: A Helm chart to install Vector Operator
digest: 3ac5a422f3f1861528f737a0fea077cad5f7b7516db3fe5d392c887d5d3459d5
home: https://github.com/kaasops/vector-operator
@@ -107,7 +120,7 @@ entries:
version: 0.1.1
- apiVersion: v2
appVersion: v0.1.0
- created: "2025-10-03T11:33:29.233939161+03:00"
+ created: "2025-11-17T12:42:54.471729+02:00"
description: A Helm chart to install Vector Operator
digest: 191ec4f83f11541df19680ce220992c84fb10210f0d54a5acc29361ccfd787bb
home: https://github.com/kaasops/vector-operator
@@ -120,7 +133,7 @@ entries:
version: 0.1.0
- apiVersion: v2
appVersion: pre-v0.1.0-r1
- created: "2025-10-03T11:33:29.231243725+03:00"
+ created: "2025-11-17T12:42:54.469521+02:00"
description: A Helm chart to install Vector Operator
digest: 01bd2e347c5782127511a0a0fbbab72508cbe667664f2c9b615cabb80a4c40c7
home: https://github.com/kaasops/vector-operator
@@ -133,7 +146,7 @@ entries:
version: 0.1.0-rc1
- apiVersion: v2
appVersion: v0.0.40
- created: "2025-10-03T11:33:29.226307689+03:00"
+ created: "2025-11-17T12:42:54.46547+02:00"
description: A Helm chart to install Vector Operator
digest: c50e673e811b8d4c03ad45c92ce23b9bc54eef4665091e70fab9a4b7e4c6c3f1
home: https://github.com/kaasops/vector-operator
@@ -146,7 +159,7 @@ entries:
version: 0.0.40
- apiVersion: v2
appVersion: v0.0.39
- created: "2025-10-03T11:33:29.225419535+03:00"
+ created: "2025-11-17T12:42:54.464527+02:00"
description: A Helm chart to install Vector Operator
digest: e1de38c869bf896bfb9f5f615c329d3ccce3bd91cb64bb6fa1c783a40ea290a2
home: https://github.com/kaasops/vector-operator
@@ -159,7 +172,7 @@ entries:
version: 0.0.39
- apiVersion: v2
appVersion: v0.0.38
- created: "2025-10-03T11:33:29.224540967+03:00"
+ created: "2025-11-17T12:42:54.463849+02:00"
description: A Helm chart to install Vector Operator
digest: 565b148184400900f5572b06ceebaac6340af5e5fd1122b308bdbfcbc2d2040a
home: https://github.com/kaasops/vector-operator
@@ -172,7 +185,7 @@ entries:
version: 0.0.38
- apiVersion: v2
appVersion: v0.0.37
- created: "2025-10-03T11:33:29.223624926+03:00"
+ created: "2025-11-17T12:42:54.463138+02:00"
description: A Helm chart to install Vector Operator
digest: 65e10ee46e6855ba95f51e21ca14abc4411191b260c6b96ec72c775e73c1e331
home: https://github.com/kaasops/vector-operator
@@ -185,7 +198,7 @@ entries:
version: 0.0.37
- apiVersion: v2
appVersion: v0.0.36
- created: "2025-10-03T11:33:29.222291666+03:00"
+ created: "2025-11-17T12:42:54.46229+02:00"
description: A Helm chart to install Vector Operator
digest: 972b6b4048b6d17b0616786e49915ef52cb7c9573cfb6eb359b6c19b66eabe31
home: https://github.com/kaasops/vector-operator
@@ -198,7 +211,7 @@ entries:
version: 0.0.36
- apiVersion: v2
appVersion: v0.0.35
- created: "2025-10-03T11:33:29.221443038+03:00"
+ created: "2025-11-17T12:42:54.461582+02:00"
description: A Helm chart to install Vector Operator
digest: d03ba759c42f2bd8d8f1df71702ee4f26a73b8bc28760ca7254af20d811cee8a
home: https://github.com/kaasops/vector-operator
@@ -211,7 +224,7 @@ entries:
version: 0.0.35
- apiVersion: v2
appVersion: v0.0.34
- created: "2025-10-03T11:33:29.220522814+03:00"
+ created: "2025-11-17T12:42:54.460887+02:00"
description: A Helm chart to install Vector Operator
digest: 01e9d488ee78c8603d821f96344edee568ebcb42049d586de37b8df39b372bd4
home: https://github.com/kaasops/vector-operator
@@ -224,7 +237,7 @@ entries:
version: 0.0.34
- apiVersion: v2
appVersion: v0.0.33
- created: "2025-10-03T11:33:29.219572877+03:00"
+ created: "2025-11-17T12:42:54.460169+02:00"
description: A Helm chart to install Vector Operator
digest: fcde3c94a0fa6caa5f3d333226c95b7c85ede8489d46277e1222a868ed4ec8c3
home: https://github.com/kaasops/vector-operator
@@ -237,7 +250,7 @@ entries:
version: 0.0.33
- apiVersion: v2
appVersion: v0.0.32
- created: "2025-10-03T11:33:29.218359973+03:00"
+ created: "2025-11-17T12:42:54.458973+02:00"
description: A Helm chart to install Vector Operator
digest: 26323037ec47f1703ea930a99ab4ec8fb93b44975ce969514ea68d4130017015
home: https://github.com/kaasops/vector-operator
@@ -250,7 +263,7 @@ entries:
version: 0.0.32
- apiVersion: v2
appVersion: v0.0.31
- created: "2025-10-03T11:33:29.217283384+03:00"
+ created: "2025-11-17T12:42:54.457945+02:00"
description: A Helm chart to install Vector Operator
digest: 45b924c07a825e0f7cd3fb534a6ffd16604790d13be1aff59150c045474754e3
home: https://github.com/kaasops/vector-operator
@@ -263,7 +276,7 @@ entries:
version: 0.0.31
- apiVersion: v2
appVersion: v0.0.30
- created: "2025-10-03T11:33:29.216352085+03:00"
+ created: "2025-11-17T12:42:54.457232+02:00"
description: A Helm chart to install Vector Operator
digest: 03beda549d15f50325028ea29af5f2065ac0b8adf3078bf7dc1312981aa5e7db
home: https://github.com/kaasops/vector-operator
@@ -276,7 +289,7 @@ entries:
version: 0.0.30
- apiVersion: v2
appVersion: v0.0.29
- created: "2025-10-03T11:33:29.21547395+03:00"
+ created: "2025-11-17T12:42:54.456151+02:00"
description: A Helm chart to install Vector Operator
digest: 0f025fc3a924b37b8c4131c4d8cfa437d2d4e557ab9476ed3e69a00232c7dca6
home: https://github.com/kaasops/vector-operator
@@ -289,7 +302,7 @@ entries:
version: 0.0.29
- apiVersion: v2
appVersion: v0.0.28
- created: "2025-10-03T11:33:29.214044879+03:00"
+ created: "2025-11-17T12:42:54.455461+02:00"
description: A Helm chart to install Vector Operator
digest: af856d41314313e04f15e7143409a9c564c6ca610b0d2eaec3112add8573e668
home: https://github.com/kaasops/vector-operator
@@ -302,7 +315,7 @@ entries:
version: 0.0.28
- apiVersion: v2
appVersion: v0.0.27
- created: "2025-10-03T11:33:29.212974647+03:00"
+ created: "2025-11-17T12:42:54.454738+02:00"
description: A Helm chart to install Vector Operator
digest: 631e2ff02bbd7f247cb486494fd2af60c57cc551066a6a3858226551bc1745a4
home: https://github.com/kaasops/vector-operator
@@ -315,7 +328,7 @@ entries:
version: 0.0.27
- apiVersion: v2
appVersion: v0.0.26
- created: "2025-10-03T11:33:29.212181212+03:00"
+ created: "2025-11-17T12:42:54.453874+02:00"
description: A Helm chart to install Vector Operator
digest: 760a2833f4c1a33466982419b079ff18d996331ebacc40cf93b0f55229cdb7db
home: https://github.com/kaasops/vector-operator
@@ -328,7 +341,7 @@ entries:
version: 0.0.26
- apiVersion: v2
appVersion: v0.0.25
- created: "2025-10-03T11:33:29.211347663+03:00"
+ created: "2025-11-17T12:42:54.453002+02:00"
description: A Helm chart to install Vector Operator
digest: fd22b996b071b6d85740ccf76e85cb640fa717c2620748d206d3f4fdd44cbcc2
home: https://github.com/kaasops/vector-operator
@@ -341,7 +354,7 @@ entries:
version: 0.0.25
- apiVersion: v2
appVersion: v0.0.24
- created: "2025-10-03T11:33:29.210075092+03:00"
+ created: "2025-11-17T12:42:54.452119+02:00"
description: A Helm chart to install Vector Operator
digest: ea257e60ecde063a0d1ed52ce5e3283245b8f0e2daba58ea3a5adb0ba82d7799
home: https://github.com/kaasops/vector-operator
@@ -354,7 +367,7 @@ entries:
version: 0.0.24
- apiVersion: v2
appVersion: v0.0.23
- created: "2025-10-03T11:33:29.20912157+03:00"
+ created: "2025-11-17T12:42:54.451084+02:00"
description: A Helm chart to install Vector Operator
digest: 546d202b3b9263f789b88335263191098dfcabd5d8698105f37cad24d56a8ed0
home: https://github.com/kaasops/vector-operator
@@ -367,7 +380,7 @@ entries:
version: 0.0.23
- apiVersion: v2
appVersion: v0.0.22
- created: "2025-10-03T11:33:29.208283787+03:00"
+ created: "2025-11-17T12:42:54.450342+02:00"
description: A Helm chart to install Vector Operator
digest: bf96ddc8ac61e9d6beb8bc763fbf3fa6025d950b29d70d80de6e8a0ea45e0411
home: https://github.com/kaasops/vector-operator
@@ -380,7 +393,7 @@ entries:
version: 0.0.22
- apiVersion: v2
appVersion: v0.0.21
- created: "2025-10-03T11:33:29.207029762+03:00"
+ created: "2025-11-17T12:42:54.44935+02:00"
description: A Helm chart to install Vector Operator
digest: d37b3064c0374d71e06c0131bcac2bf9e60ec4d62fcbbb20704c5277eabd899d
home: https://github.com/kaasops/vector-operator
@@ -393,7 +406,7 @@ entries:
version: 0.0.21
- apiVersion: v2
appVersion: v0.0.20
- created: "2025-10-03T11:33:29.206084826+03:00"
+ created: "2025-11-17T12:42:54.448068+02:00"
description: A Helm chart to install Vector Operator
digest: b95cd9ea8b74fde85175411129f77bf7a7afb4e9324ba2d02d489d0d6ef42d6d
home: https://github.com/kaasops/vector-operator
@@ -406,7 +419,7 @@ entries:
version: 0.0.20
- apiVersion: v2
appVersion: v0.0.19
- created: "2025-10-03T11:33:29.205569011+03:00"
+ created: "2025-11-17T12:42:54.447643+02:00"
description: A Helm chart to install Vector Operator
digest: bc1acd8b21a95e373702daa9c4ce4226b28f56b9c9299482d47b200baddbec14
home: https://github.com/kaasops/vector-operator
@@ -419,7 +432,7 @@ entries:
version: 0.0.19
- apiVersion: v2
appVersion: v0.0.18
- created: "2025-10-03T11:33:29.205016352+03:00"
+ created: "2025-11-17T12:42:54.447209+02:00"
description: A Helm chart to install Vector Operator
digest: 2bf9cde6eec7b00bfc70d7ac79b1e9d4bf3a406749c6b2bd816f20efd0cb44c3
home: https://github.com/kaasops/vector-operator
@@ -432,7 +445,7 @@ entries:
version: 0.0.18
- apiVersion: v2
appVersion: v0.0.17
- created: "2025-10-03T11:33:29.204483226+03:00"
+ created: "2025-11-17T12:42:54.44676+02:00"
description: A Helm chart to install Vector Operator
digest: edb51a059b9231f9bc2e2e0dd82c432d0e799a6767a7829ee113054478e098ed
home: https://github.com/kaasops/vector-operator
@@ -445,7 +458,7 @@ entries:
version: 0.0.17
- apiVersion: v2
appVersion: v0.0.16
- created: "2025-10-03T11:33:29.203627979+03:00"
+ created: "2025-11-17T12:42:54.445975+02:00"
description: A Helm chart to install Vector Operator
digest: 06e33602d72c44cf6779152df4936133ed87e228dd71cbb6615aa4c2666a1ee1
home: https://github.com/kaasops/vector-operator
@@ -458,7 +471,7 @@ entries:
version: 0.0.16
- apiVersion: v2
appVersion: v0.0.15
- created: "2025-10-03T11:33:29.203008445+03:00"
+ created: "2025-11-17T12:42:54.44548+02:00"
description: A Helm chart to install Vector Operator
digest: 6c9f5ba7a914329caa4f93342d3415fcf4e5fe39f5b7db69173896ea13a47c5b
home: https://github.com/kaasops/vector-operator
@@ -471,7 +484,7 @@ entries:
version: 0.0.15
- apiVersion: v2
appVersion: v0.0.14
- created: "2025-10-03T11:33:29.20241002+03:00"
+ created: "2025-11-17T12:42:54.44504+02:00"
description: A Helm chart to install Vector Operator
digest: 9f7a3b66247dea7f826b2b38202b0ddfa72b30ecc0954d75be36e066deda9df9
home: https://github.com/kaasops/vector-operator
@@ -484,7 +497,7 @@ entries:
version: 0.0.14
- apiVersion: v2
appVersion: v0.0.13
- created: "2025-10-03T11:33:29.201794885+03:00"
+ created: "2025-11-17T12:42:54.444616+02:00"
description: A Helm chart to install Vector Operator
digest: c88a1866a20fb2aea4a23886e6e60080eba9ae7ef2706f492d9b329dc9ddf49b
home: https://github.com/kaasops/vector-operator
@@ -497,7 +510,7 @@ entries:
version: 0.0.13
- apiVersion: v2
appVersion: v0.0.12
- created: "2025-10-03T11:33:29.201258232+03:00"
+ created: "2025-11-17T12:42:54.443996+02:00"
description: A Helm chart to install Vector Operator
digest: 384e8fd8f8f743036eaf1415d893158256a2ad9daddcb17a3d0701a528d9f0df
home: https://github.com/kaasops/vector-operator
@@ -510,7 +523,7 @@ entries:
version: 0.0.12
- apiVersion: v2
appVersion: v0.0.11
- created: "2025-10-03T11:33:29.200722312+03:00"
+ created: "2025-11-17T12:42:54.442767+02:00"
description: A Helm chart to install Vector Operator
digest: 29e1e04c1706b88ef61ed6c91a45847e6069843419515a33046c5929b179e273
home: https://github.com/kaasops/vector-operator
@@ -523,7 +536,7 @@ entries:
version: 0.0.11
- apiVersion: v2
appVersion: v0.0.10
- created: "2025-10-03T11:33:29.199843802+03:00"
+ created: "2025-11-17T12:42:54.442243+02:00"
description: A Helm chart to install Vector Operator
digest: f4398224ce88b852b319c950d0f39bfd5e6181801c1fac1b42b069dd2d358078
home: https://github.com/kaasops/vector-operator
@@ -536,7 +549,7 @@ entries:
version: 0.0.10
- apiVersion: v2
appVersion: v0.0.9
- created: "2025-10-03T11:33:29.228477933+03:00"
+ created: "2025-11-17T12:42:54.467253+02:00"
description: A Helm chart to install Vector Operator
digest: 66c528b6daa9f6fb9a8dd91895b69151f3f0183f4685ba4a2bc026fac27f25a7
home: https://github.com/kaasops/vector-operator
@@ -549,7 +562,7 @@ entries:
version: 0.0.9
- apiVersion: v2
appVersion: v0.0.8
- created: "2025-10-03T11:33:29.228015585+03:00"
+ created: "2025-11-17T12:42:54.46693+02:00"
description: A Helm chart to install Vector Operator
digest: 21c4c214cd0206abb743e82ac757804d644de08d80eb5f2edbb82ff9668cfed3
home: https://github.com/kaasops/vector-operator
@@ -562,7 +575,7 @@ entries:
version: 0.0.8
- apiVersion: v2
appVersion: v0.0.7
- created: "2025-10-03T11:33:29.227394919+03:00"
+ created: "2025-11-17T12:42:54.466432+02:00"
description: A Helm chart to install Vector Operator
digest: 27915a2bf70da3f66d08cf4a1f6c41ad38937759ad52eaf8b19f5a3e348e2f2e
home: https://github.com/kaasops/vector-operator
@@ -575,7 +588,7 @@ entries:
version: 0.0.7
- apiVersion: v2
appVersion: v0.0.6
- created: "2025-10-03T11:33:29.226752791+03:00"
+ created: "2025-11-17T12:42:54.465943+02:00"
description: A Helm chart to install Vector Operator
digest: 26760fbc2018336c12e8726307a624970ee994c4ffa021cc216c13669bd82f09
home: https://github.com/kaasops/vector-operator
@@ -588,7 +601,7 @@ entries:
version: 0.0.6
- apiVersion: v2
appVersion: v0.0.5
- created: "2025-10-03T11:33:29.199278578+03:00"
+ created: "2025-11-17T12:42:54.441691+02:00"
description: A Helm chart to install Vector Operator
digest: 1d6034027ae2f08a9dbea4d6ee9a1604117ae44d9daceb3f654b87a99175251f
home: https://github.com/kaasops/vector-operator
@@ -599,4 +612,4 @@ entries:
urls:
- https://kaasops.github.io/vector-operator/helm/packages/vector-operator-0.0.1.tgz
version: 0.0.1
-generated: "2025-10-03T11:33:29.198717087+03:00"
+generated: "2025-11-17T12:42:54.440609+02:00"
diff --git a/helm/packages/vector-operator-0.7.2.tgz b/helm/packages/vector-operator-0.7.2.tgz
new file mode 100644
index 00000000..e3868d96
Binary files /dev/null and b/helm/packages/vector-operator-0.7.2.tgz differ
diff --git a/internal/config/agent.go b/internal/config/agent.go
index e98afb4a..3f53e8cf 100644
--- a/internal/config/agent.go
+++ b/internal/config/agent.go
@@ -2,12 +2,14 @@ package config
import (
"fmt"
- vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
- "github.com/kaasops/vector-operator/internal/pipeline"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
+
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/labels"
goyaml "sigs.k8s.io/yaml"
+
+ vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
+ "github.com/kaasops/vector-operator/internal/pipeline"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
const (
diff --git a/internal/config/aggregator.go b/internal/config/aggregator.go
index ec469162..c5b06344 100644
--- a/internal/config/aggregator.go
+++ b/internal/config/aggregator.go
@@ -3,13 +3,15 @@ package config
import (
"errors"
"fmt"
- "github.com/kaasops/vector-operator/internal/common"
- "github.com/kaasops/vector-operator/internal/pipeline"
- "github.com/stoewer/go-strcase"
- corev1 "k8s.io/api/core/v1"
"net"
"strconv"
"strings"
+
+ "github.com/stoewer/go-strcase"
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/kaasops/vector-operator/internal/common"
+ "github.com/kaasops/vector-operator/internal/pipeline"
)
func BuildAggregatorConfig(params VectorConfigParams, pipelines ...pipeline.Pipeline) (*VectorConfig, error) {
diff --git a/internal/config/config.go b/internal/config/config.go
index e51f52fb..0177ba5c 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -20,13 +20,15 @@ import (
"encoding/json"
"errors"
"fmt"
- vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
- "github.com/kaasops/vector-operator/internal/evcollector"
+ "net"
+ "strconv"
+
"github.com/mitchellh/mapstructure"
"gopkg.in/yaml.v2"
- "net"
goyaml "sigs.k8s.io/yaml"
- "strconv"
+
+ vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
+ "github.com/kaasops/vector-operator/internal/evcollector"
)
var (
@@ -49,7 +51,7 @@ func newVectorConfig(p VectorConfigParams) *VectorConfig {
sinks := make(map[string]*Sink)
api := &ApiSpec{
- Address: net.JoinHostPort("0.0.0.0", strconv.Itoa(AgentApiPort)),
+ Address: net.JoinHostPort(net.IPv6zero.String(), strconv.Itoa(AgentApiPort)),
Enabled: p.ApiEnabled,
Playground: p.PlaygroundEnabled,
}
diff --git a/internal/config/configcheck/configcheck.go b/internal/config/configcheck/configcheck.go
index dc4cca23..ffc68bf4 100644
--- a/internal/config/configcheck/configcheck.go
+++ b/internal/config/configcheck/configcheck.go
@@ -24,7 +24,6 @@ import (
api_errors "k8s.io/apimachinery/pkg/api/errors"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -36,6 +35,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
+
vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
)
@@ -64,6 +65,8 @@ type ConfigCheck struct {
ConfigCheckTimeout time.Duration
Annotations map[string]string
Labels map[string]string
+ Volumes []corev1.Volume
+ VolumeMounts []corev1.VolumeMount
}
func New(
@@ -113,6 +116,8 @@ func New(
ConfigCheckTimeout: timeout,
Annotations: vc.ConfigCheck.Annotations,
Labels: vc.ConfigCheck.Labels,
+ Volumes: vc.Volumes,
+ VolumeMounts: vc.VolumeMounts,
Initiator: initiator,
}
}
diff --git a/internal/config/configcheck/configcheck_config.go b/internal/config/configcheck/configcheck_config.go
index 3d3a7107..6db0bad3 100644
--- a/internal/config/configcheck/configcheck_config.go
+++ b/internal/config/configcheck/configcheck_config.go
@@ -19,10 +19,11 @@ package configcheck
import (
"context"
- "github.com/kaasops/vector-operator/internal/utils/compression"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/compression"
)
func (cc *ConfigCheck) createVectorConfigCheckConfig(ctx context.Context) (*corev1.Secret, error) {
diff --git a/internal/config/configcheck/configcheck_pod.go b/internal/config/configcheck/configcheck_pod.go
index 2456d7db..0a2b6310 100644
--- a/internal/config/configcheck/configcheck_pod.go
+++ b/internal/config/configcheck/configcheck_pod.go
@@ -69,6 +69,16 @@ func (cc *ConfigCheck) createVectorConfigCheckPod() *corev1.Pod {
}
func (cc *ConfigCheck) generateVectorConfigCheckVolume() []corev1.Volume {
+ volume := cc.Volumes
+
+ // Merge user-defined volumes with required volumes.
+ // User-defined volumes take precedence over required volumes with the same name.
+ // Build a set of user-defined volume names to check for conflicts.
+ existingVolumes := make(map[string]bool, len(volume))
+ for _, v := range volume {
+ existingVolumes[v.Name] = true
+ }
+
configVolumeSource := corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: cc.getNameVectorConfigCheck(),
@@ -78,9 +88,10 @@ func (cc *ConfigCheck) generateVectorConfigCheckVolume() []corev1.Volume {
configVolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
-
}
- volume := []corev1.Volume{
+
+ // Define required volumes for configcheck
+ requiredVolumes := []corev1.Volume{
{
Name: "config",
VolumeSource: configVolumeSource,
@@ -125,7 +136,13 @@ func (cc *ConfigCheck) generateVectorConfigCheckVolume() []corev1.Volume {
},
}
- if cc.CompressedConfig {
+ for _, reqVol := range requiredVolumes {
+ if !existingVolumes[reqVol.Name] {
+ volume = append(volume, reqVol)
+ }
+ }
+
+ if cc.CompressedConfig && !existingVolumes["app-config-compress"] {
volume = append(volume, corev1.Volume{
Name: "app-config-compress",
VolumeSource: corev1.VolumeSource{
@@ -140,7 +157,18 @@ func (cc *ConfigCheck) generateVectorConfigCheckVolume() []corev1.Volume {
}
func (cc *ConfigCheck) generateVectorConfigCheckVolumeMounts() []corev1.VolumeMount {
- volumeMount := []corev1.VolumeMount{
+ volumeMount := cc.VolumeMounts
+
+ // Merge user-defined volumeMounts with required volumeMounts.
+ // User-defined volumeMounts take precedence over required volumeMounts with the same name.
+ // Build a set of user-defined volumeMount names to check for conflicts.
+ existingVolumeMounts := make(map[string]bool, len(volumeMount))
+ for _, vm := range volumeMount {
+ existingVolumeMounts[vm.Name] = true
+ }
+
+ // Define required volumeMounts for configcheck
+ requiredVolumeMounts := []corev1.VolumeMount{
{
Name: "config",
MountPath: "/etc/vector/",
@@ -167,13 +195,17 @@ func (cc *ConfigCheck) generateVectorConfigCheckVolumeMounts() []corev1.VolumeMo
},
}
- if cc.CompressedConfig {
- volumeMount = append(volumeMount, []corev1.VolumeMount{
- {
- Name: "app-config-compress",
- MountPath: "/tmp/archive",
- },
- }...)
+ for _, reqVm := range requiredVolumeMounts {
+ if !existingVolumeMounts[reqVm.Name] {
+ volumeMount = append(volumeMount, reqVm)
+ }
+ }
+
+ if cc.CompressedConfig && !existingVolumeMounts["app-config-compress"] {
+ volumeMount = append(volumeMount, corev1.VolumeMount{
+ Name: "app-config-compress",
+ MountPath: "/tmp/archive",
+ })
}
return volumeMount
diff --git a/internal/config/default.go b/internal/config/default.go
index 380a154a..77662703 100644
--- a/internal/config/default.go
+++ b/internal/config/default.go
@@ -1,6 +1,9 @@
package config
-import "fmt"
+import (
+ "net"
+ "strconv"
+)
const (
// types
@@ -12,6 +15,7 @@ const (
DefaultSinkName = "defaultSink"
DefaultInternalMetricsSourceName = "internalMetricsSource"
DefaultInternalMetricsSinkName = "internalMetricsSink"
+ DefaultInternalMetricsSinkPort = 9598
DefaultAggregatorSourcePort = 8989
DefaultNamespace = "default"
DefaultPipelineName = "default-pipeline"
@@ -26,7 +30,7 @@ var (
Name: DefaultSourceName,
Type: VectorType,
Options: map[string]any{
- "address": fmt.Sprintf("0.0.0.0:%d", DefaultAggregatorSourcePort),
+ "address": net.JoinHostPort(net.IPv6zero.String(), strconv.Itoa(DefaultAggregatorSourcePort)),
},
}
defaultSink = &Sink{
@@ -63,5 +67,8 @@ var (
Name: DefaultInternalMetricsSinkName,
Type: PrometheusExporterType,
Inputs: []string{DefaultInternalMetricsSourceName},
+ Options: map[string]any{
+ "address": net.JoinHostPort(net.IPv6zero.String(), strconv.Itoa(DefaultInternalMetricsSinkPort)),
+ },
}
)
diff --git a/internal/config/types.go b/internal/config/types.go
index a81fc54a..b5b52f1e 100644
--- a/internal/config/types.go
+++ b/internal/config/types.go
@@ -19,6 +19,7 @@ package config
import (
"encoding/json"
"fmt"
+
"github.com/kaasops/vector-operator/internal/utils/hash"
corev1 "k8s.io/api/core/v1"
diff --git a/internal/controller/clustervectoraggregator_controller.go b/internal/controller/clustervectoraggregator_controller.go
index 7537ecfa..a2aad04d 100644
--- a/internal/controller/clustervectoraggregator_controller.go
+++ b/internal/controller/clustervectoraggregator_controller.go
@@ -19,12 +19,8 @@ package controller
import (
"context"
"errors"
- "github.com/kaasops/vector-operator/internal/config"
- "github.com/kaasops/vector-operator/internal/config/configcheck"
- "github.com/kaasops/vector-operator/internal/pipeline"
- "github.com/kaasops/vector-operator/internal/utils/hash"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
- "github.com/kaasops/vector-operator/internal/vector/aggregator"
+ "time"
+
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -38,7 +34,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/source"
- "time"
+
+ "github.com/kaasops/vector-operator/internal/config"
+ "github.com/kaasops/vector-operator/internal/config/configcheck"
+ "github.com/kaasops/vector-operator/internal/pipeline"
+ "github.com/kaasops/vector-operator/internal/utils/hash"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
+ "github.com/kaasops/vector-operator/internal/vector/aggregator"
v1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
)
diff --git a/internal/controller/clustervectoraggregator_controller_test.go b/internal/controller/clustervectoraggregator_controller_test.go
index 75e9643f..5b416c92 100644
--- a/internal/controller/clustervectoraggregator_controller_test.go
+++ b/internal/controller/clustervectoraggregator_controller_test.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+
"sigs.k8s.io/controller-runtime/pkg/event"
. "github.com/onsi/ginkgo/v2"
diff --git a/internal/controller/pipeline_controller.go b/internal/controller/pipeline_controller.go
index adf9ca0d..9b1b0d77 100644
--- a/internal/controller/pipeline_controller.go
+++ b/internal/controller/pipeline_controller.go
@@ -23,23 +23,25 @@ import (
	"reflect"
	"time"
-	"github.com/kaasops/vector-operator/internal/config/configcheck"
-	"github.com/kaasops/vector-operator/internal/vector/aggregator"
-	"github.com/kaasops/vector-operator/internal/vector/vectoragent"
-	"golang.org/x/sync/errgroup"
-	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
- "github.com/kaasops/vector-operator/api/v1alpha1"
- "github.com/kaasops/vector-operator/internal/config"
- "github.com/kaasops/vector-operator/internal/pipeline"
+ "golang.org/x/sync/errgroup"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+
+ "github.com/kaasops/vector-operator/api/v1alpha1"
+ "github.com/kaasops/vector-operator/internal/config"
+ "github.com/kaasops/vector-operator/internal/config/configcheck"
+ "github.com/kaasops/vector-operator/internal/pipeline"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
+ "github.com/kaasops/vector-operator/internal/vector/aggregator"
+ "github.com/kaasops/vector-operator/internal/vector/vectoragent"
)
type PipelineReconciler struct {
@@ -49,8 +54,6 @@ type PipelineReconciler struct {
// Temp. Wait this issue - https://github.com/kubernetes-sigs/controller-runtime/issues/452
Clientset *kubernetes.Clientset
ConfigCheckTimeout time.Duration
- VectorAgentEventCh chan event.GenericEvent
- VectorAggregatorsEventCh chan event.GenericEvent
ClusterVectorAggregatorsEventCh chan event.GenericEvent
EnableReconciliationInvalidPipelines bool
ReconciliationInvalidPipelinesRetryDelay time.Duration
@@ -143,6 +146,9 @@ func (r *PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
if *pipelineVectorRole == v1alpha1.VectorPipelineRoleAgent {
for _, vector := range vectorAgents {
+ if !k8s.MatchLabels(vector.Spec.Selector.MatchLabels, pipelineCR.GetLabels()) {
+ continue
+ }
eg.Go(func() error {
vaCtrl := vectoragent.NewController(vector, r.Client, r.Clientset)
cfg, byteConfig, err := config.BuildAgentConfig(config.VectorConfigParams{
@@ -182,6 +188,9 @@ func (r *PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
if pipelineCR.GetNamespace() != "" {
for _, vector := range vectorAggregators {
+ if !k8s.MatchLabels(vector.Spec.Selector.MatchLabels, pipelineCR.GetLabels()) {
+ continue
+ }
eg.Go(func() error {
vaCtrl := aggregator.NewController(vector, r.Client, r.Clientset)
cfg, err := config.BuildAggregatorConfig(config.VectorConfigParams{
@@ -228,6 +237,9 @@ func (r *PipelineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
} else {
for _, vector := range clusterVectorAggregators {
+ if !k8s.MatchLabels(vector.Spec.Selector.MatchLabels, pipelineCR.GetLabels()) {
+ continue
+ }
eg.Go(func() error {
vaCtrl := aggregator.NewController(vector, r.Client, r.Clientset)
cfg, err := config.BuildAggregatorConfig(config.VectorConfigParams{
@@ -335,14 +347,11 @@ func (r *PipelineReconciler) SetupWithManager(mgr ctrl.Manager) error {
var specAndAnnotationsPredicate = predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
- oldObject := e.ObjectOld.(client.Object)
- newObject := e.ObjectNew.(client.Object)
-
- if oldObject.GetGeneration() != newObject.GetGeneration() {
+ if e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() {
return true
}
- if !reflect.DeepEqual(oldObject.GetAnnotations(), newObject.GetAnnotations()) {
+ if !reflect.DeepEqual(e.ObjectOld.GetAnnotations(), e.ObjectNew.GetAnnotations()) {
return true
}
diff --git a/internal/controller/pipeline_controller_test.go b/internal/controller/pipeline_controller_test.go
index d0e58d36..28a624e6 100644
--- a/internal/controller/pipeline_controller_test.go
+++ b/internal/controller/pipeline_controller_test.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+
"sigs.k8s.io/controller-runtime/pkg/event"
. "github.com/onsi/ginkgo/v2"
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index bb5fd174..b82c3251 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -25,7 +25,7 @@ import (
"time"
"k8s.io/client-go/kubernetes"
- "k8s.io/utils/pointer"
+ "k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -66,7 +66,7 @@ var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
- UseExistingCluster: pointer.Bool(true),
+ UseExistingCluster: ptr.To(true),
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
diff --git a/internal/controller/vector_controller.go b/internal/controller/vector_controller.go
index 54961e39..37158a43 100644
--- a/internal/controller/vector_controller.go
+++ b/internal/controller/vector_controller.go
@@ -19,10 +19,11 @@ package controller
import (
"context"
"errors"
+ "time"
+
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
- "time"
"github.com/kaasops/vector-operator/internal/config"
"github.com/kaasops/vector-operator/internal/config/configcheck"
@@ -36,7 +37,6 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
- "github.com/kaasops/vector-operator/api/v1alpha1"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
api_errors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@@ -47,6 +47,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+ "github.com/kaasops/vector-operator/api/v1alpha1"
)
// VectorReconciler reconciles a Vector object
diff --git a/internal/controller/vector_controller_test.go b/internal/controller/vector_controller_test.go
index 5c6a2b62..24e1d23e 100644
--- a/internal/controller/vector_controller_test.go
+++ b/internal/controller/vector_controller_test.go
@@ -18,6 +18,7 @@ package controller
import (
"context"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
diff --git a/internal/controller/vectoraggregator_controller.go b/internal/controller/vectoraggregator_controller.go
index e5296867..93f7ffe6 100644
--- a/internal/controller/vectoraggregator_controller.go
+++ b/internal/controller/vectoraggregator_controller.go
@@ -19,12 +19,8 @@ package controller
import (
"context"
"errors"
- "github.com/kaasops/vector-operator/internal/config"
- "github.com/kaasops/vector-operator/internal/config/configcheck"
- "github.com/kaasops/vector-operator/internal/pipeline"
- "github.com/kaasops/vector-operator/internal/utils/hash"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
- "github.com/kaasops/vector-operator/internal/vector/aggregator"
+ "time"
+
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -37,7 +33,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
- "time"
+
+ "github.com/kaasops/vector-operator/internal/config"
+ "github.com/kaasops/vector-operator/internal/config/configcheck"
+ "github.com/kaasops/vector-operator/internal/pipeline"
+ "github.com/kaasops/vector-operator/internal/utils/hash"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
+ "github.com/kaasops/vector-operator/internal/vector/aggregator"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
diff --git a/internal/controller/vectoraggregator_controller_test.go b/internal/controller/vectoraggregator_controller_test.go
index d65b3f0e..757d2278 100644
--- a/internal/controller/vectoraggregator_controller_test.go
+++ b/internal/controller/vectoraggregator_controller_test.go
@@ -18,9 +18,10 @@ package controller
import (
"context"
- "sigs.k8s.io/controller-runtime/pkg/event"
"time"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
diff --git a/internal/evcollector/collector.go b/internal/evcollector/collector.go
index bb503a7c..f1a3412c 100644
--- a/internal/evcollector/collector.go
+++ b/internal/evcollector/collector.go
@@ -2,14 +2,16 @@ package evcollector
import (
"context"
- "github.com/kaasops/vector-operator/internal/vector/gen"
+ "time"
+
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
- "time"
+
+ "github.com/kaasops/vector-operator/internal/vector/gen"
)
type Logger interface {
diff --git a/internal/evcollector/event.go b/internal/evcollector/event.go
index 3ccfa258..a46ae39f 100644
--- a/internal/evcollector/event.go
+++ b/internal/evcollector/event.go
@@ -1,9 +1,11 @@
package evcollector
import (
- "github.com/kaasops/vector-operator/internal/vector/gen"
- corev1 "k8s.io/api/core/v1"
"time"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/kaasops/vector-operator/internal/vector/gen"
)
func k8sEventToVectorLog(ev *corev1.Event) *gen.Log {
diff --git a/internal/pipeline/hash.go b/internal/pipeline/hash.go
index 84c6178b..0219849a 100644
--- a/internal/pipeline/hash.go
+++ b/internal/pipeline/hash.go
@@ -18,6 +18,7 @@ package pipeline
import (
"encoding/json"
+
"github.com/kaasops/vector-operator/api/v1alpha1"
"github.com/kaasops/vector-operator/internal/common"
"github.com/kaasops/vector-operator/internal/utils/hash"
diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go
index 559bef6e..35ee0dc0 100644
--- a/internal/pipeline/pipeline.go
+++ b/internal/pipeline/pipeline.go
@@ -19,10 +19,12 @@ package pipeline
import (
"context"
"fmt"
+
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/kaasops/vector-operator/api/v1alpha1"
- "sigs.k8s.io/controller-runtime/pkg/client"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
type Pipeline interface {
@@ -80,7 +82,7 @@ func GetValidPipelines(ctx context.Context, client client.Client, filter FilterP
vp.IsValid() &&
vp.GetRole() == filter.Role &&
(filter.Scope == AllPipelines || vp.Namespace == filter.Namespace) &&
- MatchLabels(matchLabels, vp.Labels) {
+ k8s.MatchLabels(matchLabels, vp.Labels) {
validPipelines = append(validPipelines, vp.DeepCopy())
}
}
@@ -97,7 +99,7 @@ func GetValidPipelines(ctx context.Context, client client.Client, filter FilterP
if !cvp.IsDeleted() &&
cvp.IsValid() &&
cvp.GetRole() == filter.Role &&
- MatchLabels(matchLabels, cvp.Labels) {
+ k8s.MatchLabels(matchLabels, cvp.Labels) {
validPipelines = append(validPipelines, cvp.DeepCopy())
}
}
@@ -146,15 +148,3 @@ func GetClusterVectorPipelines(ctx context.Context, client client.Client) ([]v1a
}
return cvps.Items, nil
}
-
-func MatchLabels(selector map[string]string, labels map[string]string) bool {
- if selector == nil {
- return true
- }
- for k, v := range selector {
- if labels[k] != v {
- return false
- }
- }
- return true
-}
diff --git a/internal/pipeline/pipeline_test.go b/internal/pipeline/pipeline_test.go
deleted file mode 100644
index d04adde6..00000000
--- a/internal/pipeline/pipeline_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package pipeline
-
-import (
- "testing"
-)
-
-func TestMatchLabels(t *testing.T) {
- tests := []struct {
- name string
- selector map[string]string
- labels map[string]string
- want bool
- }{
- {
- name: "NoSelector",
- selector: nil,
- labels: map[string]string{"label1": "value1", "label2": "value2"},
- want: true,
- },
- {
- name: "MatchingLabels",
- selector: map[string]string{"label1": "value1", "label2": "value2"},
- labels: map[string]string{"label1": "value1", "label2": "value2"},
- want: true,
- },
- {
- name: "MismatchedLabelValues",
- selector: map[string]string{"label1": "value1", "label2": "value2"},
- labels: map[string]string{"label1": "value1", "label2": "mismatch"},
- want: false,
- },
- {
- name: "ExtraLabelsInMap",
- selector: map[string]string{"label1": "value1"},
- labels: map[string]string{"label1": "value1", "label2": "value2"},
- want: true,
- },
- {
- name: "SelectorWithNoMatches",
- selector: map[string]string{"label1": "value1", "label2": "value2"},
- labels: map[string]string{"label3": "value3"},
- want: false,
- },
- {
- name: "SelectorWithNoMatches2",
- selector: map[string]string{"label1": "value1", "label2": "value2"},
- labels: map[string]string{"label1": "label1"},
- want: false,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if got := MatchLabels(test.selector, test.labels); got != test.want {
- t.Errorf("MatchLabels() = %v, want %v", got, test.want)
- }
- })
- }
-}
diff --git a/internal/utils/hash/hash_test.go b/internal/utils/hash/hash_test.go
index 46ce0239..09ab20b7 100644
--- a/internal/utils/hash/hash_test.go
+++ b/internal/utils/hash/hash_test.go
@@ -19,8 +19,9 @@ package hash_test
import (
"testing"
- "github.com/kaasops/vector-operator/internal/utils/hash"
"github.com/stretchr/testify/require"
+
+ "github.com/kaasops/vector-operator/internal/utils/hash"
)
func TestGet(t *testing.T) {
diff --git a/internal/utils/k8s/k8s_test.go b/internal/utils/k8s/k8s_test.go
index 2c5fd442..d02814db 100644
--- a/internal/utils/k8s/k8s_test.go
+++ b/internal/utils/k8s/k8s_test.go
@@ -24,7 +24,6 @@ import (
// . "github.com/onsi/ginkgo/v2"
// . "github.com/onsi/gomega"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -36,6 +35,8 @@ import (
fakeclientset "k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
type objCase struct {
diff --git a/internal/utils/k8s/label.go b/internal/utils/k8s/label.go
index 72c63ebc..de1526f5 100644
--- a/internal/utils/k8s/label.go
+++ b/internal/utils/k8s/label.go
@@ -42,18 +42,31 @@ const (
// being merged into the destination (dst) labels. If a key exists in both maps,
// the destination value is preserved.
func MergeLabels(dst, src map[string]string) map[string]string {
- if dst == nil {
+ if dst == nil {
dst = make(map[string]string)
- }
-
- if src == nil {
- return dst
- }
-
- for k, v := range src {
- if _, ok := dst[k]; !ok {
- dst[k] = v
- }
- }
- return dst
-}
\ No newline at end of file
+ }
+
+ if src == nil {
+ return dst
+ }
+
+ for k, v := range src {
+ if _, ok := dst[k]; !ok {
+ dst[k] = v
+ }
+ }
+ return dst
+}
+
+// MatchLabels matches a set of Kubernetes selectors and a set of Kubernetes labels
+func MatchLabels(selector map[string]string, labels map[string]string) bool {
+ if selector == nil {
+ return true
+ }
+ for k, v := range selector {
+ if labels[k] != v {
+ return false
+ }
+ }
+ return true
+}
diff --git a/internal/utils/k8s/label_test.go b/internal/utils/k8s/label_test.go
new file mode 100644
index 00000000..5186d396
--- /dev/null
+++ b/internal/utils/k8s/label_test.go
@@ -0,0 +1,114 @@
+package k8s
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestMatchLabels(t *testing.T) {
+ tests := []struct {
+ name string
+ selector map[string]string
+ labels map[string]string
+ want bool
+ }{
+ {
+ name: "NoSelector",
+ selector: nil,
+ labels: map[string]string{"label1": "value1", "label2": "value2"},
+ want: true,
+ },
+ {
+ name: "MatchingLabels",
+ selector: map[string]string{"label1": "value1", "label2": "value2"},
+ labels: map[string]string{"label1": "value1", "label2": "value2"},
+ want: true,
+ },
+ {
+ name: "MismatchedLabelValues",
+ selector: map[string]string{"label1": "value1", "label2": "value2"},
+ labels: map[string]string{"label1": "value1", "label2": "mismatch"},
+ want: false,
+ },
+ {
+ name: "ExtraLabelsInMap",
+ selector: map[string]string{"label1": "value1"},
+ labels: map[string]string{"label1": "value1", "label2": "value2"},
+ want: true,
+ },
+ {
+ name: "SelectorWithNoMatches",
+ selector: map[string]string{"label1": "value1", "label2": "value2"},
+ labels: map[string]string{"label3": "value3"},
+ want: false,
+ },
+ {
+ name: "SelectorWithNoMatches2",
+ selector: map[string]string{"label1": "value1", "label2": "value2"},
+ labels: map[string]string{"label1": "label1"},
+ want: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if got := MatchLabels(test.selector, test.labels); got != test.want {
+ t.Errorf("MatchLabels() = %v, want %v", got, test.want)
+ }
+ })
+ }
+}
+
+func TestMergeLabels(t *testing.T) {
+ tests := []struct {
+ name string
+ sourceLabels map[string]string
+ distLabels map[string]string
+ want map[string]string
+ }{
+ {
+ name: "EmptySource",
+ sourceLabels: nil,
+ distLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ want: map[string]string{"label1": "value1", "label2": "value2"},
+ },
+ {
+ name: "EmptyDist",
+ sourceLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ distLabels: nil,
+ want: map[string]string{"label1": "value1", "label2": "value2"},
+ },
+ {
+ name: "DifferentLabelValues",
+ sourceLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ distLabels: map[string]string{"label1": "value1", "label2": "mismatch"},
+ want: map[string]string{"label1": "value1", "label2": "mismatch"},
+ },
+ {
+ name: "SameLabelValues",
+ sourceLabels: map[string]string{"label1": "value1"},
+ distLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ want: map[string]string{"label1": "value1", "label2": "value2"},
+ },
+ {
+ name: "NewLabelValues",
+ sourceLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ distLabels: map[string]string{"label3": "value3"},
+ want: map[string]string{"label1": "value1", "label2": "value2", "label3": "value3"},
+ },
+ {
+ name: "DifferentLabelValues2",
+ sourceLabels: map[string]string{"label1": "value1", "label2": "value2"},
+ distLabels: map[string]string{"label1": "label1"},
+ want: map[string]string{"label1": "label1", "label2": "value2"},
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if got := MergeLabels(test.distLabels, test.sourceLabels); !reflect.DeepEqual(got, test.want) {
+ t.Errorf("MergeLabels() = %v, want %v", got, test.want)
+ }
+ })
+ }
+}
diff --git a/internal/vector/aggregator/config.go b/internal/vector/aggregator/config.go
index d0593bb6..cb2fab5b 100644
--- a/internal/vector/aggregator/config.go
+++ b/internal/vector/aggregator/config.go
@@ -2,10 +2,12 @@ package aggregator
import (
"context"
- "github.com/kaasops/vector-operator/internal/utils/compression"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
+
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/compression"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) ensureVectorAggregatorConfig(ctx context.Context) error {
diff --git a/internal/vector/aggregator/controller.go b/internal/vector/aggregator/controller.go
index 09dae11a..8678906b 100644
--- a/internal/vector/aggregator/controller.go
+++ b/internal/vector/aggregator/controller.go
@@ -3,10 +3,6 @@ package aggregator
import (
"context"
- vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
- "github.com/kaasops/vector-operator/internal/buildinfo"
- "github.com/kaasops/vector-operator/internal/config"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
resourcev1 "k8s.io/apimachinery/pkg/api/resource"
@@ -17,6 +13,11 @@ import (
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
+ "github.com/kaasops/vector-operator/internal/buildinfo"
+ "github.com/kaasops/vector-operator/internal/config"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
type Aggregator interface {
diff --git a/internal/vector/aggregator/deployment.go b/internal/vector/aggregator/deployment.go
index fec3cb8b..8957dd64 100644
--- a/internal/vector/aggregator/deployment.go
+++ b/internal/vector/aggregator/deployment.go
@@ -4,12 +4,14 @@ import (
"context"
"time"
- "github.com/kaasops/vector-operator/internal/common"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/common"
+ "github.com/kaasops/vector-operator/internal/config"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) ensureVectorAggregatorDeployment(ctx context.Context) error {
@@ -80,7 +82,7 @@ func (ctrl *Controller) VectorAggregatorContainer() *corev1.Container {
Ports: []corev1.ContainerPort{
{
Name: "prom-exporter",
- ContainerPort: 9598,
+ ContainerPort: config.DefaultInternalMetricsSinkPort,
Protocol: "TCP",
},
},
@@ -152,6 +154,15 @@ func (ctrl *Controller) ConfigReloaderSidecarContainer() *corev1.Container {
func (ctrl *Controller) generateVectorAggregatorVolume() []corev1.Volume {
volume := ctrl.Spec.Volumes
+
+ // Merge user-defined volumes with required volumes.
+ // User-defined volumes take precedence over required volumes with the same name.
+ // Build a set of user-defined volume names to check for conflicts.
+ existingVolumes := make(map[string]bool, len(volume))
+ for _, v := range volume {
+ existingVolumes[v.Name] = true
+ }
+
configVolumeSource := corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: ctrl.getNameVectorAggregator(),
@@ -161,9 +172,10 @@ func (ctrl *Controller) generateVectorAggregatorVolume() []corev1.Volume {
configVolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
-
}
- volume = append(volume, []corev1.Volume{
+
+ // Define required volumes for Vector aggregator
+ requiredVolumes := []corev1.Volume{
{
Name: "config",
VolumeSource: configVolumeSource,
@@ -192,9 +204,16 @@ func (ctrl *Controller) generateVectorAggregatorVolume() []corev1.Volume {
},
},
},
- }...)
+ }
- if ctrl.Spec.CompressConfigFile {
+ // Only add volumes that don't already exist
+ for _, reqVol := range requiredVolumes {
+ if !existingVolumes[reqVol.Name] {
+ volume = append(volume, reqVol)
+ }
+ }
+
+ if ctrl.Spec.CompressConfigFile && !existingVolumes["app-config-compress"] {
volume = append(volume, corev1.Volume{
Name: "app-config-compress",
VolumeSource: corev1.VolumeSource{
@@ -211,7 +230,16 @@ func (ctrl *Controller) generateVectorAggregatorVolume() []corev1.Volume {
func (ctrl *Controller) generateVectorAggregatorVolumeMounts() []corev1.VolumeMount {
volumeMount := ctrl.Spec.VolumeMounts
- volumeMount = append(volumeMount, []corev1.VolumeMount{
+ // Merge user-defined volumeMounts with required volumeMounts.
+ // User-defined volumeMounts take precedence over required volumeMounts with the same name.
+ // Build a set of user-defined volumeMount names to check for conflicts.
+ existingVolumeMounts := make(map[string]bool, len(volumeMount))
+ for _, vm := range volumeMount {
+ existingVolumeMounts[vm.Name] = true
+ }
+
+ // Define required volumeMounts for Vector aggregator
+ requiredVolumeMounts := []corev1.VolumeMount{
{
Name: "config",
MountPath: "/etc/vector",
@@ -228,15 +256,20 @@ func (ctrl *Controller) generateVectorAggregatorVolumeMounts() []corev1.VolumeMo
Name: "sysfs",
MountPath: "/host/sys",
},
- }...)
+ }
- if ctrl.Spec.CompressConfigFile {
- volumeMount = append(volumeMount, []corev1.VolumeMount{
- {
- Name: "app-config-compress",
- MountPath: "/tmp/archive",
- },
- }...)
+ // Only add volumeMounts that don't already exist
+ for _, reqVm := range requiredVolumeMounts {
+ if !existingVolumeMounts[reqVm.Name] {
+ volumeMount = append(volumeMount, reqVm)
+ }
+ }
+
+ if ctrl.Spec.CompressConfigFile && !existingVolumeMounts["app-config-compress"] {
+ volumeMount = append(volumeMount, corev1.VolumeMount{
+ Name: "app-config-compress",
+ MountPath: "/tmp/archive",
+ })
}
return volumeMount
diff --git a/internal/vector/aggregator/event_collector.go b/internal/vector/aggregator/event_collector.go
index 8ca92994..52cab981 100644
--- a/internal/vector/aggregator/event_collector.go
+++ b/internal/vector/aggregator/event_collector.go
@@ -4,8 +4,6 @@ import (
"context"
"encoding/json"
- "github.com/kaasops/vector-operator/internal/evcollector"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -14,6 +12,9 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/evcollector"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) ensureEventCollector(ctx context.Context) error {
diff --git a/internal/vector/aggregator/podmonitor.go b/internal/vector/aggregator/podmonitor.go
index cab43e4a..cbf69552 100644
--- a/internal/vector/aggregator/podmonitor.go
+++ b/internal/vector/aggregator/podmonitor.go
@@ -3,10 +3,11 @@ package aggregator
import (
"context"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) ensureVectorAggregatorPodMonitor(ctx context.Context) error {
diff --git a/internal/vector/aggregator/rbac.go b/internal/vector/aggregator/rbac.go
index c5bf2a21..82f5be63 100644
--- a/internal/vector/aggregator/rbac.go
+++ b/internal/vector/aggregator/rbac.go
@@ -2,10 +2,12 @@ package aggregator
import (
"context"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
+
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
const ApiPort = 8686
diff --git a/internal/vector/aggregator/service.go b/internal/vector/aggregator/service.go
index a0b0cae4..cdae4ed0 100644
--- a/internal/vector/aggregator/service.go
+++ b/internal/vector/aggregator/service.go
@@ -4,13 +4,14 @@ import (
"context"
"maps"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
"github.com/stoewer/go-strcase"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) ensureVectorAggregatorService(ctx context.Context) error {
diff --git a/internal/vector/gen/event.pb.go b/internal/vector/gen/event.pb.go
index 9d55d371..359453c6 100644
--- a/internal/vector/gen/event.pb.go
+++ b/internal/vector/gen/event.pb.go
@@ -7,11 +7,12 @@
package gen
import (
+ reflect "reflect"
+ sync "sync"
+
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
)
const (
diff --git a/internal/vector/gen/vector.pb.go b/internal/vector/gen/vector.pb.go
index 4c49c5b6..89a63029 100644
--- a/internal/vector/gen/vector.pb.go
+++ b/internal/vector/gen/vector.pb.go
@@ -7,10 +7,11 @@
package gen
import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
diff --git a/internal/vector/gen/vector_grpc.pb.go b/internal/vector/gen/vector_grpc.pb.go
index d0e4691f..67b1c8f6 100644
--- a/internal/vector/gen/vector_grpc.pb.go
+++ b/internal/vector/gen/vector_grpc.pb.go
@@ -8,6 +8,7 @@ package gen
import (
context "context"
+
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
diff --git a/internal/vector/vectoragent/vectoragent.go b/internal/vector/vectoragent/vectoragent.go
index 08011bc4..3a864de6 100644
--- a/internal/vector/vectoragent/vectoragent.go
+++ b/internal/vector/vectoragent/vectoragent.go
@@ -18,11 +18,13 @@ package vectoragent
import (
"context"
+
+ "k8s.io/client-go/kubernetes"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
vectorv1alpha1 "github.com/kaasops/vector-operator/api/v1alpha1"
"github.com/kaasops/vector-operator/internal/config"
"github.com/kaasops/vector-operator/internal/utils/k8s"
- "k8s.io/client-go/kubernetes"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
type Controller struct {
diff --git a/internal/vector/vectoragent/vectoragent_config.go b/internal/vector/vectoragent/vectoragent_config.go
index ce6b1950..28dcfcc2 100644
--- a/internal/vector/vectoragent/vectoragent_config.go
+++ b/internal/vector/vectoragent/vectoragent_config.go
@@ -19,9 +19,10 @@ package vectoragent
import (
"context"
- "github.com/kaasops/vector-operator/internal/utils/compression"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/utils/compression"
)
func (ctrl *Controller) createVectorAgentConfig(ctx context.Context) (*corev1.Secret, error) {
diff --git a/internal/vector/vectoragent/vectoragent_controller.go b/internal/vector/vectoragent/vectoragent_controller.go
index 3a27e41b..d443e8e0 100644
--- a/internal/vector/vectoragent/vectoragent_controller.go
+++ b/internal/vector/vectoragent/vectoragent_controller.go
@@ -21,12 +21,13 @@ import (
"time"
- "github.com/kaasops/vector-operator/internal/common"
- "github.com/kaasops/vector-operator/internal/utils/k8s"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/kaasops/vector-operator/internal/common"
+ "github.com/kaasops/vector-operator/internal/utils/k8s"
)
func (ctrl *Controller) EnsureVectorAgent(ctx context.Context) error {
@@ -162,7 +163,6 @@ func (ctrl *Controller) matchLabelsForVectorAgent() map[string]string {
func (ctrl *Controller) labelsForVectorAgent() map[string]string {
basicLabels := ctrl.matchLabelsForVectorAgent()
-
labels := k8s.MergeLabels(basicLabels, ctrl.Vector.Spec.Agent.Labels)
return labels
diff --git a/internal/vector/vectoragent/vectoragent_daemonset.go b/internal/vector/vectoragent/vectoragent_daemonset.go
index 9c36f1d3..33c318ea 100644
--- a/internal/vector/vectoragent/vectoragent_daemonset.go
+++ b/internal/vector/vectoragent/vectoragent_daemonset.go
@@ -20,6 +20,8 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/kaasops/vector-operator/internal/config"
)
func (ctrl *Controller) createVectorAgentDaemonSet() *appsv1.DaemonSet {
@@ -68,6 +70,15 @@ func (ctrl *Controller) createVectorAgentDaemonSet() *appsv1.DaemonSet {
func (ctrl *Controller) generateVectorAgentVolume() []corev1.Volume {
volume := ctrl.Vector.Spec.Agent.Volumes
+
+ // Merge user-defined volumes with required volumes.
+ // User-defined volumes take precedence over required volumes with the same name.
+ // Build a set of user-defined volume names to check for conflicts.
+ existingVolumes := make(map[string]bool, len(volume))
+ for _, v := range volume {
+ existingVolumes[v.Name] = true
+ }
+
configVolumeSource := corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: ctrl.getNameVectorAgent(),
@@ -77,9 +88,10 @@ func (ctrl *Controller) generateVectorAgentVolume() []corev1.Volume {
configVolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
-
}
- volume = append(volume, []corev1.Volume{
+
+ // Define required volumes for Vector agent
+ requiredVolumes := []corev1.Volume{
{
Name: "config",
VolumeSource: configVolumeSource,
@@ -108,9 +120,16 @@ func (ctrl *Controller) generateVectorAgentVolume() []corev1.Volume {
},
},
},
- }...)
+ }
- if ctrl.Vector.Spec.Agent.CompressConfigFile {
+ // Only add volumes that don't already exist
+ for _, reqVol := range requiredVolumes {
+ if !existingVolumes[reqVol.Name] {
+ volume = append(volume, reqVol)
+ }
+ }
+
+ if ctrl.Vector.Spec.Agent.CompressConfigFile && !existingVolumes["app-config-compress"] {
volume = append(volume, corev1.Volume{
Name: "app-config-compress",
VolumeSource: corev1.VolumeSource{
@@ -127,7 +146,16 @@ func (ctrl *Controller) generateVectorAgentVolume() []corev1.Volume {
func (ctrl *Controller) generateVectorAgentVolumeMounts() []corev1.VolumeMount {
volumeMount := ctrl.Vector.Spec.Agent.VolumeMounts
- volumeMount = append(volumeMount, []corev1.VolumeMount{
+ // Merge user-defined volumeMounts with required volumeMounts.
+ // User-defined volumeMounts take precedence over required volumeMounts with the same name.
+ // Build a set of user-defined volumeMount names to check for conflicts.
+ existingVolumeMounts := make(map[string]bool, len(volumeMount))
+ for _, vm := range volumeMount {
+ existingVolumeMounts[vm.Name] = true
+ }
+
+ // Define required volumeMounts for Vector agent
+ requiredVolumeMounts := []corev1.VolumeMount{
{
Name: "config",
MountPath: "/etc/vector",
@@ -144,15 +172,20 @@ func (ctrl *Controller) generateVectorAgentVolumeMounts() []corev1.VolumeMount {
Name: "sysfs",
MountPath: "/host/sys",
},
- }...)
+ }
- if ctrl.Vector.Spec.Agent.CompressConfigFile {
- volumeMount = append(volumeMount, []corev1.VolumeMount{
- {
- Name: "app-config-compress",
- MountPath: "/tmp/archive",
- },
- }...)
+ // Only add volumeMounts that don't already exist
+ for _, reqVm := range requiredVolumeMounts {
+ if !existingVolumeMounts[reqVm.Name] {
+ volumeMount = append(volumeMount, reqVm)
+ }
+ }
+
+ if ctrl.Vector.Spec.Agent.CompressConfigFile && !existingVolumeMounts["app-config-compress"] {
+ volumeMount = append(volumeMount, corev1.VolumeMount{
+ Name: "app-config-compress",
+ MountPath: "/tmp/archive",
+ })
}
return volumeMount
@@ -211,7 +244,7 @@ func (ctrl *Controller) VectorAgentContainer() *corev1.Container {
Ports: []corev1.ContainerPort{
{
Name: "prom-exporter",
- ContainerPort: 9598,
+ ContainerPort: config.DefaultInternalMetricsSinkPort,
Protocol: "TCP",
},
},
diff --git a/internal/vector/vectoragent/vectoragent_default.go b/internal/vector/vectoragent/vectoragent_default.go
index d537a0e0..bb0dd489 100644
--- a/internal/vector/vectoragent/vectoragent_default.go
+++ b/internal/vector/vectoragent/vectoragent_default.go
@@ -17,10 +17,11 @@ limitations under the License.
package vectoragent
import (
- "github.com/kaasops/vector-operator/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
resourcev1 "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/intstr"
+
+ "github.com/kaasops/vector-operator/api/v1alpha1"
)
func (ctrl *Controller) SetDefault() {
diff --git a/internal/vector/vectoragent/vectoragent_service.go b/internal/vector/vectoragent/vectoragent_service.go
index 0d18d526..4ba14672 100644
--- a/internal/vector/vectoragent/vectoragent_service.go
+++ b/internal/vector/vectoragent/vectoragent_service.go
@@ -17,9 +17,10 @@ limitations under the License.
package vectoragent
import (
- "github.com/kaasops/vector-operator/internal/config"
corev1 "k8s.io/api/core/v1"
+ "github.com/kaasops/vector-operator/internal/config"
+
"k8s.io/apimachinery/pkg/util/intstr"
)
diff --git a/scripts/kind-config-ci.yaml b/scripts/kind-config-ci.yaml
new file mode 100644
index 00000000..8bd95f36
--- /dev/null
+++ b/scripts/kind-config-ci.yaml
@@ -0,0 +1,18 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+ - role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+ # Map host ports 80/443 into the node so HTTP/HTTPS ingress is reachable from CI
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 80
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 443
+ protocol: TCP
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index ce823b49..3989de43 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -17,13 +17,358 @@ limitations under the License.
package e2e
import (
+ "context"
"fmt"
+ "os/exec"
+ "strings"
"testing"
+ "time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework"
+ "github.com/kaasops/vector-operator/test/e2e/framework/artifacts"
+ "github.com/kaasops/vector-operator/test/utils"
+)
+
+const (
+ operatorNamespace = "vector-operator-system"
+ operatorImage = "example.com/vector-operator:v0.0.1"
)
+// artifactCollector manages artifact collection for failed tests
+var artifactCollector artifacts.Collector
+
+// readinessTestNamespace is created during controller readiness check
+// and cleaned up in AfterSuite to avoid interfering with actual tests
+var readinessTestNamespace string
+
+// SynchronizedBeforeSuite ensures setup runs only once across all parallel processes
+var _ = SynchronizedBeforeSuite(func() []byte {
+ // This function runs ONLY on process #1
+ By("building and loading operator image")
+ cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", operatorImage))
+ _, err := utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred())
+
+ cmd = exec.Command("kind", "load", "docker-image", operatorImage, "--name", "kind")
+ _, err = utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("deploying operator via Helm")
+ cmd = exec.Command("make", "deploy-helm-e2e",
+ fmt.Sprintf("IMG=%s", operatorImage),
+ fmt.Sprintf("NAMESPACE=%s", operatorNamespace),
+ )
+ _, err = utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("verifying operator is ready")
+ // Wait a bit for controllers to start watching CRs
+ cmd = exec.Command("kubectl", "wait", "--for=condition=ready",
+ "--timeout=60s",
+ "pod", "-l", "app.kubernetes.io/name=vector-operator",
+ "-n", operatorNamespace,
+ )
+ _, err = utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred())
+
+ // Install shared dependencies once for all tests
+ framework.InstallSharedDependencies()
+
+ By("verifying controllers are ready to process resources")
+ // Pod being Ready doesn't guarantee controllers are initialized (leader election, cache sync, etc.)
+ // Create a test VectorAggregator and verify the controller creates its deployment
+ // This ensures the VectorAggregator controller is fully operational before tests start
+ verifyControllersReady()
+
+ // Initialize artifact collector
+ By("initializing artifact collector")
+ config := artifacts.LoadConfigFromEnv()
+ collector, err := artifacts.NewCollector(config)
+ Expect(err).NotTo(HaveOccurred())
+
+ runID := fmt.Sprintf("%d", time.Now().Unix())
+ err = collector.Initialize(runID)
+ Expect(err).NotTo(HaveOccurred())
+
+ artifactCollector = collector
+
+ return nil
+}, func(data []byte) {
+ // This function runs on ALL processes after process #1 completes
+
+ // Initialize artifact collector on all processes (skip if already initialized on process #1)
+ if artifactCollector == nil {
+ config := artifacts.LoadConfigFromEnv()
+ collector, err := artifacts.NewCollector(config)
+ if err == nil {
+ runID := fmt.Sprintf("%d", time.Now().Unix())
+ _ = collector.Initialize(runID)
+ artifactCollector = collector
+ }
+ }
+})
+
+// ReportAfterEach collects artifacts for failed tests
+var _ = ReportAfterEach(func(report SpecReport) {
+ // Skip if collector not initialized or artifacts disabled
+ if artifactCollector == nil {
+ return
+ }
+
+ // Try to get framework from report entries first (preferred method)
+ // This is more reliable and works correctly with parallel tests
+ f := framework.FromReportEntries(report.ReportEntries)
+
+ // Fallback to registry-based matching for backward compatibility
+ // This path will be deprecated once all tests use the new approach
+ if f == nil {
+ // Find framework for this test by matching namespace in ContainerHierarchyTexts
+ var matchedFrameworks []*framework.Framework
+ var matchScores []int
+
+ containerTexts := report.ContainerHierarchyTexts
+ fullTestPath := strings.Join(containerTexts, " ") + " " + report.LeafNodeText
+ fullTestPathLower := strings.ToLower(fullTestPath)
+
+ // Try to find framework by matching namespace patterns
+ // Priority: exact namespace match > timestamp suffix match > pattern match
+ framework.GetFrameworkRegistry().Range(func(key, value interface{}) bool {
+ namespace := key.(string)
+ fw := value.(*framework.Framework)
+
+ score := 0
+
+ // Remove timestamp suffix for pattern matching
+ // e.g., "test-dataflow-1763129228782243000" -> "test-dataflow"
+ baseNamespace := namespace
+ if idx := strings.LastIndex(namespace, "-"); idx > 0 {
+ possibleBase := namespace[:idx]
+ // Check if suffix is a timestamp (all digits)
+ suffix := namespace[idx+1:]
+ isTimestamp := true
+ for _, c := range suffix {
+ if c < '0' || c > '9' {
+ isTimestamp = false
+ break
+ }
+ }
+ if isTimestamp && len(suffix) > 10 { // timestamps are long
+ baseNamespace = possibleBase
+ }
+ }
+
+ // Extract pattern from namespace: "test-normal-mode" -> "normal mode" (with space)
+ // This matches Ginkgo's Describe("Normal Mode") to namespace test-normal-mode
+ namespacePattern := strings.TrimPrefix(baseNamespace, "test-")
+ namespacePattern = strings.ReplaceAll(namespacePattern, "-", " ") // "normal-mode" -> "normal mode"
+
+ // Scoring: higher score = better match
+ // Exact namespace match in text - highest priority (1000 points)
+ if strings.Contains(fullTestPathLower, strings.ToLower(namespace)) {
+ score += 1000
+ }
+
+ // Base namespace match (without timestamp) - very high priority (500 points)
+ if baseNamespace != namespace && strings.Contains(fullTestPathLower, strings.ToLower(baseNamespace)) {
+ score += 500
+ }
+
+ // Pattern match: "test-normal-mode" matches "Normal Mode" (50 points)
+ if strings.Contains(fullTestPathLower, strings.ToLower(namespacePattern)) {
+ score += 50
+ }
+
+ // Check if namespace words appear in test path (5 points per word, reduced to avoid false positives)
+ namespaceWords := strings.Fields(namespacePattern)
+ for _, word := range namespaceWords {
+ // Only count meaningful words (skip common words)
+ if len(word) > 3 && strings.Contains(fullTestPathLower, strings.ToLower(word)) {
+ score += 5
+ }
+ }
+
+ if score > 0 {
+ matchedFrameworks = append(matchedFrameworks, fw)
+ matchScores = append(matchScores, score)
+ }
+
+ return true // Continue searching all frameworks
+ })
+
+ // Select framework with highest match score
+ if len(matchedFrameworks) > 0 {
+ bestIdx := 0
+ bestScore := matchScores[0]
+ for i := 1; i < len(matchScores); i++ {
+ if matchScores[i] > bestScore {
+ bestScore = matchScores[i]
+ bestIdx = i
+ }
+ }
+ f = matchedFrameworks[bestIdx]
+
+ if report.Failed() {
+ fmt.Fprintf(GinkgoWriter, "🔍 Framework matched via registry (fallback) with score %d: namespace=%s\n",
+ bestScore, f.Namespace())
+ }
+ }
+ } else {
+ // Successfully retrieved from report entries (preferred path)
+ if report.Failed() {
+ fmt.Fprintf(GinkgoWriter, "✓ Framework retrieved from report entries: namespace=%s\n",
+ f.Namespace())
+ }
+ }
+
+ if f == nil {
+ // No framework found - can't collect artifacts
+ if report.Failed() {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Cannot collect artifacts: no framework found for test\n")
+ }
+ return
+ }
+
+ // Log artifact collection for failed tests only
+ if report.Failed() {
+ fmt.Fprintf(GinkgoWriter, "📦 Collecting artifacts for FAILED test: %s (namespace: %s)\n",
+ report.LeafNodeText, f.Namespace())
+ }
+
+ // Build test info
+ testInfo := artifacts.TestInfo{
+ Name: report.FullText(),
+ Namespace: f.Namespace(),
+ Failed: report.Failed(),
+ FailureMessage: report.FailureMessage(),
+ Duration: report.RunTime,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ Labels: report.Labels(),
+ KubectlClient: f.Kubectl(),
+ }
+
+ // Collect artifacts with timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ if err := artifactCollector.CollectForTest(ctx, testInfo); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect artifacts: %v\n", err)
+ } else {
+ if report.Failed() {
+ fmt.Fprintf(GinkgoWriter, "✓ Artifacts collected successfully\n")
+ }
+ }
+})
+
+// SynchronizedAfterSuite ensures cleanup runs only once across all parallel processes
+var _ = SynchronizedAfterSuite(func() {
+ // This function runs on ALL processes
+ // Nothing needed here
+}, func() {
+ // This function runs ONLY on process #1 after all others finish
+
+ // Close artifact collector
+ if artifactCollector != nil {
+ if err := artifactCollector.Close(); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to close artifact collector: %v\n", err)
+ }
+ }
+
+ // Clean up readiness test namespace if it was created
+ // We defer this cleanup until after all tests to avoid controller overload
+ // during test execution (namespace deletion triggers cascading reconciliations)
+ if readinessTestNamespace != "" {
+ By("cleaning up controller readiness test namespace")
+ cmd := exec.Command("kubectl", "delete", "namespace", readinessTestNamespace, "--timeout=30s")
+ if _, err := utils.Run(cmd); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to delete readiness test namespace %s: %v\n", readinessTestNamespace, err)
+ }
+ }
+
+ // Uninstall shared dependencies
+ framework.UninstallSharedDependencies()
+
+ By("undeploying operator via Helm")
+ cmd := exec.Command("make", "undeploy-helm-e2e",
+ fmt.Sprintf("NAMESPACE=%s", operatorNamespace),
+ )
+ _, _ = utils.Run(cmd)
+})
+
+// verifyControllersReady ensures controllers are fully initialized by creating a test resource
+// and verifying the controller processes it. This prevents flaky tests caused by controllers
+// still initializing (leader election, cache sync, informers) when pod becomes Ready.
+func verifyControllersReady() {
+ const (
+ testNamespace = "controller-readiness-test"
+ testAggregator = "readiness-test-aggregator"
+ readinessTimeout = 60 * time.Second
+ pollInterval = 2 * time.Second
+ )
+
+ // Store namespace name for cleanup in AfterSuite
+ // We don't clean up immediately to avoid controller overload during test execution
+ // (namespace deletion triggers cascading reconciliations that can interfere with tests)
+ readinessTestNamespace = testNamespace
+
+ // Create temporary namespace for readiness test (idempotent)
+ // First delete if exists and wait for full deletion
+ deleteCmd := exec.Command("kubectl", "delete", "namespace", testNamespace, "--ignore-not-found=true", "--wait=true", "--timeout=30s")
+ _ = deleteCmd.Run() // Ignore errors, namespace might not exist
+
+ // Now create the namespace
+ cmd := exec.Command("kubectl", "create", "namespace", testNamespace)
+ if _, err := utils.Run(cmd); err != nil {
+ Fail(fmt.Sprintf("Failed to create readiness test namespace: %v", err))
+ }
+
+ // Create a minimal VectorAggregator CR
+ aggregatorYAML := fmt.Sprintf(`apiVersion: observability.kaasops.io/v1alpha1
+kind: VectorAggregator
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ selector: {}
+ replicas: 1
+ image: timberio/vector:0.40.0-alpine
+`, testAggregator, testNamespace)
+
+ cmd = exec.Command("kubectl", "apply", "-f", "-")
+ cmd.Stdin = strings.NewReader(aggregatorYAML)
+ if _, err := utils.Run(cmd); err != nil {
+ Fail(fmt.Sprintf("Failed to create test VectorAggregator: %v", err))
+ }
+
+ // Wait for controller to create the deployment
+ deploymentName := testAggregator + "-aggregator"
+ startTime := time.Now()
+
+ Eventually(func() error {
+ cmd := exec.Command("kubectl", "get", "deployment", deploymentName,
+ "-n", testNamespace, "-o", "name")
+ output, err := utils.Run(cmd)
+ if err != nil {
+ return fmt.Errorf("deployment not found: %w", err)
+ }
+ if !strings.Contains(string(output), "deployment") {
+ return fmt.Errorf("deployment not found")
+ }
+ return nil
+ }, readinessTimeout, pollInterval).Should(Succeed(),
+ "VectorAggregator controller should create deployment %s in namespace %s within %v. "+
+ "This indicates controller is not ready. Pod may be Ready but controllers are still initializing "+
+ "(leader election, cache sync, webhook registration, informers startup).",
+ deploymentName, testNamespace, readinessTimeout)
+
+ elapsed := time.Since(startTime)
+ fmt.Fprintf(GinkgoWriter, "✓ Controllers ready in %.2fs (deployment %s created)\n",
+ elapsed.Seconds(), deploymentName)
+}
+
// Run e2e tests using the Ginkgo runner.
func TestE2E(t *testing.T) {
RegisterFailHandler(Fail)
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 901f0ade..9dc7e4ea 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -30,63 +30,29 @@ import (
const namespace = "vector-operator-system"
var _ = Describe("controller", Ordered, func() {
- BeforeAll(func() {
- By("installing prometheus operator")
- Expect(utils.InstallPrometheusOperator()).To(Succeed())
-
- By("installing the cert-manager")
- Expect(utils.InstallCertManager()).To(Succeed())
+ // NOTE: Dependencies (Prometheus Operator, cert-manager) are installed once in
+ // SynchronizedBeforeSuite via framework.InstallSharedDependencies() and shared across all tests.
+ // No need for BeforeAll/AfterAll here.
- By("creating manager namespace")
+ BeforeAll(func() {
+ By("creating manager namespace (if not exists)")
cmd := exec.Command("kubectl", "create", "ns", namespace)
- _, _ = utils.Run(cmd)
- })
-
- AfterAll(func() {
- By("uninstalling the Prometheus manager bundle")
- utils.UninstallPrometheusOperator()
-
- By("uninstalling the cert-manager bundle")
- utils.UninstallCertManager()
-
- By("removing manager namespace")
- cmd := exec.Command("kubectl", "delete", "ns", namespace)
- _, _ = utils.Run(cmd)
+ _, _ = utils.Run(cmd) // Ignore error if already exists
})
Context("Operator", func() {
It("should run successfully", func() {
var controllerPodName string
- var err error
-
- // projectimage stores the name of the image used in the example
- var projectimage = "example.com/vector-operator:v0.0.1"
-
- By("building the manager(Operator) image")
- cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage))
- _, err = utils.Run(cmd)
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
- By("loading the the manager(Operator) image on Kind")
- err = utils.LoadImageToKindClusterWithName(projectimage)
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
-
- By("installing CRDs")
- cmd = exec.Command("make", "install")
- _, err = utils.Run(cmd)
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
-
- By("deploying the controller-manager")
- cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage))
- _, err = utils.Run(cmd)
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
+ // NOTE: Operator is deployed once in SynchronizedBeforeSuite (e2e_suite_test.go) via Helm
+ // This test verifies that the already-deployed operator is running correctly
By("validating that the controller-manager pod is running as expected")
verifyControllerUp := func() error {
- // Get pod name
+ // Get pod name (Helm deployment uses different labels)
- cmd = exec.Command("kubectl", "get",
- "pods", "-l", "control-plane=controller-manager",
+ cmd := exec.Command("kubectl", "get",
+ "pods", "-l", "app.kubernetes.io/name=vector-operator",
"-o", "go-template={{ range .items }}"+
"{{ if not .metadata.deletionTimestamp }}"+
"{{ .metadata.name }}"+
@@ -101,7 +67,7 @@ var _ = Describe("controller", Ordered, func() {
return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames))
}
controllerPodName = podNames[0]
- ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager"))
+ ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("vector-operator"))
// Validate pod status
cmd = exec.Command("kubectl", "get",
diff --git a/test/e2e/framework/README.md b/test/e2e/framework/README.md
new file mode 100644
index 00000000..b40a9aaf
--- /dev/null
+++ b/test/e2e/framework/README.md
@@ -0,0 +1,612 @@
+# E2E Test Framework
+
+A comprehensive testing framework for Vector Operator e2e tests, built on top of Ginkgo/Gomega.
+
+## Overview
+
+This framework provides a high-level API for writing maintainable and readable e2e tests. It handles common operations like namespace management, resource deployment, status checking, and cleanup, while providing custom matchers for intuitive assertions.
+
+## Key Features
+
+- **High-level API** - Simple methods for common operations
+- **Automatic namespace management** - Creates and cleans up test namespaces
+- **Shared dependencies** - Install Prometheus Operator and cert-manager once for all tests
+- **Custom Gomega matchers** - Readable DSL-style assertions
+- **Test metrics tracking** - Automatic timing measurements
+- **YAML templating** - Dynamic test data generation
+- **Centralized timeouts** - Consistent timeout configuration
+
+## Quick Start
+
+### Basic Test Structure
+
+```go
+package e2e
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework"
+ "github.com/kaasops/vector-operator/test/e2e/framework/assertions"
+ "github.com/kaasops/vector-operator/test/e2e/framework/config"
+)
+
+var _ = Describe("My Feature", Label(config.LabelSmoke, config.LabelFast), Ordered, func() {
+ f := framework.NewFramework("my-feature-test")
+
+ BeforeAll(f.Setup)
+ AfterAll(f.Teardown)
+
+ Context("Basic Functionality", func() {
+ It("should work correctly", func() {
+ // Deploy resources
+ f.ApplyTestData("normal-mode/agent.yaml")
+ f.ApplyTestData("normal-mode/pipeline-basic.yaml")
+
+ // Wait for readiness
+ f.WaitForPipelineValid("basic-pipeline")
+
+ // Assert using custom matchers
+ Eventually(f.Pipeline("basic-pipeline")).Should(assertions.BeValid())
+ })
+ })
+})
+```
+
+## Core Components
+
+### 1. Framework Object
+
+The main entry point for all test operations.
+
+```go
+// Create a new framework instance
+f := framework.NewFramework("test-namespace-prefix")
+
+// Setup creates namespace, initializes metrics, and registers framework
+// for artifact collection via Ginkgo report entries
+f.Setup()
+
+// Teardown cleans up namespace and resources
+f.Teardown()
+```
+
+**Framework Registration**
+
+The framework uses Ginkgo's report entry system for context propagation instead of global state:
+
+```go
+// In your test:
+f := framework.NewFramework("test-ns")
+f.Setup() // Automatically stores framework in Ginkgo report entries
+
+// In ReportAfterEach (for artifact collection):
+// Framework is automatically retrieved from report entries
+f := framework.FromReportEntries(report.ReportEntries)
+if f != nil {
+ // Collect artifacts using framework's kubectl client and namespace
+}
+```
+
+**Benefits:**
+- ✅ No global state - eliminates race conditions in parallel tests
+- ✅ Direct association between test and framework
+- ✅ Works correctly with Ginkgo's parallel execution
+- ✅ Backward compatible - still supports legacy registry-based matching as fallback
+
+**Context Support (Advanced)**
+
+For advanced use cases, the framework can be stored in Go contexts:
+
+```go
+// Store in context
+ctx := f.ToContext(context.Background())
+
+// Retrieve from context
+f := framework.FromContext(ctx)
+if f != nil {
+ // Use framework
+}
+```
+
+### 2. Resource Management
+
+#### Apply Test Data
+
+```go
+// Load and apply YAML from test/e2e/testdata/
+f.ApplyTestData("normal-mode/agent.yaml")
+f.ApplyTestData("normal-mode/pipeline-basic.yaml")
+```
+
+#### Create Multiple Resources
+
+```go
+// Create 100 pipelines from template
+creationTime := f.CreateMultiplePipelinesFromTemplate(
+ "scalability/pipeline-template.yaml",
+ "pipeline-NNNN", // Placeholder to replace
+ 100, // Count
+)
+fmt.Printf("Created 100 pipelines in %v\n", creationTime)
+```
+
+### 3. Wait Operations
+
+```go
+// Wait for deployment to be ready (uses config.DeploymentReadyTimeout)
+f.WaitForDeploymentReady("aggregator-name")
+
+// Wait for pipeline to become valid (uses config.PipelineValidTimeout)
+f.WaitForPipelineValid("pipeline-name")
+```
+
+### 3.1. Log Polling Methods
+
+Standardized methods for waiting on log content, eliminating boilerplate Eventually blocks:
+
+```go
+// Wait for substring to appear in pod logs
+err := f.WaitForLogsContaining("pod-name", "expected text", 2*time.Minute)
+Expect(err).NotTo(HaveOccurred())
+
+// Wait for regex pattern to match in pod logs
+err := f.WaitForLogsMatching("pod-name", `\d+ requests processed`, 1*time.Minute)
+Expect(err).NotTo(HaveOccurred())
+
+// Verify substring does NOT appear in logs (negative assertion)
+err := f.AssertNoLogsContaining("pod-name", "ERROR", 30*time.Second)
+Expect(err).NotTo(HaveOccurred())
+
+// Get logs with options
+logs, err := f.GetPodLogsWithOptions("pod-name", framework.LogOptions{
+ TailLines: 100,
+})
+Expect(err).NotTo(HaveOccurred())
+```
+
+**Before (verbose):**
+```go
+var logs string
+Eventually(func() bool {
+ l, err := f.GetPodLogs("pod-name")
+ if err != nil {
+ return false
+ }
+ logs = l
+ return strings.Contains(logs, "expected text")
+}, 2*time.Minute, 1*time.Second).Should(BeTrue())
+```
+
+**After (concise):**
+```go
+err := f.WaitForLogsContaining("pod-name", "expected text", 2*time.Minute)
+Expect(err).NotTo(HaveOccurred())
+```
+
+### 4. Status Queries
+
+```go
+// Get pipeline status field
+role := f.GetPipelineStatus("my-pipeline", "role")
+
+// Count valid pipelines
+validCount, err := f.CountValidPipelines()
+
+// Count services with label
+serviceCount := f.CountServicesWithLabel("app.kubernetes.io/component=Aggregator")
+```
+
+### 5. Custom Matchers
+
+The framework provides custom Gomega matchers for readable assertions:
+
+#### Pipeline Matchers
+
+```go
+// Check if pipeline is valid
+Eventually(f.Pipeline("test-pipeline")).Should(assertions.BeValid())
+Eventually(f.Pipeline("test-pipeline")).Should(assertions.BeInvalid())
+
+// Check role
+Expect(f.Pipeline("test-pipeline")).To(assertions.HaveRole("agent"))
+Expect(f.Pipeline("test-pipeline")).To(assertions.HaveRole("aggregator"))
+
+// Check error message contains substring
+Expect(f.Pipeline("invalid-pipeline")).To(assertions.HaveErrorContaining("validation"))
+```
+
+#### Service Matchers
+
+```go
+// Check if service exists
+Eventually(f.Service("my-service")).Should(assertions.Exist())
+
+// Check service port
+Expect(f.Service("my-service")).To(assertions.HavePort("9090"))
+```
+
+## Shared Dependencies
+
+Shared dependencies (Prometheus Operator, cert-manager) are installed once in `SynchronizedBeforeSuite` and shared across all tests.
+
+### Installation
+
+Handled automatically in `test/e2e/e2e_suite_test.go`:
+
+```go
+var _ = SynchronizedBeforeSuite(func() []byte {
+    // ... operator deployment (runs on process #1 only)
+    // Install shared dependencies once for all parallel processes
+    framework.InstallSharedDependencies()
+    return nil
+}, func(data []byte) {})
+```
+
+### Benefits
+
+- **Faster test execution** - ~3 minutes saved per test run
+- **More stable** - Avoid repeated install/uninstall cycles
+- **Cleaner logs** - No AlreadyExists errors
+
+### Usage in Tests
+
+Tests automatically use shared dependencies:
+
+```go
+// No need to install/uninstall in individual tests
+var _ = Describe("My Test", func() {
+ f := framework.NewFramework("test-ns")
+
+ BeforeAll(f.Setup) // Just creates namespace
+ AfterAll(f.Teardown) // Just cleans up namespace
+
+ // Dependencies are already available
+})
+```
+
+## Test Labels
+
+Ginkgo v2 provides a powerful label system for categorizing and filtering tests. Labels are simply strings that can be attached to test specs.
+
+### Standard Labels (defined in `config/constants.go`)
+
+```go
+Label(config.LabelSmoke) // Quick smoke tests
+Label(config.LabelFast) // Fast tests (<2 min)
+Label(config.LabelSlow) // Slow tests (>5 min)
+Label(config.LabelStress) // Stress/load tests
+Label(config.LabelRegression) // Regression tests
+```
+
+### Priority Labels
+
+```go
+Label(config.LabelP0) // P0: Critical, must always pass
+Label(config.LabelP1) // P1: High priority
+Label(config.LabelP2) // P2: Medium priority
+
+// Example usage:
+var _ = Describe("Source Type Constraints [P0-Security]",
+ Label(config.LabelConstraint, config.LabelP0, config.LabelSecurity, config.LabelFast), func() {
+ // ...
+})
+```
+
+### Category Labels
+
+```go
+Label(config.LabelSecurity) // Security-related tests
+Label(config.LabelConstraint) // Constraint validation tests
+```
+
+### Combined Labels
+
+```go
+// Multiple labels for fine-grained filtering
+Label(config.LabelSmoke, config.LabelFast) // Quick smoke test
+Label(config.LabelP0, config.LabelSecurity, config.LabelFast) // Critical security test
+Label(config.LabelStress, config.LabelSlow) // Long-running load test
+```
+
+### Filtering Tests
+
+Run specific test categories:
+
+```bash
+# Run only smoke tests
+ginkgo --label-filter=smoke ./test/e2e/
+
+# Run fast tests
+ginkgo --label-filter=fast ./test/e2e/
+
+# Exclude slow tests
+ginkgo --label-filter="!slow" ./test/e2e/
+
+# Run critical security tests
+ginkgo --label-filter="p0 && security" ./test/e2e/
+
+# Run smoke tests but exclude slow ones
+ginkgo --label-filter="smoke && !slow" ./test/e2e/
+
+# Run either constraint or security tests
+ginkgo --label-filter="constraint || security" ./test/e2e/
+```
+
+### Best Practices
+
+1. **Use descriptive labels**: Labels should clearly indicate what they categorize
+2. **Combine standard + custom labels**: Mix project-standard labels with feature-specific ones
+3. **Document critical labels**: If using priority labels (P0, P1), document their meaning
+4. **Keep labels in test names**: Add labels to Describe text for better readability (e.g., `[P0-Security]`)
+
+### Available Labels
+
+List all labels in the test suite:
+```bash
+ginkgo labels ./test/e2e/
+```
+
+## Test Metrics
+
+The framework automatically tracks test operation timing:
+
+```go
+// Metrics are collected automatically
+f.Setup() // Tracks setup time
+f.WaitForDeploymentReady(...) // Tracks deployment wait time
+f.WaitForPipelineValid(...) // Tracks pipeline validation time
+f.Teardown() // Tracks cleanup time
+
+// Metrics are printed after each test
+// Example output:
+// 📊 Test Metrics:
+// Setup: 60.777ms
+// Deployment Wait: 4.299s
+// Pipeline Validation: 5.098s
+// Cleanup: 11.034s
+// Total: 20.472s
+```
+
+## Environment Variables
+
+The framework supports several environment variables for customization:
+
+### E2E_TESTDATA_PATH
+
+Customize the location of test data files. Defaults to `test/e2e/testdata`.
+
+```bash
+# Use custom test data directory
+E2E_TESTDATA_PATH=/path/to/testdata make test-e2e
+
+# Run tests with test data in a different location
+E2E_TESTDATA_PATH=/tmp/my-testdata ginkgo test/e2e/
+```
+
+**Use cases:**
+- Testing with different data sets
+- CI/CD pipelines with mounted test data
+- Temporary test data generation
+- Isolated test environments
+
+### E2E_DRY_RUN
+
+Run tests in dry-run mode to generate test plans without executing them.
+
+```bash
+E2E_DRY_RUN=true make test-e2e
+```
+
+### E2E_RECORD_STEPS
+
+Record test steps for debugging and reproducibility.
+
+```bash
+E2E_RECORD_STEPS=true make test-e2e
+```
+
+## Timeouts Configuration
+
+Centralized timeout configuration in `config/timeouts.go`:
+
+```go
+const (
+ DeploymentCreateTimeout = 90 * time.Second // Wait for deployment to be created
+ DeploymentReadyTimeout = 120 * time.Second // Wait for deployment to be ready
+ PipelineValidTimeout = 2 * time.Minute // Wait for pipeline validation
+ ServiceCreateTimeout = 2 * time.Minute // Wait for service creation
+ DefaultPollInterval = 2 * time.Second // Default polling interval
+ SlowPollInterval = 5 * time.Second // Slower polling for heavy ops
+)
+```
+
+## Advanced Examples
+
+### Example 1: Basic Pipeline Test
+
+```go
+It("should create and validate a basic pipeline with agent", func() {
+ // Deploy resources
+ f.ApplyTestData("normal-mode/agent.yaml")
+ f.ApplyTestData("normal-mode/pipeline-basic.yaml")
+
+ // Wait for readiness
+ f.WaitForPipelineValid("basic-pipeline")
+
+ // Verify pipeline configuration
+ Eventually(f.Pipeline("basic-pipeline")).Should(assertions.BeValid())
+ Expect(f.Pipeline("basic-pipeline")).To(assertions.HaveRole("agent"))
+
+ // Verify agent processes the pipeline
+ Eventually(func() error {
+ return f.VerifyAgentHasPipeline("normal-agent", "basic-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+})
+```
+
+### Example 2: Aggregator Test
+
+```go
+It("should deploy aggregator and process pipelines", func() {
+ // Deploy aggregator
+ f.ApplyTestData("normal-mode/aggregator.yaml")
+ f.WaitForDeploymentReady("my-aggregator-aggregator")
+
+ // Create pipeline with aggregator role
+ f.ApplyTestData("normal-mode/pipeline-aggregator-role.yaml")
+ f.WaitForPipelineValid("aggregator-pipeline")
+
+ // Verify role
+ Expect(f.Pipeline("aggregator-pipeline")).To(assertions.HaveRole("aggregator"))
+})
+```
+
+### Example 3: Scalability Test
+
+```go
+It("should handle 100 pipelines successfully", func() {
+ const pipelineCount = 100
+
+ // Deploy aggregator
+ f.ApplyTestData("scalability/aggregator.yaml")
+ f.WaitForDeploymentReady("scale-aggregator-aggregator")
+
+ // Create 100 pipelines from template
+ creationTime := f.CreateMultiplePipelinesFromTemplate(
+ "scalability/pipeline-template.yaml",
+ "pipeline-NNNN",
+ pipelineCount,
+ )
+ GinkgoWriter.Printf("✨ Created %d pipelines in %v\n", pipelineCount, creationTime)
+
+ // Wait for all to become valid (with progress logging)
+ Eventually(func() (int, error) {
+ validCount, err := f.CountValidPipelines()
+ if validCount > 0 {
+ GinkgoWriter.Printf("📊 Validation progress: %d/%d pipelines valid (%.0f%%)\n",
+ validCount, pipelineCount, float64(validCount)/float64(pipelineCount)*100)
+ }
+ return validCount, nil
+ }, 7*time.Minute, 10*time.Second).Should(Equal(pipelineCount))
+})
+```
+
+## Best Practices
+
+### 1. Use Descriptive Test Names
+
+```go
+// Good
+It("should create and validate a basic pipeline with agent", func() { ... })
+
+// Bad
+It("test1", func() { ... })
+```
+
+### 2. Use Eventually for Async Operations
+
+```go
+// Good - waits for condition to be met
+Eventually(f.Pipeline("test-pipeline")).Should(assertions.BeValid())
+
+// Bad - may fail if not ready immediately
+Expect(f.Pipeline("test-pipeline")).To(assertions.BeValid())
+```
+
+### 3. Use Appropriate Labels
+
+```go
+// Mark fast smoke tests
+var _ = Describe("Quick Validation", Label(config.LabelSmoke, config.LabelFast), ...)
+
+// Mark slow stress tests
+var _ = Describe("Load Test", Label(config.LabelStress, config.LabelSlow), ...)
+```
+
+### 4. Leverage Test Metrics
+
+```go
+// Metrics are automatically tracked and displayed
+BeforeAll(f.Setup) // Tracks setup time
+AfterAll(f.Teardown) // Tracks cleanup time + displays all metrics
+```
+
+### 5. Use Custom Matchers
+
+```go
+// Good - readable and clear intent
+Expect(f.Pipeline("test")).To(assertions.BeValid())
+Expect(f.Pipeline("test")).To(assertions.HaveRole("agent"))
+
+// Bad - verbose and less clear
+status := f.GetPipelineStatus("test", "configCheckResult")
+Expect(status).To(Equal("true"))
+role := f.GetPipelineStatus("test", "role")
+Expect(role).To(Equal("agent"))
+```
+
+## Directory Structure
+
+```
+test/e2e/framework/
+├── README.md # This file
+├── framework.go # Main framework implementation
+├── lifecycle.go # Shared dependencies management
+├── resources.go # Resource utilities
+├── config/
+│ ├── constants.go # Test labels and constants
+│ └── timeouts.go # Timeout configuration
+├── kubectl/
+│ ├── client.go # Kubectl wrapper
+│ ├── wait.go # Wait utilities
+│ └── validation.go # Validation helpers
+├── assertions/
+│ └── matchers.go # Custom Gomega matchers
+├── artifacts/
+│ ├── collector.go # Artifact collection
+│ ├── storage.go # Artifact storage
+│ └── config.go # Artifact configuration
+├── errors/
+│ └── errors.go # Custom error types
+└── recorder/
+ └── recorder.go # Step recorder
+```
+
+## Contributing
+
+When adding new features to the framework:
+
+1. Keep the API simple and intuitive
+2. Add appropriate error handling
+3. Track timing metrics for new operations
+4. Add custom matchers for common assertions
+5. Update this README with examples
+
+## Troubleshooting
+
+### AlreadyExists Errors
+
+If you see `AlreadyExists` errors for Prometheus Operator or cert-manager:
+- Ensure you're not installing dependencies in `BeforeAll`
+- Dependencies are automatically installed in `SynchronizedBeforeSuite` via `framework.InstallSharedDependencies()`
+
+### Timeout Errors
+
+If tests timeout:
+- Check `config/timeouts.go` and adjust as needed
+- Use `SlowPollInterval` for expensive operations
+- Consider increasing go test timeout: `-timeout=15m`
+
+### Namespace Not Found
+
+If you see namespace errors:
+- Ensure `BeforeAll(f.Setup)` is called
+- Verify namespace name matches test data YAML files
+
+## References
+
+- [Ginkgo Documentation](https://onsi.github.io/ginkgo/)
+- [Gomega Matchers](https://onsi.github.io/gomega/)
+- [Vector Operator E2E Tests](../README.md)
diff --git a/test/e2e/framework/artifacts/README.md b/test/e2e/framework/artifacts/README.md
new file mode 100644
index 00000000..97b774d5
--- /dev/null
+++ b/test/e2e/framework/artifacts/README.md
@@ -0,0 +1,178 @@
+# E2E Test Artifact Collection
+
+Automatic collection of debugging artifacts when e2e tests fail.
+
+## Features
+
+- **Automatic**: Collects artifacts only on test failures (configurable)
+- **Safe**: Never fails tests due to collection errors
+- **Fast**: < 1s overhead for passing tests, < 30s for failing tests
+- **Comprehensive**: Logs, pod status, events, resources
+
+## Configuration (ENV Variables)
+
+All configuration is ENV-based with sensible defaults:
+
+### Collection Control
+- `E2E_ARTIFACTS_ENABLED` (default: `true`) - Master switch
+- `E2E_ARTIFACTS_ON_FAILURE_ONLY` (default: `true`) - Only collect on failures
+- `E2E_ARTIFACTS_MINIMAL_ONLY` (default: `false`) - Collect only P0 (critical) artifacts
+
+### Storage
+- `E2E_ARTIFACTS_DIR` (default: `test/e2e/artifacts`) - Base directory
+
+### Size Limits
+- `E2E_ARTIFACTS_MAX_LOG_LINES` (default: `500`) - Max log lines per pod
+- `E2E_ARTIFACTS_MAX_RESOURCE_SIZE` (default: `10485760`) - Max 10MB per file
+- `E2E_ARTIFACTS_MAX_TOTAL_SIZE` (default: `104857600`) - Max 100MB per test
+
+### Timeouts
+- `E2E_ARTIFACTS_TIMEOUT` (default: `30s`) - Max collection time
+
+## Usage
+
+### Running Tests with Artifacts
+
+```bash
+# Default behavior (enabled, on-failure-only)
+make test-e2e
+
+# Disable artifacts
+E2E_ARTIFACTS_ENABLED=false make test-e2e
+
+# Collect for all tests (even passing)
+E2E_ARTIFACTS_ON_FAILURE_ONLY=false make test-e2e
+
+# Increase log lines
+E2E_ARTIFACTS_MAX_LOG_LINES=1000 make test-e2e
+```
+
+### Artifact Location
+
+Artifacts are stored in:
+```
+test/e2e/artifacts/
+└── run-{timestamp}/
+ ├── metadata.json
+ └── {test-name}/
+ ├── metadata.json
+ ├── logs/
+ │ ├── operator-controller.log
+ │ └── pod-{name}.log
+ ├── pods/
+ │ └── {pod-name}-status.json
+ ├── resources/
+ │ ├── vectorpipeline-{name}-status.json
+ │ └── deployment-{name}.yaml
+ └── events/
+ └── namespace-events.txt
+```
+
+### Unified Test Results
+
+When you run `make test-e2e`, all results are automatically saved in a unified structure with reports and artifacts correlated by timestamp:
+
+```bash
+# Run tests - results automatically saved with timestamp
+make test-e2e
+
+# Results structure:
+test/e2e/results/run-{timestamp}/
+├── reports/
+│ ├── junit-report.xml # JUnit XML for CI integration
+│ ├── report.json # Ginkgo JSON report
+│ └── test-output.log # Full test output logs
+└── artifacts/ # Debug artifacts (only for failed tests)
+ ├── metadata.json # Run-level metadata
+ └── {test-name}/ # Per-test artifacts
+ ├── metadata.json
+ ├── logs/
+ ├── pods/
+ ├── resources/
+ └── events/
+```
+
+**Benefits**:
+- Single runID correlates all reports and artifacts
+- Easy to navigate - everything in one directory
+- CI/CD friendly - upload one directory
+- Helpful output with quick analysis commands
+
+### CI Integration (GitHub Actions)
+
+```yaml
+- name: Run E2E Tests
+ run: make test-e2e
+
+- name: Upload Test Results
+ if: always() # Upload even if tests fail
+ uses: actions/upload-artifact@v4
+ with:
+ name: e2e-results-${{ github.run_number }}
+ path: test/e2e/results/
+ retention-days: 30
+```
+
+## Collected Artifacts (P0 - MVP)
+
+### Critical for Debugging
+1. **Pod Status JSON** - Conditions, restarts, phase
+2. **Operator Controller Logs** - Time-filtered logs (test duration + 1min buffer)
+3. **VectorPipeline CR Status** - Validation results
+4. **Namespace Events** - What happened in test namespace
+5. **Resource Metadata** - Deployments, DaemonSets, Services
+
+### Future (Phase 2)
+- Full pod logs (all containers)
+- Full pod descriptions
+- Vector agent/aggregator logs
+- ConfigCheck pod logs
+- Timeline reconstruction
+
+## Architecture
+
+- **Thread-safe**: Uses `sync.Map` for parallel test support
+- **Graceful degradation**: Collection errors don't fail tests
+- **Size limits**: Prevents CI artifact bloat
+- **Atomic writes**: Temp file + rename for reliability
+
+## Performance
+
+- **Passing tests**: < 1s overhead (if `E2E_ARTIFACTS_ON_FAILURE_ONLY=true`)
+- **Failing tests**: < 30s collection time
+- **Storage**: < 100MB per test, < 500MB per run
+
+## Troubleshooting
+
+### No artifacts collected
+1. Check `E2E_ARTIFACTS_ENABLED=true`
+2. Verify test is using `framework.NewFramework()` or `framework.Shared()`
+3. Check GinkgoWriter output for warning messages
+
+### Artifacts too large
+1. Reduce `E2E_ARTIFACTS_MAX_LOG_LINES` (default: 500)
+2. Enable `E2E_ARTIFACTS_MINIMAL_ONLY=true`
+3. Check individual file sizes with `E2E_ARTIFACTS_MAX_RESOURCE_SIZE`
+
+### Collection timeout
+1. Increase `E2E_ARTIFACTS_TIMEOUT` (default: 30s)
+2. Check kubectl connectivity
+3. Review namespace resource count
+
+## Important Bug Fixes
+
+### Time-based Log Collection (Fixed)
+**Problem**: Previously, operator logs were collected using `kubectl logs --tail 500`, which retrieved the last 500 lines from the entire pod lifetime. In long-running test suites (e.g., full e2e runs lasting 15+ minutes), the operator pod could generate thousands of log lines, causing the last 500 lines to exclude logs from earlier failing tests.
+
+**Example**: A test failing at 18:05-18:07 would collect operator logs from 16:02-16:03 (the pod's startup logs), completely missing the relevant reconciliation attempts.
+
+**Solution**: Implemented time-based log collection using `kubectl logs --since-time` with the test's start time (+ 1 minute buffer). This ensures operator logs are collected only for the relevant time period, regardless of how long the pod has been running.
+
+**Impact**:
+- Fixes flaky test debugging where operator logs were missing
+- Enables reliable root cause analysis for race conditions
+- Reduces confusion when logs don't match test timeline
+
+## Development
+
+See architect design document for Phase 2+ enhancements.
diff --git a/test/e2e/framework/artifacts/collector.go b/test/e2e/framework/artifacts/collector.go
new file mode 100644
index 00000000..0be6e612
--- /dev/null
+++ b/test/e2e/framework/artifacts/collector.go
@@ -0,0 +1,621 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package artifacts
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2" //nolint:golint,revive
+
+ "github.com/kaasops/vector-operator/test/e2e/framework/kubectl"
+)
+
// TestInfo contains information about a single test execution. It is passed
// to the Collector after a test finishes so artifacts can be gathered,
// labeled, and time-filtered.
type TestInfo struct {
	Name           string        // full Ginkgo test name
	Namespace      string        // namespace the test ran against
	Failed         bool          // true when the test failed
	FailureMessage string        // failure details (empty when the test passed)
	Duration       time.Duration // wall-clock test duration
	StartTime      time.Time     // test start; used to time-filter operator logs
	EndTime        time.Time     // test end
	Labels         []string      // Ginkgo labels attached to the test

	// Test sequence tracking (for degradation analysis).
	// These two fields are filled in by the collector itself during
	// CollectForTest, not by the caller.
	SequenceNumber int           // Which test in the run (1, 2, 3...)
	OperatorAge    time.Duration // How long operator has been running

	// Kubernetes context; must be non-nil for artifact collection to run.
	KubectlClient *kubectl.Client
}
+
// Collector manages artifact collection for e2e tests. Implementations are
// expected to fail soft: collection errors are reported but should never
// cause a test to fail.
type Collector interface {
	// Initialize sets up the collector for a test run identified by runID
	// (creates run-level storage and records run state).
	Initialize(runID string) error

	// CollectForTest collects artifacts for a finished test described by
	// testInfo. ctx bounds the collection work.
	CollectForTest(ctx context.Context, testInfo TestInfo) error

	// Close finalizes the collector and writes the run summary metadata.
	Close() error
}
+
// collector implements the Collector interface.
type collector struct {
	config   Config           // collection behavior (normally loaded from ENV)
	storage  *Storage         // run/test directory management; nil until Initialize
	metadata *MetadataBuilder // metadata JSON writer; nil until Initialize
	runStart time.Time        // when the collector (run) was created

	// Operator tracking (for degradation analysis).
	// Zero when the operator pod could not be queried at Initialize time.
	operatorStartTime time.Time

	// Statistics
	totalTests  int // every test seen by CollectForTest
	failedTests int // subset of totalTests that failed
	testCounter int // counter for directory naming; counts only tests whose artifacts were actually collected
}
+
+// NewCollector creates a new artifact collector
+func NewCollector(config Config) (Collector, error) {
+ return &collector{
+ config: config,
+ runStart: time.Now(),
+ }, nil
+}
+
+// Initialize sets up the collector for a test run
+func (c *collector) Initialize(runID string) error {
+ if !c.config.Enabled {
+ return nil
+ }
+
+ // Create storage
+ storage, err := NewStorage(c.config.BaseDir, runID, c.config.MaxResourceSize)
+ if err != nil {
+ return fmt.Errorf("failed to create storage: %w", err)
+ }
+ c.storage = storage
+
+ // Create metadata builder
+ c.metadata = NewMetadataBuilder(storage)
+
+ // Get operator start time for degradation tracking
+ c.operatorStartTime = c.getOperatorStartTime()
+
+ fmt.Fprintf(GinkgoWriter, "📦 Artifact collection initialized: %s\n", storage.GetRunDir())
+ return nil
+}
+
// CollectForTest collects artifacts for a specific test.
//
// Run statistics (totalTests/failedTests) are updated for every call, but
// artifacts are gathered only when the test failed or CollectOnFailureOnly
// is disabled. Once collection starts, individual failures are reported as
// warnings on GinkgoWriter and never returned as hard errors.
//
// NOTE(review): the CollectionTimeout is attached to ctx below, but several
// collection helpers shell out or use client calls that do not accept a
// context, so the timeout is only partially enforced — confirm/extend.
func (c *collector) CollectForTest(ctx context.Context, testInfo TestInfo) error {
	if !c.config.Enabled {
		return nil
	}

	// Update statistics (counted even when artifact collection is skipped below)
	c.totalTests++
	if testInfo.Failed {
		c.failedTests++
	}

	// Skip passed tests if configured
	if !testInfo.Failed && c.config.CollectOnFailureOnly {
		return nil
	}

	// Increment counter and create short directory name.
	// testCounter only counts tests that reach collection, so directory
	// numbering stays dense even when passing tests are skipped.
	c.testCounter++
	shortName := createShortTestName(testInfo.Name, c.testCounter)

	// Fill in tracking fields for degradation analysis
	testInfo.SequenceNumber = c.totalTests
	if !c.operatorStartTime.IsZero() {
		testInfo.OperatorAge = time.Since(c.operatorStartTime)
	}

	// Create test directory
	testDir, err := c.storage.CreateTestDir(shortName)
	if err != nil {
		return fmt.Errorf("failed to create test directory: %w", err)
	}

	fmt.Fprintf(GinkgoWriter, "📦 Collecting artifacts for test: %s\n", testInfo.Name)
	collectionStart := time.Now()

	// Collect with timeout (see NOTE above about partial enforcement)
	ctx, cancel := context.WithTimeout(ctx, c.config.CollectionTimeout)
	defer cancel()

	// Track collected artifacts
	inventory := ArtifactInventory{
		LogFiles:      []string{},
		ResourceFiles: []string{},
		EventFiles:    []string{},
	}

	// P0 artifacts - critical for debugging
	if err := c.collectP0Artifacts(ctx, testInfo, testDir, &inventory); err != nil {
		fmt.Fprintf(GinkgoWriter, "⚠️ Warning: P0 artifact collection had errors: %v\n", err)
	}

	// Write test metadata
	collectionDuration := time.Since(collectionStart)
	inventory.CollectionTime = collectionDuration.String()

	meta := BuildTestMetadata(testInfo, inventory)
	if err := c.metadata.WriteTestMetadata(meta, testDir); err != nil {
		fmt.Fprintf(GinkgoWriter, "⚠️ Warning: Failed to write test metadata: %v\n", err)
	}

	fmt.Fprintf(GinkgoWriter, "✅ Artifacts collected in %v (%d files)\n",
		collectionDuration, len(inventory.LogFiles)+len(inventory.ResourceFiles)+len(inventory.EventFiles))

	return nil
}
+
+// collectP0Artifacts collects P0 (critical) artifacts
+func (c *collector) collectP0Artifacts(ctx context.Context, testInfo TestInfo, testDir string, inventory *ArtifactInventory) error {
+ kubectl := testInfo.KubectlClient
+ namespace := testInfo.Namespace
+
+ if kubectl == nil || namespace == "" {
+ return fmt.Errorf("missing kubectl client or namespace")
+ }
+
+ // 1. Pod status (JSON) - fast, critical
+ if err := c.collectPodStatus(ctx, kubectl, namespace, testDir, inventory); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect pod status: %v\n", err)
+ }
+
+ // 2. Operator controller logs - critical for debugging
+ if err := c.collectOperatorLogs(ctx, testDir, inventory, testInfo.StartTime); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect operator logs: %v\n", err)
+ }
+
+ // 2a. Operator health (pod describe, events) - critical for degradation diagnosis
+ if err := c.collectOperatorHealth(ctx, testDir, inventory, testInfo.StartTime); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect operator health: %v\n", err)
+ }
+
+ // 3. Pipeline status - fast, shows validation state
+ if err := c.collectPipelineStatus(ctx, kubectl, namespace, testDir, inventory); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect pipeline status: %v\n", err)
+ }
+
+ // 4. Namespace events - fast, shows what happened
+ if err := c.collectEvents(ctx, kubectl, namespace, testDir, inventory); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect namespace events: %v\n", err)
+ }
+
+ // 5. Resource metadata (Deployment/DaemonSet/Service basic info)
+ if err := c.collectResourceMetadata(ctx, kubectl, namespace, testDir, inventory); err != nil {
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect resource metadata: %v\n", err)
+ }
+
+ return nil
+}
+
+// collectPodStatus collects status of all pods in the namespace
+func (c *collector) collectPodStatus(ctx context.Context, kubectl *kubectl.Client, namespace, testDir string, inventory *ArtifactInventory) error {
+ // Get all pods
+ pods, err := kubectl.GetPodsByLabel("")
+ if err != nil {
+ return fmt.Errorf("failed to get pods: %w", err)
+ }
+
+ inventory.PodCount = len(pods)
+
+ for _, podName := range pods {
+ // Get pod status as JSON
+ output, err := kubectl.GetWithJsonPath("pod", podName, ".status")
+ if err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to get status for pod %s: %v\n", podName, err)
+ continue
+ }
+
+ filename := fmt.Sprintf("%s-status.json", podName)
+ if err := c.storage.WriteFile(testDir, "pods", filename, []byte(output)); err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to write pod status for %s: %v\n", podName, err)
+ continue
+ }
+
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "pods/"+filename)
+
+ // Also get pod logs (last N lines)
+ logs, err := kubectl.GetPodLogsTail(podName, c.config.MaxLogLines)
+ if err != nil {
+ // Pod might not have logs yet, that's okay
+ continue
+ }
+
+ // Truncate logs if needed
+ truncatedLogs := TruncateLogLines([]byte(logs), c.config.MaxLogLines)
+
+ logFilename := fmt.Sprintf("%s.log", podName)
+ if err := c.storage.WriteFile(testDir, "logs", logFilename, truncatedLogs); err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to write logs for %s: %v\n", podName, err)
+ continue
+ }
+
+ inventory.LogFiles = append(inventory.LogFiles, "logs/"+logFilename)
+ }
+
+ return nil
+}
+
// collectOperatorLogs collects operator controller logs for the window of the
// current test (with a 1-minute lead-in) and writes them to
// logs/operator-controller.log.
//
// NOTE(review): ctx is currently unused — the kubectl wrapper calls here do
// not accept a context, so the collection timeout is not enforced in this
// step. NOTE(review): when the pod list is empty with a nil list error, the
// %w below wraps a nil error — consider a dedicated "no pod found" message.
func (c *collector) collectOperatorLogs(ctx context.Context, testDir string, inventory *ArtifactInventory, testStart time.Time) error {
	// Get operator pod name from vector-operator-system namespace
	operatorNs := "vector-operator-system"
	operatorClient := kubectl.NewClient(operatorNs)

	pods, err := operatorClient.GetPodsByLabel("app.kubernetes.io/name=vector-operator")
	if err != nil || len(pods) == 0 {
		return fmt.Errorf("failed to find operator controller pod: %w", err)
	}

	// Get logs from first controller pod (should only be one)
	podName := pods[0]

	// Add 1 minute buffer before test start to capture context
	// This helps see what was happening just before the test started
	logsSince := testStart.Add(-1 * time.Minute)

	// Use time-based log collection to get logs relevant to this test
	// This fixes the issue where long-running operator pods would only return
	// the last N lines from the entire pod lifetime, missing test-specific logs
	logs, err := operatorClient.GetPodLogsSinceTime(podName, logsSince, c.config.MaxLogLines)
	if err != nil {
		return fmt.Errorf("failed to get operator logs: %w", err)
	}

	// Truncate logs if still needed (MaxLogLines caps the written size)
	truncatedLogs := TruncateLogLines([]byte(logs), c.config.MaxLogLines)

	if err := c.storage.WriteFile(testDir, "logs", "operator-controller.log", truncatedLogs); err != nil {
		return fmt.Errorf("failed to write operator logs: %w", err)
	}

	inventory.LogFiles = append(inventory.LogFiles, "logs/operator-controller.log")
	return nil
}
+
+// collectOperatorHealth collects operator pod describe and events for degradation diagnosis
+func (c *collector) collectOperatorHealth(ctx context.Context, testDir string, inventory *ArtifactInventory, testStart time.Time) error {
+ const operatorNs = "vector-operator-system"
+ operatorClient := kubectl.NewClient(operatorNs)
+
+ // Get operator pod
+ pods, err := operatorClient.GetPodsByLabel("app.kubernetes.io/name=vector-operator")
+ if err != nil || len(pods) == 0 {
+ return fmt.Errorf("failed to find operator pod: %w", err)
+ }
+ podName := pods[0]
+
+ // 1. Get pod describe (shows conditions, events, restarts, QoS, resource requests/limits)
+ describeCmd := exec.Command("kubectl", "describe", "pod", podName, "-n", operatorNs)
+ describeOutput, err := describeCmd.CombinedOutput()
+ if err == nil {
+ if err := c.storage.WriteFile(testDir, "operator", "pod-describe.txt", describeOutput); err != nil {
+ return fmt.Errorf("failed to write operator pod describe: %w", err)
+ }
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "operator/pod-describe.txt")
+ }
+
+ // 2. Get cluster-wide events related to operator (evictions, OOMKills, etc.)
+ // Use time window from 2 minutes before test start to catch context
+ sinceTime := testStart.Add(-2 * time.Minute).UTC().Format(time.RFC3339)
+
+ // Get all Warning events in operator namespace
+ eventsCmd := exec.Command("kubectl", "get", "events", "-n", operatorNs,
+ "--field-selector", "type=Warning",
+ "--since-time", sinceTime)
+ eventsOutput, err := eventsCmd.CombinedOutput()
+ if err == nil && len(eventsOutput) > 0 {
+ if err := c.storage.WriteFile(testDir, "operator", "warning-events.txt", eventsOutput); err != nil {
+ return fmt.Errorf("failed to write operator warning events: %w", err)
+ }
+ inventory.EventFiles = append(inventory.EventFiles, "operator/warning-events.txt")
+ }
+
+ // 3. Get deployment describe (shows replica status, conditions)
+ deployDescribeCmd := exec.Command("kubectl", "describe", "deployment", "vector-operator-controller-manager", "-n", operatorNs)
+ deployDescribeOutput, err := deployDescribeCmd.CombinedOutput()
+ if err == nil {
+ if err := c.storage.WriteFile(testDir, "operator", "deployment-describe.txt", deployDescribeOutput); err != nil {
+ return fmt.Errorf("failed to write deployment describe: %w", err)
+ }
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "operator/deployment-describe.txt")
+ }
+
+ // 4. Collect pprof profiles (goroutine, heap) for memory/goroutine leak diagnosis
+ if err := c.collectPprofProfiles(ctx, testDir, inventory, podName, operatorNs); err != nil {
+ // Non-fatal: pprof may not be enabled in production
+ fmt.Fprintf(GinkgoWriter, "⚠️ Failed to collect pprof profiles (may not be enabled): %v\n", err)
+ }
+
+ return nil
+}
+
+// collectPipelineStatus collects VectorPipeline CR status
+func (c *collector) collectPipelineStatus(ctx context.Context, kubectl *kubectl.Client, namespace, testDir string, inventory *ArtifactInventory) error {
+ // Get all VectorPipeline CRs
+ pipelinesOutput, err := kubectl.GetAll("vectorpipeline", "")
+ if err != nil {
+ return fmt.Errorf("failed to list pipelines: %w", err)
+ }
+
+ if pipelinesOutput == "" {
+ // No pipelines, that's okay
+ return nil
+ }
+
+ pipelines := strings.Fields(pipelinesOutput)
+ for _, pipelineName := range pipelines {
+ // Get pipeline status
+ status, err := kubectl.GetWithJsonPath("vectorpipeline", pipelineName, ".status")
+ if err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to get status for pipeline %s: %v\n", pipelineName, err)
+ continue
+ }
+
+ filename := fmt.Sprintf("vectorpipeline-%s-status.json", pipelineName)
+ if err := c.storage.WriteFile(testDir, "resources", filename, []byte(status)); err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to write pipeline status for %s: %v\n", pipelineName, err)
+ continue
+ }
+
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "resources/"+filename)
+ }
+
+ return nil
+}
+
+// collectEvents collects Kubernetes events from the namespace
+func (c *collector) collectEvents(ctx context.Context, kubectl *kubectl.Client, namespace, testDir string, inventory *ArtifactInventory) error {
+ // Get events - use kubectl.Client to run kubectl get events
+ // Since we don't have a GetEvents method, we'll use a simple approach
+ eventsOutput, err := kubectl.Get("events", "")
+ if err != nil {
+ // Events might not exist, that's okay
+ return nil
+ }
+
+ if err := c.storage.WriteFile(testDir, "events", "namespace-events.txt", eventsOutput); err != nil {
+ return fmt.Errorf("failed to write namespace events: %w", err)
+ }
+
+ inventory.EventFiles = append(inventory.EventFiles, "events/namespace-events.txt")
+ return nil
+}
+
+// collectResourceMetadata collects basic metadata about Deployments, DaemonSets, Services
+func (c *collector) collectResourceMetadata(ctx context.Context, kubectl *kubectl.Client, namespace, testDir string, inventory *ArtifactInventory) error {
+ resourceTypes := []string{"deployment", "daemonset", "service"}
+
+ for _, resourceType := range resourceTypes {
+ resources, err := kubectl.GetAll(resourceType, "")
+ if err != nil {
+ continue // Resource type might not exist
+ }
+
+ if resources == "" {
+ continue
+ }
+
+ resourceNames := strings.Fields(resources)
+ for _, resourceName := range resourceNames {
+ // Get resource metadata (name, labels, status)
+ output, err := kubectl.Get(resourceType, resourceName)
+ if err != nil {
+ continue
+ }
+
+ filename := fmt.Sprintf("%s-%s.yaml", resourceType, resourceName)
+ if err := c.storage.WriteFile(testDir, "resources", filename, output); err != nil {
+ fmt.Fprintf(GinkgoWriter, " ⚠️ Failed to write %s/%s: %v\n", resourceType, resourceName, err)
+ continue
+ }
+
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "resources/"+filename)
+ }
+ }
+
+ return nil
+}
+
+// Close finalizes the collector and writes run summary
+func (c *collector) Close() error {
+ if !c.config.Enabled || c.storage == nil {
+ return nil
+ }
+
+ runEnd := time.Now()
+ runMeta := RunMetadata{
+ RunID: c.storage.GetRunID(),
+ StartTime: c.runStart,
+ EndTime: runEnd,
+ TotalTests: c.totalTests,
+ FailedTests: c.failedTests,
+ PassedTests: c.totalTests - c.failedTests,
+ ArtifactsDir: c.storage.GetRunDir(),
+ Environment: map[string]string{
+ "E2E_ARTIFACTS_ENABLED": fmt.Sprintf("%t", c.config.Enabled),
+ "E2E_ARTIFACTS_ON_FAILURE_ONLY": fmt.Sprintf("%t", c.config.CollectOnFailureOnly),
+ "E2E_ARTIFACTS_MAX_LOG_LINES": fmt.Sprintf("%d", c.config.MaxLogLines),
+ "E2E_ARTIFACTS_COLLECTION_TIME": runEnd.Sub(c.runStart).String(),
+ },
+ GitCommit: os.Getenv("E2E_GIT_COMMIT"),
+ GitBranch: os.Getenv("E2E_GIT_BRANCH"),
+ GitDirty: os.Getenv("E2E_GIT_DIRTY"),
+ Description: os.Getenv("E2E_RUN_DESCRIPTION"),
+ }
+
+ if err := c.metadata.WriteRunMetadata(runMeta); err != nil {
+ return fmt.Errorf("failed to write run metadata: %w", err)
+ }
+
+ fmt.Fprintf(GinkgoWriter, "\n📦 Artifact Collection Summary:\n")
+ fmt.Fprintf(GinkgoWriter, " Location: %s\n", c.storage.GetRunDir())
+ fmt.Fprintf(GinkgoWriter, " Total tests: %d\n", c.totalTests)
+ fmt.Fprintf(GinkgoWriter, " Failed tests with artifacts: %d\n", c.failedTests)
+ fmt.Fprintf(GinkgoWriter, " Duration: %v\n\n", runEnd.Sub(c.runStart))
+
+ return nil
+}
+
// createShortTestName derives a short, filesystem-friendly directory name
// from a full Ginkgo test name, prefixed with a zero-padded counter for
// uniqueness and ordering.
//
// Example:
//
//	"Artifact Verification should intentionally fail ..." + counter 1
//	-> "01-artifact-verification"
func createShortTestName(fullName string, counter int) string {
	words := strings.Fields(fullName)
	if len(words) == 0 {
		return fmt.Sprintf("%02d-unknown", counter)
	}

	// Keep the leading Describe-block words; "should" or a "[label]" marks
	// the start of the spec description, and at most four words are kept.
	var kept []string
	for _, word := range words {
		if strings.EqualFold(word, "should") || strings.HasPrefix(word, "[") {
			break
		}
		if trimmed := strings.Trim(word, "()[]{}"); trimmed != "" {
			kept = append(kept, trimmed)
		}
		if len(kept) >= 4 {
			break
		}
	}

	// Fall back to the very first word when nothing survived the filter.
	if len(kept) == 0 {
		kept = words[:1]
	}

	name := strings.ToLower(strings.Join(kept, "-"))

	// Drop any bracket characters that survived edge-trimming
	// (e.g. brackets in the middle of a word).
	name = strings.Map(func(r rune) rune {
		switch r {
		case '(', ')', '[', ']', '{', '}':
			return -1
		default:
			return r
		}
	}, name)

	// Cap the length so directory names stay manageable.
	const maxLen = 40
	if len(name) > maxLen {
		name = name[:maxLen]
	}

	// Counter prefix gives uniqueness and stable ordering.
	return fmt.Sprintf("%02d-%s", counter, name)
}
+
// getOperatorStartTime retrieves the operator pod's start time for
// degradation tracking. Any failure (no operator pod, jsonpath error,
// unparsable timestamp) yields the zero time, which callers treat as
// "unknown" (see the IsZero check in CollectForTest).
func (c *collector) getOperatorStartTime() time.Time {
	const operatorNs = "vector-operator-system"
	operatorClient := kubectl.NewClient(operatorNs)

	// Get operator pods
	pods, err := operatorClient.GetPodsByLabel("app.kubernetes.io/name=vector-operator")
	if err != nil || len(pods) == 0 {
		// If we can't get operator pod, return zero time
		return time.Time{}
	}

	// Get pod start time (first pod; assumed to be the single controller)
	startTimeStr, err := operatorClient.GetWithJsonPath("pod", pods[0], ".status.startTime")
	if err != nil {
		return time.Time{}
	}

	// Parse RFC3339 timestamp.
	// NOTE(review): assumes GetWithJsonPath returns the bare timestamp
	// string (no surrounding quotes) — confirm against the kubectl wrapper.
	startTime, err := time.Parse(time.RFC3339, strings.TrimSpace(startTimeStr))
	if err != nil {
		return time.Time{}
	}

	return startTime
}
+
+// collectPprofProfiles collects pprof profiles from the operator pod for leak diagnosis
+// Uses kubectl port-forward since distroless image doesn't have wget/curl
+func (c *collector) collectPprofProfiles(ctx context.Context, testDir string, inventory *ArtifactInventory, podName, namespace string) error {
+ const pprofPort = "6060"
+ const localPort = "16060" // Use high port to avoid conflicts
+
+ // Start port-forward in background
+ portForwardCmd := exec.Command("kubectl", "port-forward",
+ fmt.Sprintf("pod/%s", podName),
+ "-n", namespace,
+ fmt.Sprintf("%s:%s", localPort, pprofPort))
+
+ if err := portForwardCmd.Start(); err != nil {
+ return fmt.Errorf("failed to start port-forward: %w", err)
+ }
+ defer func() {
+ if portForwardCmd.Process != nil {
+ _ = portForwardCmd.Process.Kill()
+ }
+ }()
+
+ // Wait a bit for port-forward to establish
+ time.Sleep(2 * time.Second)
+
+ // Collect goroutine profile (text format for readability)
+ goroutineCmd := exec.Command("curl", "-s",
+ fmt.Sprintf("http://localhost:%s/debug/pprof/goroutine?debug=1", localPort))
+ goroutineOutput, err := goroutineCmd.CombinedOutput()
+ if err == nil && len(goroutineOutput) > 0 {
+ if err := c.storage.WriteFile(testDir, "operator", "pprof-goroutine.txt", goroutineOutput); err != nil {
+ return fmt.Errorf("failed to write goroutine profile: %w", err)
+ }
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "operator/pprof-goroutine.txt")
+ } else {
+ return fmt.Errorf("failed to collect goroutine profile: %w", err)
+ }
+
+ // Collect heap profile (text format for readability)
+ heapCmd := exec.Command("curl", "-s",
+ fmt.Sprintf("http://localhost:%s/debug/pprof/heap?debug=1", localPort))
+ heapOutput, err := heapCmd.CombinedOutput()
+ if err == nil && len(heapOutput) > 0 {
+ if err := c.storage.WriteFile(testDir, "operator", "pprof-heap.txt", heapOutput); err != nil {
+ return fmt.Errorf("failed to write heap profile: %w", err)
+ }
+ inventory.ResourceFiles = append(inventory.ResourceFiles, "operator/pprof-heap.txt")
+ } else {
+ return fmt.Errorf("failed to collect heap profile: %w", err)
+ }
+
+ return nil
+}
diff --git a/test/e2e/framework/artifacts/config.go b/test/e2e/framework/artifacts/config.go
new file mode 100644
index 00000000..c6033566
--- /dev/null
+++ b/test/e2e/framework/artifacts/config.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package artifacts
+
+import (
+ "os"
+ "strconv"
+ "time"
+)
+
// Default configuration values, used by LoadConfigFromEnv when the
// corresponding E2E_ARTIFACTS_* environment variable is unset or unparsable.
const (
	defaultBaseDir           = "test/e2e/artifacts" // relative path
	defaultMaxLogLines       = 500
	defaultMaxResourceSize   = 10 * 1024 * 1024  // 10MB per file
	defaultMaxTotalSize      = 100 * 1024 * 1024 // 100MB per test
	defaultCollectionTimeout = 30 * time.Second
	defaultEnabled           = true
	defaultOnFailureOnly     = true
	defaultMinimalOnly       = false
)
+
// Config defines artifact collection behavior. Values are normally
// populated from E2E_ARTIFACTS_* environment variables via LoadConfigFromEnv.
type Config struct {
	// Collection control
	Enabled              bool // Master switch for artifact collection
	CollectOnFailureOnly bool // Collect artifacts only for failed tests
	CollectMinimalOnly   bool // Collect only P0 artifacts (fast path)

	// Storage paths
	BaseDir string // Base directory for artifact storage

	// Size limits (prevent artifact bloat)
	MaxLogLines     int   // Maximum log lines per pod
	MaxResourceSize int64 // Maximum size for single resource (bytes)
	MaxTotalSize    int64 // Maximum total size per test (bytes)

	// Timeouts
	CollectionTimeout time.Duration // Maximum time to collect artifacts

	// Filters
	// NOTE(review): currently hardcoded in LoadConfigFromEnv rather than
	// ENV-configurable.
	NamespacePatterns []string // Namespace patterns to collect from
	PodLabelSelectors []string // Pod label selectors for filtering
}
+
+// LoadConfigFromEnv loads configuration from environment variables
+// Following Phase 1 pattern: ENV-based config with sensible defaults
+func LoadConfigFromEnv() Config {
+ return Config{
+ Enabled: getEnvBool("E2E_ARTIFACTS_ENABLED", defaultEnabled),
+ CollectOnFailureOnly: getEnvBool("E2E_ARTIFACTS_ON_FAILURE_ONLY", defaultOnFailureOnly),
+ CollectMinimalOnly: getEnvBool("E2E_ARTIFACTS_MINIMAL_ONLY", defaultMinimalOnly),
+
+ BaseDir: getEnvString("E2E_ARTIFACTS_DIR", defaultBaseDir),
+
+ MaxLogLines: getEnvInt("E2E_ARTIFACTS_MAX_LOG_LINES", defaultMaxLogLines),
+ MaxResourceSize: getEnvInt64("E2E_ARTIFACTS_MAX_RESOURCE_SIZE", defaultMaxResourceSize),
+ MaxTotalSize: getEnvInt64("E2E_ARTIFACTS_MAX_TOTAL_SIZE", defaultMaxTotalSize),
+
+ CollectionTimeout: getEnvDuration("E2E_ARTIFACTS_TIMEOUT", defaultCollectionTimeout),
+
+ NamespacePatterns: []string{"test-*"},
+ PodLabelSelectors: []string{},
+ }
+}
+
+// Helper functions for ENV parsing
+
+func getEnvBool(key string, defaultValue bool) bool {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+ result, err := strconv.ParseBool(value)
+ if err != nil {
+ return defaultValue
+ }
+ return result
+}
+
+func getEnvInt(key string, defaultValue int) int {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+ result, err := strconv.Atoi(value)
+ if err != nil {
+ return defaultValue
+ }
+ return result
+}
+
+func getEnvInt64(key string, defaultValue int64) int64 {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+ result, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return defaultValue
+ }
+ return result
+}
+
+func getEnvString(key string, defaultValue string) string {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+ return value
+}
+
+func getEnvDuration(key string, defaultValue time.Duration) time.Duration {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+ result, err := time.ParseDuration(value)
+ if err != nil {
+ return defaultValue
+ }
+ return result
+}
diff --git a/test/e2e/framework/artifacts/metadata.go b/test/e2e/framework/artifacts/metadata.go
new file mode 100644
index 00000000..7eb03003
--- /dev/null
+++ b/test/e2e/framework/artifacts/metadata.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package artifacts
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
// RunMetadata contains metadata about an entire test run: aggregate
// pass/fail counts, the environment snapshot, and git provenance.
// It is serialized as indented JSON into the run directory's metadata.json.
type RunMetadata struct {
	RunID        string            `json:"run_id"`
	StartTime    time.Time         `json:"start_time"`
	EndTime      time.Time         `json:"end_time,omitempty"`
	TotalTests   int               `json:"total_tests"`
	FailedTests  int               `json:"failed_tests"`
	PassedTests  int               `json:"passed_tests"`
	Environment  map[string]string `json:"environment"`
	ArtifactsDir string            `json:"artifacts_dir"`
	// Git information for tracking test run version
	GitCommit   string `json:"git_commit,omitempty"`
	GitBranch   string `json:"git_branch,omitempty"`
	GitDirty    string `json:"git_dirty,omitempty"`   // "dirty", "staged", or empty if clean
	Description string `json:"description,omitempty"` // Optional user description
}

// TestMetadata contains metadata about a single test execution,
// written as metadata.json inside each per-test artifact directory.
type TestMetadata struct {
	Name      string    `json:"name"`
	Namespace string    `json:"namespace"`
	StartTime time.Time `json:"start_time"`
	EndTime   time.Time `json:"end_time"`
	// NOTE(review): encoding/json marshals time.Duration as its int64
	// nanosecond count, so the "duration_ms" tag name (and the old
	// "in milliseconds" comment) is misleading — confirm what consumers
	// of this JSON expect before changing the tag.
	Duration       time.Duration `json:"duration_ms"`
	Failed         bool          `json:"failed"`
	FailureMessage string        `json:"failure_message,omitempty"`
	Labels         []string      `json:"labels"`

	// Test sequence tracking (for degradation analysis)
	TestSequenceNumber int `json:"test_sequence_number"` // Which test in the run (1, 2, 3...)
	// NOTE(review): same nanosecond-vs-seconds mismatch as Duration above —
	// time.Duration serializes as nanoseconds despite the "_seconds" tag.
	OperatorAge time.Duration `json:"operator_age_seconds"` // How long operator has been running

	// Collected artifacts inventory
	Artifacts ArtifactInventory `json:"artifacts"`
}

// ArtifactInventory tracks what artifacts were collected for one test:
// file lists per category plus total on-disk size and collection duration.
type ArtifactInventory struct {
	PodCount       int      `json:"pod_count"`
	LogFiles       []string `json:"log_files"`
	ResourceFiles  []string `json:"resource_files"`
	EventFiles     []string `json:"event_files"`
	TotalSizeBytes int64    `json:"total_size_bytes"`
	CollectionTime string   `json:"collection_time"` // Human-readable duration
}
+
// MetadataBuilder helps build and write metadata files. All writes go
// through the wrapped Storage so they land under the current run directory.
type MetadataBuilder struct {
	storage *Storage
}

// NewMetadataBuilder creates a new metadata builder backed by storage.
func NewMetadataBuilder(storage *Storage) *MetadataBuilder {
	return &MetadataBuilder{
		storage: storage,
	}
}
+
+// WriteTestMetadata writes test metadata to JSON file
+func (m *MetadataBuilder) WriteTestMetadata(meta TestMetadata, testDir string) error {
+ data, err := json.MarshalIndent(meta, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal test metadata: %w", err)
+ }
+
+ return m.storage.WriteFile(testDir, "", "metadata.json", data)
+}
+
+// WriteRunMetadata writes run metadata to JSON file
+func (m *MetadataBuilder) WriteRunMetadata(meta RunMetadata) error {
+ data, err := json.MarshalIndent(meta, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal run metadata: %w", err)
+ }
+
+ return m.storage.WriteFileInRunDir("metadata.json", data)
+}
+
// BuildTestMetadata creates TestMetadata from TestInfo, copying the
// test's identity, timing, outcome, and sequence-tracking fields and
// attaching the collected artifact inventory.
// NOTE(review): TestInfo is declared elsewhere in this package; the
// field mapping here assumes its fields mirror TestMetadata's — confirm
// against its definition if either struct changes.
func BuildTestMetadata(info TestInfo, artifacts ArtifactInventory) TestMetadata {
	return TestMetadata{
		Name:               info.Name,
		Namespace:          info.Namespace,
		StartTime:          info.StartTime,
		EndTime:            info.EndTime,
		Duration:           info.Duration,
		Failed:             info.Failed,
		FailureMessage:     info.FailureMessage,
		Labels:             info.Labels,
		TestSequenceNumber: info.SequenceNumber,
		OperatorAge:        info.OperatorAge,
		Artifacts:          artifacts,
	}
}
diff --git a/test/e2e/framework/artifacts/storage.go b/test/e2e/framework/artifacts/storage.go
new file mode 100644
index 00000000..3ecca496
--- /dev/null
+++ b/test/e2e/framework/artifacts/storage.go
@@ -0,0 +1,327 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package artifacts
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+)
+
// Storage handles filesystem operations for artifact collection.
// All files are written beneath runDir, which lives under baseDir
// (or equals it — see NewStorage's nested-run-dir detection).
type Storage struct {
	baseDir string // configured root for all runs
	runDir  string // directory for this specific run's artifacts
	maxSize int64  // per-file size budget in bytes; larger content is truncated
	runID   string // identifier embedded in the run directory name
}
+
+// NewStorage creates a new storage instance with specified configuration
+func NewStorage(baseDir string, runID string, maxSize int64) (*Storage, error) {
+ var runDir string
+
+ // Check if baseDir already contains a run directory (e.g., from E2E_ARTIFACTS_DIR)
+ // This prevents nested run-{timestamp}/run-{timestamp}/ structure
+ if filepath.Base(baseDir) == "artifacts" && isRunDirectory(filepath.Dir(baseDir)) {
+ // baseDir is already inside a run directory (e.g., test/e2e/results/run-{timestamp}/artifacts/)
+ // Use it directly without creating another run-{runID} subdirectory
+ runDir = baseDir
+ } else {
+ // Standard case: create run-{runID} subdirectory
+ runDir = filepath.Join(baseDir, "run-"+runID)
+ }
+
+ // Create run directory
+ if err := os.MkdirAll(runDir, 0755); err != nil {
+ return nil, fmt.Errorf("failed to create run directory %s: %w", runDir, err)
+ }
+
+ return &Storage{
+ baseDir: baseDir,
+ runDir: runDir,
+ maxSize: maxSize,
+ runID: runID,
+ }, nil
+}
+
// isRunDirectory reports whether the final path element follows the
// run-{id} naming scheme used for per-run artifact directories.
// A bare "run-" (nothing after the hyphen) does not qualify.
func isRunDirectory(path string) bool {
	name := filepath.Base(path)
	if len(name) <= 4 {
		return false
	}
	return name[:4] == "run-"
}
+
+// WriteFile writes content to a file within a test directory with size limits
+// testDir: test-specific directory name (e.g., "test-normal-mode")
+// category: subdirectory within test dir (e.g., "logs", "resources", "events")
+// filename: name of the file to write
+func (s *Storage) WriteFile(testDir, category, filename string, content []byte) error {
+ // Check and enforce size limit
+ if int64(len(content)) > s.maxSize {
+ content = s.truncateContent(content, "size limit exceeded")
+ }
+
+ // Build full directory path
+ var dir string
+ if category != "" {
+ dir = filepath.Join(s.runDir, testDir, category)
+ } else {
+ dir = filepath.Join(s.runDir, testDir)
+ }
+
+ // Create category directory
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", dir, err)
+ }
+
+ // Write file atomically (write to temp, then rename)
+ path := filepath.Join(dir, filename)
+ tempPath := path + ".tmp"
+
+ if err := os.WriteFile(tempPath, content, 0644); err != nil {
+ return fmt.Errorf("failed to write temp file %s: %w", tempPath, err)
+ }
+
+ if err := os.Rename(tempPath, path); err != nil {
+ // Clean up temp file if rename fails
+ _ = os.Remove(tempPath)
+ return fmt.Errorf("failed to rename temp file %s to %s: %w", tempPath, path, err)
+ }
+
+ return nil
+}
+
+// WriteFileInRunDir writes a file directly in the run directory (not test-specific)
+// Used for run-level metadata
+func (s *Storage) WriteFileInRunDir(filename string, content []byte) error {
+ path := filepath.Join(s.runDir, filename)
+ tempPath := path + ".tmp"
+
+ if err := os.WriteFile(tempPath, content, 0644); err != nil {
+ return fmt.Errorf("failed to write temp file %s: %w", tempPath, err)
+ }
+
+ if err := os.Rename(tempPath, path); err != nil {
+ _ = os.Remove(tempPath)
+ return fmt.Errorf("failed to rename temp file %s to %s: %w", tempPath, path, err)
+ }
+
+ return nil
+}
+
+// WriteStream writes content from a reader to a file with size limits
+// Useful for streaming command output without loading all into memory
+func (s *Storage) WriteStream(testDir, category, filename string, reader io.Reader, maxLines int) error {
+ // Build full directory path
+ var dir string
+ if category != "" {
+ dir = filepath.Join(s.runDir, testDir, category)
+ } else {
+ dir = filepath.Join(s.runDir, testDir)
+ }
+
+ // Create category directory
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", dir, err)
+ }
+
+ // Write to temp file
+ path := filepath.Join(dir, filename)
+ tempPath := path + ".tmp"
+
+ tempFile, err := os.Create(tempPath)
+ if err != nil {
+ return fmt.Errorf("failed to create temp file %s: %w", tempPath, err)
+ }
+ defer tempFile.Close()
+
+ // Copy with size limit
+ written, err := io.CopyN(tempFile, reader, s.maxSize)
+ if err != nil && err != io.EOF {
+ // If we hit the limit, add truncation marker
+ if written >= s.maxSize {
+ truncationMarker := []byte("\n\n... [TRUNCATED - exceeds size limit] ...\n")
+ _, _ = tempFile.Write(truncationMarker)
+ }
+ }
+
+ tempFile.Close()
+
+ // Rename to final path
+ if err := os.Rename(tempPath, path); err != nil {
+ _ = os.Remove(tempPath)
+ return fmt.Errorf("failed to rename temp file %s to %s: %w", tempPath, path, err)
+ }
+
+ return nil
+}
+
// GetRunDir returns the path of the per-run directory that all of this
// run's artifacts are written beneath.
func (s *Storage) GetRunDir() string {
	return s.runDir
}

// GetRunID returns the identifier used to name this run's directory.
func (s *Storage) GetRunID() string {
	return s.runID
}
+
// truncateContent truncates content to fit within s.maxSize and appends a
// marker explaining why. It keeps a small prefix of the original (so the
// file is still identifiable) plus as much of the tail as fits, since the
// most recent log lines are usually the most relevant.
//
// Precondition: callers only invoke this when len(content) > s.maxSize
// (see WriteFile); the slicing below assumes content is at least keepSize
// bytes long.
func (s *Storage) truncateContent(content []byte, reason string) []byte {
	marker := []byte(fmt.Sprintf("\n\n... [TRUNCATED: %s - max %d bytes] ...\n", reason, s.maxSize))

	// If marker itself is too large, truncate it
	if int64(len(marker)) >= s.maxSize {
		return marker[:s.maxSize]
	}

	// Calculate how much content we can keep
	keepSize := s.maxSize - int64(len(marker))
	if keepSize < 0 {
		keepSize = 0
	}

	// Keep the end of the content (most recent logs are usually most relevant)
	// But also include first few bytes to show what file it is.
	// The header gets at most 100 bytes, capped at half the budget.
	headerSize := int64(100)
	if headerSize > keepSize/2 {
		headerSize = keepSize / 2
	}

	var truncated []byte
	if headerSize > 0 && int64(len(content)) > headerSize {
		// Include header + skip marker + tail; tailStart is clamped so the
		// header and tail regions never overlap.
		tailSize := keepSize - headerSize
		tailStart := int64(len(content)) - tailSize
		if tailStart < headerSize {
			tailStart = headerSize
		}

		truncated = append(truncated, content[:headerSize]...)
		truncated = append(truncated, []byte("\n... [CONTENT SKIPPED] ...\n")...)
		if tailStart < int64(len(content)) {
			truncated = append(truncated, content[tailStart:]...)
		}
	} else {
		// Budget too small for a header+tail split: just take what fits.
		truncated = content[:keepSize]
	}

	return append(truncated, marker...)
}
+
// TruncateLogLines returns the last maxLines lines of content, prefixed
// with a "... [Showing last N lines] ..." marker. If content has fewer
// than maxLines complete lines (or maxLines <= 0), it is returned
// unchanged and unmarked.
//
// Leading all-whitespace lines in the kept tail are dropped, and leading
// whitespace is trimmed from the first kept line; subsequent lines keep
// their indentation verbatim.
//
// Fix: the result is now assembled in freshly allocated buffers. The old
// implementation appended into sub-slices of content, which could mutate
// the caller's byte slice in place (observable whenever the first kept
// line had trailing whitespace or blank lines were skipped).
func TruncateLogLines(content []byte, maxLines int) []byte {
	if maxLines <= 0 {
		return content
	}

	// Locate the cut point: the byte just after the maxLines-th newline,
	// counting backwards from the end of the buffer.
	cut := -1
	newlines := 0
	for i := len(content) - 1; i >= 0; i-- {
		if content[i] == '\n' {
			newlines++
			if newlines >= maxLines {
				cut = i + 1
				break
			}
		}
	}
	if cut < 0 {
		// Fewer than maxLines complete lines: keep everything as-is.
		return content
	}
	tail := content[cut:]

	// Skip leading blank lines, then trim leading whitespace from the
	// first non-empty line while preserving the rest verbatim.
	kept := tail
	for start := 0; start < len(tail); {
		end := start
		for end < len(tail) && tail[end] != '\n' {
			end++
		}
		if trimmed := bytes.TrimSpace(tail[start:end]); len(trimmed) > 0 {
			// Copy into a fresh buffer so we never write into content's
			// backing array.
			buf := make([]byte, 0, len(trimmed)+len(tail)-end)
			buf = append(buf, trimmed...)
			if end < len(tail) {
				buf = append(buf, tail[end:]...)
			}
			kept = buf
			break
		}
		start = end + 1 // move past the newline to the next line
	}

	// Prepend the truncation marker in a fresh buffer.
	marker := fmt.Sprintf("... [Showing last %d lines] ...\n", maxLines)
	out := make([]byte, 0, len(marker)+len(kept))
	out = append(out, marker...)
	return append(out, kept...)
}
+
+// CreateTestDir creates a directory for a specific test
+func (s *Storage) CreateTestDir(testName string) (string, error) {
+ // Sanitize test name for filesystem
+ sanitized := sanitizeFilename(testName)
+ testDir := filepath.Join(s.runDir, sanitized)
+
+ if err := os.MkdirAll(testDir, 0755); err != nil {
+ return "", fmt.Errorf("failed to create test directory %s: %w", testDir, err)
+ }
+
+ return sanitized, nil
+}
+
// sanitizeFilename makes a name safe to use as a path component by
// replacing filesystem-hostile characters (and spaces) with hyphens and
// capping the length; over-long names get a unix-timestamp suffix so
// truncated names stay unique.
func sanitizeFilename(name string) string {
	specials := []byte(`/\:*?"<>| `)
	out := []byte(name)
	for i, c := range out {
		if bytes.IndexByte(specials, c) >= 0 {
			out[i] = '-'
		}
	}

	// Limit length to avoid filesystem issues.
	const maxLength = 200
	if len(out) > maxLength {
		suffix := fmt.Sprintf("-%d", time.Now().Unix())
		keep := maxLength - len(suffix)
		if keep < 0 {
			keep = 0
		}
		out = append(out[:keep], suffix...)
	}

	return string(out)
}
diff --git a/test/e2e/framework/artifacts/storage_test.go b/test/e2e/framework/artifacts/storage_test.go
new file mode 100644
index 00000000..3af92c63
--- /dev/null
+++ b/test/e2e/framework/artifacts/storage_test.go
@@ -0,0 +1,55 @@
+package artifacts
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestTruncateLogLines_RemovesLeadingWhitespace(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ maxLines int
+ want string
+ }{
+ {
+ name: "removes leading newlines and spaces",
+ input: "\n\n \t2025-11-14T19:58:40Z\tINFO\tstart Reconcile\nline2\nline3",
+ maxLines: 3,
+ want: "... [Showing last 3 lines] ...\n2025-11-14T19:58:40Z\tINFO\tstart Reconcile\nline2\nline3",
+ },
+ {
+ name: "handles logs without leading whitespace",
+ input: "line1\nline2\nline3\nline4\nline5",
+ maxLines: 3,
+ want: "... [Showing last 3 lines] ...\nline3\nline4\nline5",
+ },
+ {
+ name: "keeps content when less than maxLines",
+ input: "line1\nline2",
+ maxLines: 5,
+ want: "line1\nline2",
+ },
+ {
+ name: "trims leading whitespace from first line but preserves it in subsequent lines",
+ input: "line1\nline2\nline3 with content\n indented line4\n indented line5",
+ maxLines: 3,
+ want: "... [Showing last 3 lines] ...\nline3 with content\n indented line4\n indented line5",
+ },
+ {
+ name: "handles real operator log format",
+ input: "line1\nline2\nline3\nline4\n2025-11-14T19:58:40Z\tINFO\tstart Reconcile",
+ maxLines: 1,
+ want: "... [Showing last 1 lines] ...\n2025-11-14T19:58:40Z\tINFO\tstart Reconcile",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := TruncateLogLines([]byte(tt.input), tt.maxLines)
+ if !bytes.Equal(got, []byte(tt.want)) {
+ t.Errorf("TruncateLogLines() = %q, want %q", string(got), tt.want)
+ }
+ })
+ }
+}
diff --git a/test/e2e/framework/assertions/matchers.go b/test/e2e/framework/assertions/matchers.go
new file mode 100644
index 00000000..227721d4
--- /dev/null
+++ b/test/e2e/framework/assertions/matchers.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package assertions
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/gomega/types"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework/kubectl"
+)
+
// PipelineResource represents a pipeline (namespaced VectorPipeline or
// cluster-scoped ClusterVectorPipeline) for use with the Gomega matchers
// in this package. It wraps a kubectl client scoped to the namespace.
type PipelineResource struct {
	namespace string // empty for cluster-scoped pipelines
	name      string
	kubectl   *kubectl.Client
}

// NewPipelineResource creates a new pipeline resource wrapper; an empty
// namespace selects the cluster-scoped resource type (see resourceType).
func NewPipelineResource(namespace, name string) *PipelineResource {
	return &PipelineResource{
		namespace: namespace,
		name:      name,
		kubectl:   kubectl.NewClient(namespace),
	}
}

// resourceType returns the correct resource type based on namespace:
// empty namespace = cluster-scoped (ClusterVectorPipeline),
// non-empty namespace = namespaced (VectorPipeline).
func (p *PipelineResource) resourceType() string {
	if p.namespace == "" {
		return "clustervectorpipeline"
	}
	return "vectorpipeline"
}
+
+// BeValid matcher for pipeline validity
+type beValidMatcher struct{}
+
+func (m *beValidMatcher) Match(actual interface{}) (success bool, err error) {
+ pipeline, ok := actual.(*PipelineResource)
+ if !ok {
+ return false, fmt.Errorf("BeValid matcher expects a *PipelineResource")
+ }
+
+ result, err := pipeline.kubectl.GetWithJsonPath(pipeline.resourceType(), pipeline.name, ".status.configCheckResult")
+ if err != nil {
+ return false, err
+ }
+
+ return result == "true", nil
+}
+
+func (m *beValidMatcher) FailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s to be valid", pipeline.namespace, pipeline.name)
+}
+
+func (m *beValidMatcher) NegatedFailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s not to be valid", pipeline.namespace, pipeline.name)
+}
+
+// BeValid returns a matcher that checks if a pipeline is valid
+func BeValid() types.GomegaMatcher {
+ return &beValidMatcher{}
+}
+
+// HaveSplitModeEnabled matcher
+type haveSplitModeEnabledMatcher struct{}
+
+func (m *haveSplitModeEnabledMatcher) Match(actual interface{}) (success bool, err error) {
+ pipeline, ok := actual.(*PipelineResource)
+ if !ok {
+ return false, fmt.Errorf("HaveSplitModeEnabled matcher expects a *PipelineResource")
+ }
+
+ result, err := pipeline.kubectl.GetWithJsonPath(pipeline.resourceType(), pipeline.name, ".status.splitMode.enabled")
+ if err != nil {
+ return false, err
+ }
+
+ return result == "true", nil
+}
+
+func (m *haveSplitModeEnabledMatcher) FailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s to have split mode enabled", pipeline.namespace, pipeline.name)
+}
+
+func (m *haveSplitModeEnabledMatcher) NegatedFailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s not to have split mode enabled", pipeline.namespace, pipeline.name)
+}
+
+// HaveSplitModeEnabled returns a matcher that checks if split mode is enabled
+func HaveSplitModeEnabled() types.GomegaMatcher {
+ return &haveSplitModeEnabledMatcher{}
+}
+
+// HaveRole matcher
+type haveRoleMatcher struct {
+ expectedRole string
+}
+
+func (m *haveRoleMatcher) Match(actual interface{}) (success bool, err error) {
+ pipeline, ok := actual.(*PipelineResource)
+ if !ok {
+ return false, fmt.Errorf("HaveRole matcher expects a *PipelineResource")
+ }
+
+ result, err := pipeline.kubectl.GetWithJsonPath(pipeline.resourceType(), pipeline.name, ".status.role")
+ if err != nil {
+ return false, err
+ }
+
+ return result == m.expectedRole, nil
+}
+
+func (m *haveRoleMatcher) FailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s to have role %s", pipeline.namespace, pipeline.name, m.expectedRole)
+}
+
+func (m *haveRoleMatcher) NegatedFailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s not to have role %s", pipeline.namespace, pipeline.name, m.expectedRole)
+}
+
+// HaveRole returns a matcher that checks the pipeline role
+func HaveRole(role string) types.GomegaMatcher {
+ return &haveRoleMatcher{expectedRole: role}
+}
+
// ServiceResource represents a Kubernetes Service for use with the
// Gomega matchers in this package (Exist, HavePort). It wraps a kubectl
// client scoped to the namespace.
type ServiceResource struct {
	namespace string
	name      string
	kubectl   *kubectl.Client
}

// NewServiceResource creates a new service resource wrapper.
func NewServiceResource(namespace, name string) *ServiceResource {
	return &ServiceResource{
		namespace: namespace,
		name:      name,
		kubectl:   kubectl.NewClient(namespace),
	}
}
+
+// Exist matcher for service existence
+type existMatcher struct{}
+
+func (m *existMatcher) Match(actual interface{}) (success bool, err error) {
+ service, ok := actual.(*ServiceResource)
+ if !ok {
+ return false, fmt.Errorf("Exist matcher expects a *ServiceResource")
+ }
+
+ _, err = service.kubectl.Get("service", service.name)
+ return err == nil, nil
+}
+
+func (m *existMatcher) FailureMessage(actual interface{}) string {
+ service := actual.(*ServiceResource)
+ return fmt.Sprintf("Expected service %s/%s to exist", service.namespace, service.name)
+}
+
+func (m *existMatcher) NegatedFailureMessage(actual interface{}) string {
+ service := actual.(*ServiceResource)
+ return fmt.Sprintf("Expected service %s/%s not to exist", service.namespace, service.name)
+}
+
+// Exist returns a matcher that checks if a service exists
+func Exist() types.GomegaMatcher {
+ return &existMatcher{}
+}
+
+// HavePort matcher
+type havePortMatcher struct {
+ expectedPort string
+}
+
+func (m *havePortMatcher) Match(actual interface{}) (success bool, err error) {
+ service, ok := actual.(*ServiceResource)
+ if !ok {
+ return false, fmt.Errorf("HavePort matcher expects a *ServiceResource")
+ }
+
+ port, err := service.kubectl.GetWithJsonPath("service", service.name, ".spec.ports[0].port")
+ if err != nil {
+ return false, err
+ }
+
+ return port == m.expectedPort, nil
+}
+
+func (m *havePortMatcher) FailureMessage(actual interface{}) string {
+ service := actual.(*ServiceResource)
+ return fmt.Sprintf("Expected service %s/%s to have port %s", service.namespace, service.name, m.expectedPort)
+}
+
+func (m *havePortMatcher) NegatedFailureMessage(actual interface{}) string {
+ service := actual.(*ServiceResource)
+ return fmt.Sprintf("Expected service %s/%s not to have port %s", service.namespace, service.name, m.expectedPort)
+}
+
+// HavePort returns a matcher that checks the service port
+func HavePort(port string) types.GomegaMatcher {
+ return &havePortMatcher{expectedPort: port}
+}
+
+// BeInvalid matcher for pipeline invalidity
+type beInvalidMatcher struct{}
+
+func (m *beInvalidMatcher) Match(actual interface{}) (success bool, err error) {
+ pipeline, ok := actual.(*PipelineResource)
+ if !ok {
+ return false, fmt.Errorf("BeInvalid matcher expects a *PipelineResource")
+ }
+
+ result, err := pipeline.kubectl.GetWithJsonPath(pipeline.resourceType(), pipeline.name, ".status.configCheckResult")
+ if err != nil {
+ return false, err
+ }
+
+ return result == "false", nil
+}
+
+func (m *beInvalidMatcher) FailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s to be invalid", pipeline.namespace, pipeline.name)
+}
+
+func (m *beInvalidMatcher) NegatedFailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s not to be invalid", pipeline.namespace, pipeline.name)
+}
+
+// BeInvalid returns a matcher that checks if a pipeline is invalid
+func BeInvalid() types.GomegaMatcher {
+ return &beInvalidMatcher{}
+}
+
+// HaveErrorContaining matcher for error messages
+type haveErrorContainingMatcher struct {
+ expectedSubstring string
+}
+
+func (m *haveErrorContainingMatcher) Match(actual interface{}) (success bool, err error) {
+ pipeline, ok := actual.(*PipelineResource)
+ if !ok {
+ return false, fmt.Errorf("HaveErrorContaining matcher expects a *PipelineResource")
+ }
+
+ reason, err := pipeline.kubectl.GetWithJsonPath(pipeline.resourceType(), pipeline.name, ".status.reason")
+ if err != nil {
+ return false, err
+ }
+
+ // Simple substring check (case-insensitive)
+ lowerReason := strings.ToLower(reason)
+ lowerExpected := strings.ToLower(m.expectedSubstring)
+
+ return strings.Contains(lowerReason, lowerExpected), nil
+}
+
+func (m *haveErrorContainingMatcher) FailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s to have error containing '%s'",
+ pipeline.namespace, pipeline.name, m.expectedSubstring)
+}
+
+func (m *haveErrorContainingMatcher) NegatedFailureMessage(actual interface{}) string {
+ pipeline := actual.(*PipelineResource)
+ return fmt.Sprintf("Expected pipeline %s/%s not to have error containing '%s'",
+ pipeline.namespace, pipeline.name, m.expectedSubstring)
+}
+
+// HaveErrorContaining returns a matcher that checks if error message contains substring
+func HaveErrorContaining(substring string) types.GomegaMatcher {
+ return &haveErrorContainingMatcher{expectedSubstring: substring}
+}
diff --git a/test/e2e/framework/config/constants.go b/test/e2e/framework/config/constants.go
new file mode 100644
index 00000000..8bde68e6
--- /dev/null
+++ b/test/e2e/framework/config/constants.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
// Test labels for selective test execution (used with Ginkgo label filters).
const (
	// Execution speed labels
	LabelSmoke      = "smoke"
	LabelFast       = "fast"
	LabelSlow       = "slow"
	LabelRegression = "regression"
	LabelStress     = "stress"
	LabelParallel   = "parallel"

	// Priority labels (P0 = critical, must always pass)
	LabelP0 = "p0"
	LabelP1 = "p1"
	LabelP2 = "p2"

	// Category labels
	LabelSecurity   = "security"
	LabelConstraint = "constraint"
)

// Resource naming suffixes appended to Vector resource names for the two
// deployment roles.
const (
	AggregatorSuffix = "-aggregator"
	AgentSuffix      = "-agent"
)

// Kubernetes labels used to identify operator-managed workloads by role.
const (
	ComponentLabel      = "app.kubernetes.io/component"
	AggregatorComponent = "Aggregator"
	AgentComponent      = "Agent"
)
diff --git a/test/e2e/framework/config/timeouts.go b/test/e2e/framework/config/timeouts.go
new file mode 100644
index 00000000..82a0fe7f
--- /dev/null
+++ b/test/e2e/framework/config/timeouts.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "os"
+ "time"
+)
+
// Default timeout values. These are compile-time fallbacks; each has a
// matching exported variable below that can be overridden at startup via
// an E2E_* environment variable.
const (
	// Resource creation timeouts
	defaultDeploymentCreateTimeout = 120 * time.Second // Increased for resource-heavy aggregator deployments
	defaultDeploymentReadyTimeout  = 120 * time.Second
	defaultNamespaceDeleteTimeout  = 120 * time.Second // Increased timeout to handle slow namespace termination

	// Pipeline validation timeouts
	defaultPipelineValidTimeout = 2 * time.Minute
	defaultConfigCheckTimeout   = 30 * time.Second

	// Service check timeouts
	defaultServiceCreateTimeout = 2 * time.Minute

	// Polling intervals
	defaultDefaultPollInterval = 2 * time.Second
	defaultFastPollInterval    = 1 * time.Second
	defaultSlowPollInterval    = 2 * time.Second // Reduced from 5s - more responsive polling

	// Test spec timeouts
	defaultDefaultTestTimeout = 5 * time.Minute
	defaultLongTestTimeout    = 10 * time.Minute
)

// Configurable timeout variables. Each is resolved once at package
// initialization from its environment variable (invalid or unset values
// silently fall back to the default above) — changing the env var after
// startup has no effect.
var (
	// Resource creation timeouts
	DeploymentCreateTimeout = getEnvDuration("E2E_DEPLOYMENT_CREATE_TIMEOUT", defaultDeploymentCreateTimeout)
	DeploymentReadyTimeout  = getEnvDuration("E2E_DEPLOYMENT_READY_TIMEOUT", defaultDeploymentReadyTimeout)
	NamespaceDeleteTimeout  = getEnvDuration("E2E_NAMESPACE_DELETE_TIMEOUT", defaultNamespaceDeleteTimeout)

	// Pipeline validation timeouts
	PipelineValidTimeout = getEnvDuration("E2E_PIPELINE_VALID_TIMEOUT", defaultPipelineValidTimeout)
	ConfigCheckTimeout   = getEnvDuration("E2E_CONFIG_CHECK_TIMEOUT", defaultConfigCheckTimeout)

	// Service check timeouts
	ServiceCreateTimeout = getEnvDuration("E2E_SERVICE_CREATE_TIMEOUT", defaultServiceCreateTimeout)

	// Polling intervals
	DefaultPollInterval = getEnvDuration("E2E_DEFAULT_POLL_INTERVAL", defaultDefaultPollInterval)
	FastPollInterval    = getEnvDuration("E2E_FAST_POLL_INTERVAL", defaultFastPollInterval)
	SlowPollInterval    = getEnvDuration("E2E_SLOW_POLL_INTERVAL", defaultSlowPollInterval)

	// Test spec timeouts
	DefaultTestTimeout = getEnvDuration("E2E_DEFAULT_TEST_TIMEOUT", defaultDefaultTestTimeout)
	LongTestTimeout    = getEnvDuration("E2E_LONG_TEST_TIMEOUT", defaultLongTestTimeout)
)
+
+// getEnvDuration reads a duration from environment variable, falling back to default if not set or invalid
+func getEnvDuration(envVar string, defaultValue time.Duration) time.Duration {
+ if val := os.Getenv(envVar); val != "" {
+ if duration, err := time.ParseDuration(val); err == nil {
+ return duration
+ }
+ // If parsing fails, fall back to default (silently to avoid test noise)
+ }
+ return defaultValue
+}
+
+// GetPollInterval returns appropriate poll interval based on timeout
+func GetPollInterval(timeout time.Duration) time.Duration {
+ if timeout < 30*time.Second {
+ return FastPollInterval
+ }
+ if timeout > 2*time.Minute {
+ return SlowPollInterval
+ }
+ return DefaultPollInterval
+}
diff --git a/test/e2e/framework/errors/errors.go b/test/e2e/framework/errors/errors.go
new file mode 100644
index 00000000..c2bd06a0
--- /dev/null
+++ b/test/e2e/framework/errors/errors.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+ "strings"
+)
+
+// Centralized error classification for e2e tests
+// Provides consistent error handling across kubectl operations
+
+// IsAlreadyExists checks if error indicates resource already exists
+func IsAlreadyExists(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "AlreadyExists") ||
+ strings.Contains(errStr, "already exists")
+}
+
+// IsNotFound checks if error indicates resource not found
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "NotFound") ||
+ strings.Contains(errStr, "not found") ||
+ strings.Contains(errStr, "(NotFound)")
+}
+
+// IsConflict checks if error indicates resource conflict
+func IsConflict(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "Conflict") ||
+ strings.Contains(errStr, "the object has been modified")
+}
+
+// IsTimeout checks if error indicates timeout
+func IsTimeout(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "timeout") ||
+ strings.Contains(errStr, "timed out") ||
+ strings.Contains(errStr, "context deadline exceeded")
+}
+
+// IsConnectionError checks if error indicates connection/network issue
+func IsConnectionError(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "connection refused") ||
+ strings.Contains(errStr, "i/o timeout") ||
+ strings.Contains(errStr, "network") ||
+ strings.Contains(errStr, "dial tcp")
+}
+
+// IsTransient checks if error is likely transient and retriable
+func IsTransient(err error) bool {
+ if err == nil {
+ return false
+ }
+ return IsTimeout(err) ||
+ IsConnectionError(err) ||
+ IsConflict(err) ||
+ strings.Contains(err.Error(), "Internal error") ||
+ strings.Contains(err.Error(), "TooManyRequests") ||
+ strings.Contains(err.Error(), "ServerTimeout")
+}
+
+// IsIgnorable checks if error can be safely ignored in test setup/teardown
+func IsIgnorable(err error) bool {
+ if err == nil {
+ return true
+ }
+ // AlreadyExists and NotFound are often acceptable in test lifecycle
+ return IsAlreadyExists(err) || IsNotFound(err)
+}
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
new file mode 100644
index 00000000..32ba098d
--- /dev/null
+++ b/test/e2e/framework/framework.go
@@ -0,0 +1,1154 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
+ . "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/util/wait"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework/config"
+ "github.com/kaasops/vector-operator/test/e2e/framework/errors"
+ "github.com/kaasops/vector-operator/test/e2e/framework/kubectl"
+ "github.com/kaasops/vector-operator/test/e2e/framework/recorder"
+)
+
+const (
+ // MaxConfigSize is the maximum allowed size for base64-encoded config data (10MB)
+ // This prevents DoS attacks via extremely large config payloads
+ MaxConfigSize = 10 * 1024 * 1024 // 10MB
+)
+
// Framework provides a high-level API for e2e tests
type Framework struct {
	namespace   string                 // Kubernetes namespace that all operations target
	kubectl     *kubectl.Client        // kubectl wrapper scoped to namespace
	isShared    bool                   // shared frameworks skip namespace deletion in Teardown
	metrics     *TestMetrics           // timing metrics accumulated during the test
	recorder    *recorder.TestRecorder // set when dry-run or step recording is enabled
	dryRun      bool                   // set via E2E_DRY_RUN=true
	recordSteps bool                   // set via E2E_DRY_RUN=true or E2E_RECORD_STEPS=true
	TestDataPath string // Path to test data directory (configurable via E2E_TESTDATA_PATH)
}
+
// TestMetrics tracks timing information for test operations
type TestMetrics struct {
	SetupTime              time.Duration // time spent in Setup (namespace creation + readiness wait)
	DeploymentWaitTime     time.Duration // cumulative time waiting for deployments to become ready
	PipelineValidationTime time.Duration // cumulative time waiting for pipelines to validate
	CleanupTime            time.Duration // time spent in Teardown deleting the namespace
}
+
// Global shared framework instance, lazily created by Shared().
var sharedFramework *Framework

// frameworkRegistry stores framework instances for artifact collection
// Key: namespace, Value: *Framework
// DEPRECATED: This is being phased out in favor of Ginkgo report entries.
// New code should use AddReportEntry() in Setup() and retrieve from ReportAfterEach.
var frameworkRegistry sync.Map

// FrameworkContextKey is the key type for storing Framework in context
// Using a custom type prevents collisions with other context keys
type FrameworkContextKey struct{}

// frameworkReportEntryName is the name used when storing framework in Ginkgo report entries
const frameworkReportEntryName = "framework-instance"
+
+// NewFramework creates a new isolated test framework with its own namespace
+func NewFramework(namespace string) *Framework {
+ // Get test data path from environment or use default
+ testDataPath := os.Getenv("E2E_TESTDATA_PATH")
+ if testDataPath == "" {
+ testDataPath = filepath.Join("test", "e2e", "testdata")
+ }
+
+ f := &Framework{
+ namespace: namespace,
+ kubectl: kubectl.NewClient(namespace),
+ isShared: false,
+ metrics: &TestMetrics{},
+ TestDataPath: testDataPath,
+ }
+
+ // Check for dry-run or recording mode
+ if os.Getenv("E2E_DRY_RUN") == "true" {
+ f.dryRun = true
+ f.recorder = recorder.NewTestRecorder(namespace)
+ f.recordSteps = true
+ } else if os.Getenv("E2E_RECORD_STEPS") == "true" {
+ f.recordSteps = true
+ f.recorder = recorder.NewTestRecorder(namespace)
+ }
+
+ return f
+}
+
+// NewUniqueFramework creates a new framework with a unique timestamped namespace
+// This prevents namespace collisions when tests run in parallel or when cleanup is slow
+func NewUniqueFramework(baseName string) *Framework {
+ // Use nanosecond timestamp + counter for uniqueness
+ timestamp := time.Now().UnixNano()
+ uniqueNS := fmt.Sprintf("%s-%d", baseName, timestamp)
+
+ // Get test data path from environment or use default
+ testDataPath := os.Getenv("E2E_TESTDATA_PATH")
+ if testDataPath == "" {
+ testDataPath = filepath.Join("test", "e2e", "testdata")
+ }
+
+ f := &Framework{
+ namespace: uniqueNS,
+ kubectl: kubectl.NewClient(uniqueNS),
+ isShared: false,
+ metrics: &TestMetrics{},
+ TestDataPath: testDataPath,
+ }
+
+ // Check for dry-run or recording mode
+ if os.Getenv("E2E_DRY_RUN") == "true" {
+ f.dryRun = true
+ f.recorder = recorder.NewTestRecorder(uniqueNS)
+ f.recordSteps = true
+ } else if os.Getenv("E2E_RECORD_STEPS") == "true" {
+ f.recordSteps = true
+ f.recorder = recorder.NewTestRecorder(uniqueNS)
+ }
+
+ return f
+}
+
+// Shared returns a shared framework instance that reuses the same namespace
+// This is useful for parallel tests that don't interfere with each other
+func Shared(namespace string) *Framework {
+ if sharedFramework == nil {
+ // Get test data path from environment or use default
+ testDataPath := os.Getenv("E2E_TESTDATA_PATH")
+ if testDataPath == "" {
+ testDataPath = filepath.Join("test", "e2e", "testdata")
+ }
+
+ sharedFramework = &Framework{
+ namespace: namespace,
+ kubectl: kubectl.NewClient(namespace),
+ isShared: true,
+ metrics: &TestMetrics{},
+ TestDataPath: testDataPath,
+ }
+
+ // Check for dry-run or recording mode
+ if os.Getenv("E2E_DRY_RUN") == "true" {
+ sharedFramework.dryRun = true
+ sharedFramework.recorder = recorder.NewTestRecorder(namespace)
+ sharedFramework.recordSteps = true
+ } else if os.Getenv("E2E_RECORD_STEPS") == "true" {
+ sharedFramework.recordSteps = true
+ sharedFramework.recorder = recorder.NewTestRecorder(namespace)
+ }
+ }
+ return sharedFramework
+}
+
// Setup performs the test environment setup: registers the framework for
// artifact collection, creates the test namespace (tolerating AlreadyExists),
// and blocks until the namespace reports phase Active.
func (f *Framework) Setup() {
	// Store framework in Ginkgo report entries for artifact collection
	// This is the preferred method as it directly associates framework with the current test
	// and works correctly with parallel test execution
	AddReportEntry(frameworkReportEntryName, f)

	// DEPRECATED: Also store in global registry for backward compatibility
	// This will be removed in a future version once all code migrates to using report entries
	frameworkRegistry.Store(f.namespace, f)

	// Record total setup wall-clock time into the framework metrics.
	start := time.Now()
	defer func() {
		f.metrics.SetupTime = time.Since(start)
	}()

	By(fmt.Sprintf("creating test namespace: %s", f.namespace))
	err := kubectl.CreateNamespace(f.namespace)
	if err != nil {
		// Check if it's an ignorable error (AlreadyExists, NotFound)
		if errors.IsIgnorable(err) {
			GinkgoWriter.Printf("Warning: namespace creation failed (might already exist): %v\n", err)
		} else {
			// Only fail for non-ignorable errors
			Expect(err).NotTo(HaveOccurred())
		}
	}

	// Wait for namespace to be ready before proceeding
	By(fmt.Sprintf("waiting for namespace to be ready: %s", f.namespace))
	Eventually(func() bool {
		ns, err := kubectl.GetNamespace(f.namespace)
		if err != nil {
			GinkgoWriter.Printf("Failed to get namespace status: %v\n", err)
			return false
		}
		// Check namespace is Active (not Terminating)
		return ns.Status.Phase == "Active"
	}, config.DeploymentReadyTimeout, config.DefaultPollInterval).Should(BeTrue(),
		fmt.Sprintf("namespace %s should be Active", f.namespace))
}
+
// Teardown performs the test environment cleanup: exports the recorded test
// plan (if recording is enabled) and deletes the namespace. Shared frameworks
// skip namespace deletion entirely. A failed deletion is logged, not fatal,
// so teardown never fails the test on its own.
func (f *Framework) Teardown() {
	// Export test plan if recording is enabled
	if f.recorder != nil && f.recordSteps {
		f.ExportTestPlan()
	}

	// Don't cleanup shared namespaces immediately
	if f.isShared {
		return
	}

	// Record cleanup wall-clock time into the framework metrics.
	start := time.Now()
	defer func() {
		f.metrics.CleanupTime = time.Since(start)
	}()

	By(fmt.Sprintf("cleaning up test namespace: %s", f.namespace))
	// The delete timeout is passed to kubectl as a seconds string (e.g. "120s").
	err := kubectl.DeleteNamespace(f.namespace, fmt.Sprintf("%ds", int(config.NamespaceDeleteTimeout.Seconds())))
	if err != nil {
		GinkgoWriter.Printf("Warning: namespace cleanup failed: %v\n", err)
	}

	// NOTE: Do NOT delete from frameworkRegistry here!
	// ReportAfterEach runs AFTER AfterAll/Teardown, and needs the framework
	// for artifact collection. The registry will be cleaned up when the process exits.
	// frameworkRegistry.Delete(f.namespace)
}
+
// Namespace returns the test namespace this framework operates in.
func (f *Framework) Namespace() string {
	return f.namespace
}
+
+// ApplyTestData loads and applies a test manifest from testdata directory
+// It automatically replaces any hardcoded namespace with the framework's namespace
+func (f *Framework) ApplyTestData(path string) {
+ By(fmt.Sprintf("applying test data: %s", path))
+
+ content, err := os.ReadFile(filepath.Join(f.TestDataPath, path))
+ Expect(err).NotTo(HaveOccurred(), "Failed to load test data from %s", path)
+
+ // Replace namespace in YAML if present
+ yamlContent := replaceNamespace(string(content), f.namespace)
+
+ err = f.kubectl.Apply(yamlContent)
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply test data %s in namespace %s", path, f.namespace)
+}
+
+// ApplyTestDataWithoutNamespaceReplacement loads and applies a test manifest WITHOUT namespace replacement
+// Use this when you need to apply resources to specific namespaces
+func (f *Framework) ApplyTestDataWithoutNamespaceReplacement(path string) {
+ By(fmt.Sprintf("applying test data without namespace replacement: %s", path))
+
+ content, err := os.ReadFile(filepath.Join(f.TestDataPath, path))
+ Expect(err).NotTo(HaveOccurred(), "Failed to load test data from %s", path)
+
+ // Apply without forcing namespace (YAML contains the correct namespace)
+ err = f.kubectl.ApplyWithoutNamespaceOverride(string(content))
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply test data %s", path)
+}
+
// replaceNamespace rewrites manifest lines of the form "  namespace: <x>"
// (the conventional two-space metadata indent) to point at the given
// namespace. Simple line-based replacement is sufficient for test manifests;
// proper YAML parsing would be overkill here.
//
// Fix: the previous check compared line[:12] (12 bytes) against a shorter
// string literal, which can never be equal in Go, so no line was ever
// replaced. strings.HasPrefix expresses the intent correctly and also
// guards against short lines.
func replaceNamespace(yaml, namespace string) string {
	lines := strings.Split(yaml, "\n")
	for i, line := range lines {
		if strings.HasPrefix(line, "  namespace:") {
			lines[i] = "  namespace: " + namespace
		}
	}
	return strings.Join(lines, "\n")
}
+
// splitLines splits s into its newline-separated lines.
func splitLines(s string) []string {
	const newline = "\n"
	return strings.Split(s, newline)
}
+
// joinLines reassembles lines into a single newline-separated string.
func joinLines(lines []string) string {
	const newline = "\n"
	return strings.Join(lines, newline)
}
+
+// ApplyYAML applies raw YAML content
+func (f *Framework) ApplyYAML(yamlContent string) {
+ err := f.kubectl.Apply(yamlContent)
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply YAML in namespace %s", f.namespace)
+}
+
+// WaitForDeploymentReady waits for a deployment to be ready
+func (f *Framework) WaitForDeploymentReady(name string) {
+ By(fmt.Sprintf("waiting for deployment %s to be ready", name))
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ f.metrics.DeploymentWaitTime += duration
+ GinkgoWriter.Printf("⏱️ Deployment %s ready in %v\n", name, duration)
+ }()
+
+ f.kubectl.WaitForDeploymentReady(name)
+}
+
+// WaitForPipelineValid waits for a pipeline to become valid
+func (f *Framework) WaitForPipelineValid(name string) {
+ By(fmt.Sprintf("waiting for pipeline %s to become valid", name))
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ f.metrics.PipelineValidationTime += duration
+ GinkgoWriter.Printf("⏱️ Pipeline %s validated in %v\n", name, duration)
+ }()
+
+ f.kubectl.WaitForPipelineValid(name)
+}
+
+// WaitForPipelineInvalid waits for a pipeline to become invalid (for negative tests)
+func (f *Framework) WaitForPipelineInvalid(name string) {
+ By(fmt.Sprintf("waiting for pipeline %s to become invalid", name))
+ f.kubectl.WaitForPipelineInvalid(name)
+}
+
+// GetPipelineStatus retrieves a specific status field from a pipeline
+func (f *Framework) GetPipelineStatus(name string, field string) string {
+ result, err := f.kubectl.GetWithJsonPath("vectorpipeline", name, fmt.Sprintf(".status.%s", field))
+ Expect(err).NotTo(HaveOccurred(),
+ "Failed to get pipeline %s status field %s in namespace %s", name, field, f.namespace)
+ return result
+}
+
+// GetServicePort retrieves the port of a service
+func (f *Framework) GetServicePort(name string) string {
+ result, err := f.kubectl.GetWithJsonPath("service", name, ".spec.ports[0].port")
+ Expect(err).NotTo(HaveOccurred(),
+ "Failed to get service %s port in namespace %s", name, f.namespace)
+ return result
+}
+
// TryGetServicePort retrieves the first port of a service without failing the
// test if the service is missing — the error is returned to the caller.
func (f *Framework) TryGetServicePort(name string) (string, error) {
	return f.kubectl.GetWithJsonPath("service", name, ".spec.ports[0].port")
}
+
+// CreateMultiplePipelinesFromTemplate creates N pipelines from a template by replacing a placeholder
+func (f *Framework) CreateMultiplePipelinesFromTemplate(templatePath, placeholder string, count int) time.Duration {
+ start := time.Now()
+
+ content, err := os.ReadFile(filepath.Join(f.TestDataPath, templatePath))
+ Expect(err).NotTo(HaveOccurred(), "Failed to load template from %s", templatePath)
+
+ template := string(content)
+
+ for i := 1; i <= count; i++ {
+ pipelineName := fmt.Sprintf("pipeline-%03d", i)
+ yaml := replaceNamespace(template, f.namespace)
+ yaml = replacePlaceholder(yaml, placeholder, pipelineName)
+
+ err = f.kubectl.Apply(yaml)
+ Expect(err).NotTo(HaveOccurred(),
+ "Failed to apply pipeline %s from template %s in namespace %s", pipelineName, templatePath, f.namespace)
+ }
+
+ return time.Since(start)
+}
+
// replacePlaceholder substitutes every occurrence of placeholder with value.
func replacePlaceholder(yaml, placeholder, value string) string {
	substituted := strings.ReplaceAll(yaml, placeholder, value)
	return substituted
}
+
+// CountValidPipelines counts how many pipelines are valid in the namespace
+func (f *Framework) CountValidPipelines() (int, error) {
+ result, err := f.kubectl.GetWithJsonPath("vectorpipeline", "", ".items[*].status.configCheckResult")
+ if err != nil {
+ return 0, err
+ }
+
+ if result == "" {
+ return 0, nil
+ }
+
+ validCount := 0
+ for _, status := range splitFields(result) {
+ if status == "true" {
+ validCount++
+ }
+ }
+
+ return validCount, nil
+}
+
+// CountPipelines returns the total number of pipelines in the namespace
+func (f *Framework) CountPipelines() (int, error) {
+ result, err := f.kubectl.GetAll("vectorpipeline", "")
+ if err != nil {
+ return 0, err
+ }
+
+ if result == "" {
+ return 0, nil
+ }
+
+ return len(splitFields(result)), nil
+}
+
+// CountServicesContaining counts services whose name contains the given substring
+func (f *Framework) CountServicesContaining(substring string) (int, error) {
+ result, err := f.kubectl.GetAll("service", "")
+ if err != nil {
+ return 0, err
+ }
+
+ if result == "" {
+ return 0, nil
+ }
+
+ count := 0
+ for _, svc := range splitFields(result) {
+ if svc != "" && containsSubstring(svc, substring) {
+ count++
+ }
+ }
+
+ return count, nil
+}
+
// containsSubstring reports whether s contains substr.
func containsSubstring(s, substr string) bool {
	found := strings.Contains(s, substr)
	return found
}
+
+// ExpectServiceExists verifies that a service exists
+func (f *Framework) ExpectServiceExists(name string) {
+ By(fmt.Sprintf("verifying service %s exists", name))
+ _, err := f.kubectl.Get("service", name)
+ Expect(err).NotTo(HaveOccurred(),
+ "Expected service %s to exist in namespace %s", name, f.namespace)
+}
+
+// CountServicesWithLabel counts services matching a label selector
+func (f *Framework) CountServicesWithLabel(labelSelector string) int {
+ result, err := f.kubectl.GetAll("service", labelSelector)
+ Expect(err).NotTo(HaveOccurred(),
+ "Failed to get services with label %s in namespace %s", labelSelector, f.namespace)
+
+ if result == "" {
+ return 0
+ }
+
+ count := 0
+ for _, svc := range splitFields(result) {
+ if svc != "" {
+ count++
+ }
+ }
+ return count
+}
+
+// WaitForServiceCount waits for a specific number of services
+func (f *Framework) WaitForServiceCount(labelSelector string, expectedCount int, timeout time.Duration) {
+ By(fmt.Sprintf("waiting for %d services with label %s", expectedCount, labelSelector))
+ f.kubectl.WaitForServiceCount(labelSelector, expectedCount, timeout)
+}
+
// PrintMetrics prints the accumulated timing metrics for the test to the
// Ginkgo writer (setup, deployment waits, pipeline validation, cleanup, and
// the sum of all four).
func (f *Framework) PrintMetrics() {
	GinkgoWriter.Println("\n📊 Test Metrics:")
	GinkgoWriter.Printf(" Setup: %v\n", f.metrics.SetupTime)
	GinkgoWriter.Printf(" Deployment Wait: %v\n", f.metrics.DeploymentWaitTime)
	GinkgoWriter.Printf(" Pipeline Validation: %v\n", f.metrics.PipelineValidationTime)
	GinkgoWriter.Printf(" Cleanup: %v\n", f.metrics.CleanupTime)
	GinkgoWriter.Printf(" Total: %v\n", f.metrics.SetupTime+f.metrics.DeploymentWaitTime+f.metrics.PipelineValidationTime+f.metrics.CleanupTime)
}
+
// splitFields splits whitespace-separated fields; runs of whitespace collapse
// and an empty input yields an empty slice.
func splitFields(s string) []string {
	fields := strings.Fields(s)
	return fields
}
+
// GetPodLogs retrieves the full logs from a pod in the framework's namespace.
func (f *Framework) GetPodLogs(podName string) (string, error) {
	return f.kubectl.GetPodLogs(podName)
}
+
// GetPodLogsTail retrieves the last N lines of logs from a pod in the
// framework's namespace.
func (f *Framework) GetPodLogsTail(podName string, lines int) (string, error) {
	return f.kubectl.GetPodLogsTail(podName, lines)
}
+
// GetPodsByLabel retrieves pod names matching a label selector in the
// framework's namespace.
func (f *Framework) GetPodsByLabel(labelSelector string) ([]string, error) {
	return f.kubectl.GetPodsByLabel(labelSelector)
}
+
// WaitForPodReady waits for a pod to become ready, failing the test on timeout.
// NOTE(review): the "2m" timeout is hardcoded rather than sourced from the
// config package's env-tunable timeouts — confirm whether it should be
// configurable like the other waits.
func (f *Framework) WaitForPodReady(podName string) {
	By(fmt.Sprintf("waiting for pod %s to be ready", podName))
	err := f.kubectl.WaitForPodReady(podName, "2m")
	Expect(err).NotTo(HaveOccurred(), "Pod %s did not become ready in namespace %s", podName, f.namespace)
}
+
+// GetAggregatorPods retrieves aggregator pod names for a given aggregator
+func (f *Framework) GetAggregatorPods(aggregatorName string) ([]string, error) {
+ // Aggregator pods use instance label to identify which aggregator they belong to
+ labelSelector := fmt.Sprintf("app.kubernetes.io/instance=%s,app.kubernetes.io/component=Aggregator", aggregatorName)
+ return f.kubectl.GetPodsByLabel(labelSelector)
+}
+
+// GetAgentPods retrieves agent pod names
+func (f *Framework) GetAgentPods(vectorName string) ([]string, error) {
+ // Agent pods use instance label and component=Agent
+ labelSelector := fmt.Sprintf("app.kubernetes.io/instance=%s,app.kubernetes.io/component=Agent", vectorName)
+ return f.kubectl.GetPodsByLabel(labelSelector)
+}
+
+// GetPipelineAnnotation retrieves a specific annotation from a pipeline
+func (f *Framework) GetPipelineAnnotation(name string, annotationKey string) string {
+ jsonPath := fmt.Sprintf(".metadata.annotations['%s']", annotationKey)
+ result, err := f.kubectl.GetWithJsonPath("vectorpipeline", name, jsonPath)
+ if err != nil {
+ // Annotation might not exist, which is expected in some cases
+ return ""
+ }
+ return result
+}
+
// VerifyAgentHasPipeline verifies that the agent Secret contains the specified
// pipeline, using the framework's own namespace as the pipeline's namespace.
func (f *Framework) VerifyAgentHasPipeline(vectorName, pipelineName string) error {
	return f.VerifyAgentHasPipelineInNamespace(vectorName, pipelineName, f.namespace)
}
+
// VerifyAgentHasPipelineInNamespace verifies that an agent Secret contains the
// specified pipeline from a specific namespace. It returns an error instead
// of asserting so callers can poll it with Eventually.
func (f *Framework) VerifyAgentHasPipelineInNamespace(vectorName, pipelineName, namespace string) error {
	// Get the agent's vector config from the Secret
	// The config is stored in a Secret with name pattern: {vectorName}-agent
	secretName := fmt.Sprintf("%s-agent", vectorName)

	// Get base64-encoded config from Secret
	encodedConfig, err := f.kubectl.GetWithJsonPath("secret", secretName, ".data['agent\\.json']")
	if err != nil {
		return fmt.Errorf("failed to get agent secret %s: %w", secretName, err)
	}

	if encodedConfig == "" {
		return fmt.Errorf("agent secret %s has no agent.json data", secretName)
	}

	// Check size before decoding to prevent DoS via large payloads
	// (base64 inflates the payload by 4/3, so compare against the encoded bound).
	maxEncodedSize := MaxConfigSize * 4 / 3
	if len(encodedConfig) > maxEncodedSize {
		return fmt.Errorf("config too large: %d bytes (max %d bytes)", len(encodedConfig), maxEncodedSize)
	}

	// Decode base64
	configBytes, err := base64.StdEncoding.DecodeString(encodedConfig)
	if err != nil {
		return fmt.Errorf("failed to decode base64 config from secret %s: %w", secretName, err)
	}
	config := string(configBytes)

	if config == "" {
		return fmt.Errorf("agent config is empty after decoding")
	}

	// Check if the pipeline name appears in the config
	// In normal mode, pipeline components are prefixed with namespace-pipelinename-
	expectedPrefix := fmt.Sprintf("%s-%s-", namespace, pipelineName)
	if !strings.Contains(config, expectedPrefix) {
		return fmt.Errorf("pipeline %s not found in agent config (expected prefix: %s)", pipelineName, expectedPrefix)
	}

	return nil
}
+
// VerifyAgentHasClusterPipeline verifies that an agent Secret contains the
// specified ClusterVectorPipeline. Cluster-scoped pipeline components carry no
// namespace prefix, so only "pipelinename-" is matched. Returns an error
// instead of asserting so callers can poll it with Eventually.
func (f *Framework) VerifyAgentHasClusterPipeline(vectorName, pipelineName string) error {
	// Get the agent's vector config from the Secret
	secretName := fmt.Sprintf("%s-agent", vectorName)

	// Get base64-encoded config from Secret
	encodedConfig, err := f.kubectl.GetWithJsonPath("secret", secretName, ".data['agent\\.json']")
	if err != nil {
		return fmt.Errorf("failed to get agent secret %s: %w", secretName, err)
	}

	if encodedConfig == "" {
		return fmt.Errorf("agent secret %s has no agent.json data", secretName)
	}

	// Check size before decoding to prevent DoS via large payloads
	// (base64 inflates the payload by 4/3, so compare against the encoded bound).
	maxEncodedSize := MaxConfigSize * 4 / 3
	if len(encodedConfig) > maxEncodedSize {
		return fmt.Errorf("config too large: %d bytes (max %d bytes)", len(encodedConfig), maxEncodedSize)
	}

	// Decode base64
	configBytes, err := base64.StdEncoding.DecodeString(encodedConfig)
	if err != nil {
		return fmt.Errorf("failed to decode base64 config from secret %s: %w", secretName, err)
	}
	config := string(configBytes)

	if config == "" {
		return fmt.Errorf("agent config is empty after decoding")
	}

	// Check if the cluster pipeline name appears in the config
	// ClusterVectorPipeline components are prefixed with only pipelinename- (no namespace prefix)
	expectedPrefix := fmt.Sprintf("%s-", pipelineName)
	if !strings.Contains(config, expectedPrefix) {
		return fmt.Errorf("cluster pipeline %s not found in agent config (expected prefix: %s)", pipelineName, expectedPrefix)
	}

	return nil
}
+
// VerifyAggregatorHasPipeline verifies that an aggregator Secret contains the
// specified pipeline from the framework's namespace. Returns an error instead
// of asserting so callers can poll it with Eventually.
func (f *Framework) VerifyAggregatorHasPipeline(aggregatorName, pipelineName string) error {
	// Get the aggregator's vector config from the Secret
	// The config is stored in a Secret with name pattern: {aggregatorName}-aggregator
	secretName := fmt.Sprintf("%s-aggregator", aggregatorName)

	// Get base64-encoded config from Secret
	encodedConfig, err := f.kubectl.GetWithJsonPath("secret", secretName, ".data['config\\.json']")
	if err != nil {
		return fmt.Errorf("failed to get aggregator secret %s: %w", secretName, err)
	}

	if encodedConfig == "" {
		return fmt.Errorf("aggregator secret %s has no config.json data", secretName)
	}

	// Check size before decoding to prevent DoS via large payloads
	// (base64 inflates the payload by 4/3, so compare against the encoded bound).
	maxEncodedSize := MaxConfigSize * 4 / 3
	if len(encodedConfig) > maxEncodedSize {
		return fmt.Errorf("config too large: %d bytes (max %d bytes)", len(encodedConfig), maxEncodedSize)
	}

	// Decode base64
	configBytes, err := base64.StdEncoding.DecodeString(encodedConfig)
	if err != nil {
		return fmt.Errorf("failed to decode base64 config from secret %s: %w", secretName, err)
	}
	config := string(configBytes)

	if config == "" {
		return fmt.Errorf("aggregator %s config is empty after decoding", aggregatorName)
	}

	// Check if the pipeline name appears in the config
	// Namespaced pipeline components are prefixed with namespace-pipelinename-
	expectedPrefix := fmt.Sprintf("%s-%s-", f.namespace, pipelineName)
	if !strings.Contains(config, expectedPrefix) {
		return fmt.Errorf("pipeline %s not found in aggregator %s config (expected prefix: %s)",
			pipelineName, aggregatorName, expectedPrefix)
	}

	return nil
}
+
+// ApplyTestDataWithVars loads and applies a test manifest with variable substitution
+func (f *Framework) ApplyTestDataWithVars(path string, vars map[string]string) {
+ By(fmt.Sprintf("applying test data with vars: %s", path))
+
+ content, err := os.ReadFile(filepath.Join(f.TestDataPath, path))
+ Expect(err).NotTo(HaveOccurred(), "Failed to load test data from %s", path)
+
+ // Replace namespace in YAML
+ yamlContent := replaceNamespace(string(content), f.namespace)
+
+ // Replace variables
+ for placeholder, value := range vars {
+ yamlContent = strings.ReplaceAll(yamlContent, placeholder, value)
+ }
+
+ err = f.kubectl.Apply(yamlContent)
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply test data %s in namespace %s", path, f.namespace)
+}
+
+// DeleteResource deletes a Kubernetes resource
+func (f *Framework) DeleteResource(kind, name string) {
+ By(fmt.Sprintf("deleting %s %s", kind, name))
+ err := f.kubectl.Delete(kind, name)
+ Expect(err).NotTo(HaveOccurred(), "Failed to delete %s %s in namespace %s", kind, name, f.namespace)
+}
+
// WaitForPodReadyInNamespace waits for a pod to become ready in a specific
// namespace (a fresh kubectl client is created for that namespace).
// NOTE(review): the "2m" timeout is hardcoded, mirroring WaitForPodReady —
// confirm whether it should come from the config package.
func (f *Framework) WaitForPodReadyInNamespace(podName, namespace string) {
	By(fmt.Sprintf("waiting for pod %s to be ready in namespace %s", podName, namespace))
	client := kubectl.NewClient(namespace)
	err := client.WaitForPodReady(podName, "2m")
	Expect(err).NotTo(HaveOccurred(), "Pod %s did not become ready in namespace %s", podName, namespace)
}
+
+// WaitForPipelineValidInNamespace waits for a pipeline to become valid in a specific namespace
+func (f *Framework) WaitForPipelineValidInNamespace(name, namespace string) {
+ By(fmt.Sprintf("waiting for pipeline %s to become valid in namespace %s", name, namespace))
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ GinkgoWriter.Printf("⏱️ Pipeline %s validated in %v (namespace: %s)\n", name, duration, namespace)
+ }()
+
+ client := kubectl.NewClient(namespace)
+ client.WaitForPipelineValid(name)
+}
+
+// GetPipelineAnnotationInNamespace retrieves a specific annotation from a pipeline in a specific namespace
+func (f *Framework) GetPipelineAnnotationInNamespace(name, namespace, annotationKey string) string {
+ jsonPath := fmt.Sprintf(".metadata.annotations['%s']", annotationKey)
+ client := kubectl.NewClient(namespace)
+ result, err := client.GetWithJsonPath("vectorpipeline", name, jsonPath)
+ if err != nil {
+ // Annotation might not exist, which is expected in some cases
+ return ""
+ }
+ return result
+}
+
// WaitForClusterPipelineValid waits for a ClusterVectorPipeline to become
// valid (status.configCheckResult == "true"), logging the elapsed time.
func (f *Framework) WaitForClusterPipelineValid(name string) {
	By(fmt.Sprintf("waiting for ClusterVectorPipeline %s to become valid", name))
	start := time.Now()
	defer func() {
		duration := time.Since(start)
		GinkgoWriter.Printf("⏱️ ClusterVectorPipeline %s validated in %v\n", name, duration)
	}()

	// ClusterVectorPipeline is cluster-scoped, so we use a client without namespace
	client := kubectl.NewClient("")
	// Poll until the config check reports "true"; lookup errors are ignored
	// and simply retried on the next tick.
	Eventually(func() string {
		result, _ := client.GetWithJsonPath("clustervectorpipeline", name, ".status.configCheckResult")
		return result
	}, config.PipelineValidTimeout, config.DefaultPollInterval).Should(Equal("true"),
		"ClusterVectorPipeline %s did not become valid", name)
}
+
+// GetClusterPipelineAnnotation retrieves a specific annotation from a ClusterVectorPipeline
+func (f *Framework) GetClusterPipelineAnnotation(name, annotationKey string) string {
+ jsonPath := fmt.Sprintf(".metadata.annotations['%s']", annotationKey)
+ client := kubectl.NewClient("")
+ result, err := client.GetWithJsonPath("clustervectorpipeline", name, jsonPath)
+ if err != nil {
+ // Annotation might not exist, which is expected in some cases
+ return ""
+ }
+ return result
+}
+
+// GetClusterPipelineStatus retrieves a specific status field from a ClusterVectorPipeline
+func (f *Framework) GetClusterPipelineStatus(name, field string) string {
+ client := kubectl.NewClient("")
+ result, err := client.GetWithJsonPath("clustervectorpipeline", name, fmt.Sprintf(".status.%s", field))
+ Expect(err).NotTo(HaveOccurred(),
+ "Failed to get ClusterVectorPipeline %s status field %s", name, field)
+ return result
+}
+
// Kubectl returns the namespace-scoped kubectl client for direct use in tests.
func (f *Framework) Kubectl() *kubectl.Client {
	return f.kubectl
}
+
+// GetRegisteredFramework retrieves a framework by namespace
+// Used by artifact collector to access kubectl client and namespace
+func GetRegisteredFramework(namespace string) (*Framework, bool) {
+ value, ok := frameworkRegistry.Load(namespace)
+ if !ok {
+ return nil, false
+ }
+ return value.(*Framework), true
+}
+
// GetFrameworkRegistry returns the framework registry for iteration.
// Used by ReportAfterEach to find frameworks when the namespace is not known.
func GetFrameworkRegistry() *sync.Map {
	return &frameworkRegistry
}
+
+// GetSecret retrieves a Secret by name in the framework's namespace
+func (f *Framework) GetSecret(name string) (map[string][]byte, error) {
+ cmd := fmt.Sprintf("kubectl get secret %s -n %s -o json", name, f.namespace)
+ output, err := exec.Command("sh", "-c", cmd).CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get secret %s: %w, output: %s", name, err, string(output))
+ }
+
+ var secret struct {
+ Data map[string]string `json:"data"`
+ }
+ if err := json.Unmarshal(output, &secret); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal secret: %w", err)
+ }
+
+ // Decode base64 data
+ decodedData := make(map[string][]byte)
+ maxEncodedSize := MaxConfigSize * 4 / 3
+ for k, v := range secret.Data {
+ // Check size before decoding to prevent DoS via large payloads
+ if len(v) > maxEncodedSize {
+ return nil, fmt.Errorf("secret data for key %s too large: %d bytes (max %d bytes)", k, len(v), maxEncodedSize)
+ }
+
+ decoded, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode secret data for key %s: %w", k, err)
+ }
+ decodedData[k] = decoded
+ }
+
+ return decodedData, nil
+}
+
+// GetDeployment retrieves a Deployment by name in the framework's namespace
+func (f *Framework) GetDeployment(name string) (*DeploymentInfo, error) {
+ cmd := fmt.Sprintf("kubectl get deployment %s -n %s -o json", name, f.namespace)
+ output, err := exec.Command("sh", "-c", cmd).CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get deployment %s: %w, output: %s", name, err, string(output))
+ }
+
+ var deployment struct {
+ Spec struct {
+ Template struct {
+ Spec struct {
+ InitContainers []struct {
+ Name string `json:"name"`
+ } `json:"initContainers"`
+ Containers []struct {
+ Name string `json:"name"`
+ } `json:"containers"`
+ } `json:"spec"`
+ } `json:"template"`
+ } `json:"spec"`
+ }
+ if err := json.Unmarshal(output, &deployment); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal deployment: %w", err)
+ }
+
+ info := &DeploymentInfo{
+ InitContainers: make([]string, 0),
+ Containers: make([]string, 0),
+ }
+
+ for _, c := range deployment.Spec.Template.Spec.InitContainers {
+ info.InitContainers = append(info.InitContainers, c.Name)
+ }
+ for _, c := range deployment.Spec.Template.Spec.Containers {
+ info.Containers = append(info.Containers, c.Name)
+ }
+
+ return info, nil
+}
+
// DeploymentInfo contains simplified deployment information: just the container
// names from the pod template, as extracted by GetDeployment.
type DeploymentInfo struct {
	// InitContainers holds the names from spec.template.spec.initContainers.
	InitContainers []string
	// Containers holds the names from spec.template.spec.containers.
	Containers []string
}
+
// RecordStep records a test step for reproducibility.
// No-op when the framework has no recorder configured.
func (f *Framework) RecordStep(step recorder.TestStep) {
	if f.recorder != nil {
		f.recorder.RecordStep(step)
	}
}
+
// SetTestName sets the current test name in the recorder.
// No-op when the framework has no recorder configured.
func (f *Framework) SetTestName(name string) {
	if f.recorder != nil {
		f.recorder.SetTestName(name)
	}
}
+
+// ExportTestPlan exports the recorded test plan to files
+func (f *Framework) ExportTestPlan() {
+ if f.recorder == nil {
+ return
+ }
+
+ // Get current test spec info
+ spec := CurrentSpecReport()
+ testName := buildTestName(spec)
+
+ if testName == "" {
+ testName = "unknown-test"
+ }
+
+ f.recorder.SetTestName(testName)
+
+ // In dry-run mode, print to stdout
+ if f.dryRun {
+ fmt.Println("\n" + strings.Repeat("=", 80))
+ fmt.Printf("Test Plan: %s\n", testName)
+ fmt.Println(strings.Repeat("=", 80))
+ fmt.Println(f.recorder.ExportAsShellScript())
+ return
+ }
+
+ // Otherwise, save to artifact directory if it exists
+ artifactDir := os.Getenv("ARTIFACT_DIR")
+ if artifactDir == "" {
+ artifactDir = "test/e2e/results/test-plans"
+ }
+
+ // Create directory if it doesn't exist
+ if err := os.MkdirAll(artifactDir, 0755); err != nil {
+ fmt.Printf("Warning: failed to create artifact directory: %v\n", err)
+ return
+ }
+
+ // Sanitize test name for filename
+ safeTestName := strings.ReplaceAll(testName, " ", "-")
+ safeTestName = strings.ReplaceAll(safeTestName, "/", "-")
+
+ // Save as shell script
+ scriptPath := filepath.Join(artifactDir, fmt.Sprintf("%s.sh", safeTestName))
+ scriptContent := f.recorder.ExportAsShellScript()
+ if err := os.WriteFile(scriptPath, []byte(scriptContent), 0755); err != nil {
+ fmt.Printf("Warning: failed to write test plan script: %v\n", err)
+ } else {
+ fmt.Printf("✓ Test plan saved to: %s\n", scriptPath)
+ }
+
+ // Save as markdown
+ mdPath := filepath.Join(artifactDir, fmt.Sprintf("%s.md", safeTestName))
+ mdContent := f.recorder.ExportAsMarkdown()
+ if err := os.WriteFile(mdPath, []byte(mdContent), 0644); err != nil {
+ fmt.Printf("Warning: failed to write test plan markdown: %v\n", err)
+ } else {
+ fmt.Printf("✓ Test plan documentation saved to: %s\n", mdPath)
+ }
+}
+
+// buildTestName constructs a test name from the spec report
+func buildTestName(spec types.SpecReport) string {
+ hierarchy := spec.ContainerHierarchyTexts
+ leaf := spec.LeafNodeText
+
+ if len(hierarchy) > 0 {
+ return strings.Join(append(hierarchy, leaf), " ")
+ }
+ return leaf
+}
+
// ToContext stores the framework in the given context under FrameworkContextKey.
// This allows the framework to be passed through context chains if needed;
// retrieve it again with FromContext.
func (f *Framework) ToContext(ctx context.Context) context.Context {
	return context.WithValue(ctx, FrameworkContextKey{}, f)
}
+
+// FromContext retrieves a framework from the given context
+// Returns nil if no framework is stored in the context
+func FromContext(ctx context.Context) *Framework {
+ if f, ok := ctx.Value(FrameworkContextKey{}).(*Framework); ok {
+ return f
+ }
+ return nil
+}
+
// FromReportEntries retrieves a framework from Ginkgo report entries.
// This is the preferred way to access the framework in ReportAfterEach.
// Returns nil if no framework entry is found or if the entry's value is not
// a *Framework.
func FromReportEntries(entries []types.ReportEntry) *Framework {
	for _, entry := range entries {
		if entry.Name == frameworkReportEntryName {
			// GetRawValue() returns the underlying interface{} value that was
			// attached when the report entry was created.
			if f, ok := entry.Value.GetRawValue().(*Framework); ok {
				return f
			}
		}
	}
	return nil
}
+
// LogOptions contains options for retrieving pod logs.
type LogOptions struct {
	// Container name to get logs from (empty for default container).
	// NOTE(review): not currently honored by GetPodLogsWithOptions — confirm
	// before relying on it.
	Container string
	// TailLines limits the number of lines from the end of the logs.
	TailLines int
	// SinceSeconds returns logs newer than a relative duration (in seconds).
	// NOTE(review): not currently honored by GetPodLogsWithOptions.
	SinceSeconds int
}
+
+// WaitForLogsContaining waits for a substring to appear in pod logs
+// Returns nil if found, error if timeout occurs
+func (f *Framework) WaitForLogsContaining(podName, substring string, timeout time.Duration) error {
+ fmt.Fprintf(GinkgoWriter, "⏳ Waiting for logs in pod %s to contain: %s\n", podName, substring)
+
+ var lastLogs string
+ startTime := time.Now()
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ logs, err := f.GetPodLogs(podName)
+ if err != nil {
+ // Not a critical error, pod might not exist yet or be starting
+ return false, nil
+ }
+ lastLogs = logs
+ return strings.Contains(logs, substring), nil
+ })
+
+ if err != nil {
+ elapsed := time.Since(startTime)
+ // Truncate logs if too long
+ truncatedLogs := lastLogs
+ if len(lastLogs) > 500 {
+ truncatedLogs = lastLogs[len(lastLogs)-500:] + "\n... (truncated)"
+ }
+ return fmt.Errorf("timeout waiting for logs to contain '%s' in pod %s after %v. Last logs:\n%s",
+ substring, podName, elapsed, truncatedLogs)
+ }
+
+ elapsed := time.Since(startTime)
+ fmt.Fprintf(GinkgoWriter, "✓ Found expected substring in pod %s logs (took %v)\n", podName, elapsed)
+ return nil
+}
+
+// WaitForLogsMatching waits for a regex pattern to match in pod logs
+// Returns nil if match found, error if timeout occurs or pattern is invalid
+func (f *Framework) WaitForLogsMatching(podName, pattern string, timeout time.Duration) error {
+ fmt.Fprintf(GinkgoWriter, "⏳ Waiting for logs in pod %s to match pattern: %s\n", podName, pattern)
+
+ // Compile regex pattern
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return fmt.Errorf("invalid regex pattern '%s': %w", pattern, err)
+ }
+
+ var lastLogs string
+ startTime := time.Now()
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ err = wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+ logs, err := f.GetPodLogs(podName)
+ if err != nil {
+ // Not a critical error, pod might not exist yet or be starting
+ return false, nil
+ }
+ lastLogs = logs
+ return re.MatchString(logs), nil
+ })
+
+ if err != nil {
+ elapsed := time.Since(startTime)
+ // Truncate logs if too long
+ truncatedLogs := lastLogs
+ if len(lastLogs) > 500 {
+ truncatedLogs = lastLogs[len(lastLogs)-500:] + "\n... (truncated)"
+ }
+ return fmt.Errorf("timeout waiting for logs to match pattern '%s' in pod %s after %v. Last logs:\n%s",
+ pattern, podName, elapsed, truncatedLogs)
+ }
+
+ elapsed := time.Since(startTime)
+ fmt.Fprintf(GinkgoWriter, "✓ Found pattern match in pod %s logs (took %v)\n", podName, elapsed)
+ return nil
+}
+
// AssertNoLogsContaining verifies that a substring does NOT appear in pod logs
// at any point during checkDuration (a Consistently-style negative assertion).
// Returns nil if the substring stays absent for the entire duration, an error
// if it was ever observed or if polling failed for another reason.
func (f *Framework) AssertNoLogsContaining(podName, substring string, checkDuration time.Duration) error {
	fmt.Fprintf(GinkgoWriter, "⏳ Verifying logs in pod %s do NOT contain: %s (checking for %v)\n",
		podName, substring, checkDuration)

	// foundLogs is only set when the substring is seen; it doubles as the
	// "was it ever found" flag for the post-poll analysis below.
	var foundLogs string
	startTime := time.Now()
	ctx, cancel := context.WithTimeout(context.Background(), checkDuration)
	defer cancel()

	// The poll callback returns true (stop) as soon as the substring is FOUND,
	// which for a negative assertion is the failure case.
	err := wait.PollUntilContextTimeout(ctx, time.Second, checkDuration, true, func(ctx context.Context) (bool, error) {
		logs, err := f.GetPodLogs(podName)
		if err != nil {
			// Pod might not exist yet, which is acceptable for negative checks
			return false, nil
		}

		if strings.Contains(logs, substring) {
			foundLogs = logs
			// Found the substring - this is a failure for negative assertion
			return true, nil
		}

		// Continue checking
		return false, nil
	})

	// For Consistently-style checks, we want to ensure the substring was NEVER found
	if wait.Interrupted(err) {
		// Timeout means we successfully verified absence for the entire duration
		elapsed := time.Since(startTime)
		fmt.Fprintf(GinkgoWriter, "✓ Verified substring absent in pod %s logs for %v\n", podName, elapsed)
		return nil
	}

	if foundLogs != "" {
		// We found the substring - this is an error
		truncatedLogs := foundLogs
		if len(foundLogs) > 500 {
			truncatedLogs = foundLogs[len(foundLogs)-500:] + "\n... (truncated)"
		}
		return fmt.Errorf("found unexpected substring '%s' in pod %s logs. Last logs:\n%s",
			substring, podName, truncatedLogs)
	}

	// Other error occurred
	if err != nil {
		return fmt.Errorf("error while checking logs for pod %s: %w", podName, err)
	}

	return nil
}
+
+// GetPodLogsWithOptions retrieves logs from a pod with custom options
+func (f *Framework) GetPodLogsWithOptions(podName string, opts LogOptions) (string, error) {
+ if opts.Container != "" || opts.TailLines > 0 || opts.SinceSeconds > 0 {
+ // Use kubectl client methods if options are specified
+ if opts.TailLines > 0 {
+ return f.kubectl.GetPodLogsTail(podName, opts.TailLines)
+ }
+ // For other options, we'd need to add more kubectl methods
+ // For now, fall back to basic GetPodLogs
+ return f.kubectl.GetPodLogs(podName)
+ }
+
+ return f.kubectl.GetPodLogs(podName)
+}
diff --git a/test/e2e/framework/kubectl/client.go b/test/e2e/framework/kubectl/client.go
new file mode 100644
index 00000000..6d13ca7f
--- /dev/null
+++ b/test/e2e/framework/kubectl/client.go
@@ -0,0 +1,485 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os/exec"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/kaasops/vector-operator/test/utils"
+)
+
// Client provides convenient kubectl operations scoped to a single namespace.
// Commands are executed through the kubectl binary; method arguments are
// validated before use to guard against command injection.
type Client struct {
	// namespace targeted by namespaced commands; empty for cluster-scoped use.
	namespace string
}
+
// NewClient creates a new kubectl client for the given namespace.
// Pass an empty namespace when working with cluster-scoped resources
// (ValidateNamespace accepts "" for exactly that case).
func NewClient(namespace string) *Client {
	return &Client{namespace: namespace}
}
+
+// Apply applies YAML content to the cluster with explicit namespace override
+// This ensures resources are created in the correct test namespace
+func (c *Client) Apply(yamlContent string) error {
+ // Validate namespace to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return fmt.Errorf("namespace validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl apply -f - -n %s", c.namespace)
+
+ cmd := exec.Command("kubectl", "apply", "-f", "-", "-n", c.namespace)
+ cmd.Stdin = strings.NewReader(yamlContent)
+ output, err := utils.Run(cmd)
+
+ // Log kubectl output for debugging (helps catch namespace mismatches)
+ if len(output) > 0 {
+ fmt.Printf("kubectl apply: %s\n", string(output))
+ }
+
+ return err
+}
+
+// ApplyWithoutNamespaceOverride applies YAML content without forcing namespace
+// Use this when the YAML already contains the correct namespace field
+func (c *Client) ApplyWithoutNamespaceOverride(yamlContent string) error {
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl apply -f -")
+
+ cmd := exec.Command("kubectl", "apply", "-f", "-")
+ cmd.Stdin = strings.NewReader(yamlContent)
+ output, err := utils.Run(cmd)
+
+ // Log kubectl output for debugging
+ if len(output) > 0 {
+ fmt.Printf("kubectl apply: %s\n", string(output))
+ }
+
+ return err
+}
+
+// Get retrieves a resource by name and type
+func (c *Client) Get(resourceType, name string) ([]byte, error) {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return nil, fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceType(resourceType); err != nil {
+ return nil, fmt.Errorf("resource type validation failed: %w", err)
+ }
+ if err := ValidateResourceName(name); err != nil {
+ return nil, fmt.Errorf("resource name validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl get %s %s -n %s", resourceType, name, c.namespace)
+
+ cmd := exec.Command("kubectl", "get", resourceType, name, "-n", c.namespace)
+ return utils.Run(cmd)
+}
+
+// GetWithJsonPath retrieves a specific field from a resource
+// If name is empty, retrieves from all resources of the given type
+func (c *Client) GetWithJsonPath(resourceType, name, jsonPath string) (string, error) {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return "", fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceType(resourceType); err != nil {
+ return "", fmt.Errorf("resource type validation failed: %w", err)
+ }
+ if name != "" {
+ if err := ValidateResourceName(name); err != nil {
+ return "", fmt.Errorf("resource name validation failed: %w", err)
+ }
+ }
+ if err := ValidateJSONPath(jsonPath); err != nil {
+ return "", fmt.Errorf("jsonPath validation failed: %w", err)
+ }
+
+ // Build command args based on whether name is specified
+ args := []string{"get", resourceType}
+
+ // Only include name if it's not empty (empty name means get all resources)
+ if name != "" {
+ args = append(args, name)
+ }
+
+ args = append(args, "-n", c.namespace, "-o", fmt.Sprintf("jsonpath={%s}", jsonPath))
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl %s", strings.Join(args, " "))
+
+ cmd := exec.Command("kubectl", args...)
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
+// GetAll retrieves all resources of a type with optional label selector
+func (c *Client) GetAll(resourceType string, labelSelector string) (string, error) {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return "", fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceType(resourceType); err != nil {
+ return "", fmt.Errorf("resource type validation failed: %w", err)
+ }
+ if labelSelector != "" {
+ if err := ValidateLabelSelector(labelSelector); err != nil {
+ return "", fmt.Errorf("label selector validation failed: %w", err)
+ }
+ }
+
+ args := []string{"get", resourceType, "-n", c.namespace}
+ if labelSelector != "" {
+ args = append(args, "-l", labelSelector)
+ }
+ args = append(args, "-o", "jsonpath={.items[*].metadata.name}")
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl %s", strings.Join(args, " "))
+
+ cmd := exec.Command("kubectl", args...)
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
// Wait runs `kubectl wait` for a resource condition (e.g. "condition=available")
// with the given timeout (a kubectl duration string such as "60s"). Blocks
// until the condition is met or the timeout expires.
//
// NOTE(review): unlike the other arguments, `condition` is not validated here.
// It is passed as a single exec argument (no shell involved), so it cannot be
// used for shell injection, but validating it would match the other methods —
// confirm whether that is intentional.
func (c *Client) Wait(resourceType, name, condition string, timeout string) error {
	// Validate parameters to prevent command injection
	if err := ValidateNamespace(c.namespace); err != nil {
		return fmt.Errorf("namespace validation failed: %w", err)
	}
	if err := ValidateResourceType(resourceType); err != nil {
		return fmt.Errorf("resource type validation failed: %w", err)
	}
	if err := ValidateResourceName(name); err != nil {
		return fmt.Errorf("resource name validation failed: %w", err)
	}
	if err := ValidateTimeout(timeout); err != nil {
		return fmt.Errorf("timeout validation failed: %w", err)
	}

	// Log command for audit and reproducibility
	log.Printf("KUBECTL_CMD: kubectl wait --for=%s --timeout=%s %s/%s -n %s", condition, timeout, resourceType, name, c.namespace)

	cmd := exec.Command("kubectl", "wait",
		fmt.Sprintf("--for=%s", condition),
		fmt.Sprintf("--timeout=%s", timeout),
		fmt.Sprintf("%s/%s", resourceType, name),
		"-n", c.namespace)
	_, err := utils.Run(cmd)
	return err
}
+
+// Delete deletes a resource
+func (c *Client) Delete(resourceType, name string) error {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceType(resourceType); err != nil {
+ return fmt.Errorf("resource type validation failed: %w", err)
+ }
+ if err := ValidateResourceName(name); err != nil {
+ return fmt.Errorf("resource name validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl delete %s %s -n %s", resourceType, name, c.namespace)
+
+ cmd := exec.Command("kubectl", "delete", resourceType, name, "-n", c.namespace)
+ _, err := utils.Run(cmd)
+ return err
+}
+
+// CreateNamespace creates a namespace
+func CreateNamespace(name string) error {
+ // Validate namespace to prevent command injection
+ if err := ValidateNamespace(name); err != nil {
+ return fmt.Errorf("namespace validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl create ns %s", name)
+
+ cmd := exec.Command("kubectl", "create", "ns", name)
+ _, err := utils.Run(cmd)
+ return err
+}
+
+// GetNamespace retrieves namespace information
+func GetNamespace(name string) (*corev1.Namespace, error) {
+ // Validate namespace to prevent command injection
+ if err := ValidateNamespace(name); err != nil {
+ return nil, fmt.Errorf("namespace validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl get ns %s -o json", name)
+
+ cmd := exec.Command("kubectl", "get", "ns", name, "-o", "json")
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+
+ var ns corev1.Namespace
+ if err := json.Unmarshal(output, &ns); err != nil {
+ return nil, fmt.Errorf("failed to parse namespace JSON: %w", err)
+ }
+
+ return &ns, nil
+}
+
// DeleteNamespace deletes a namespace with retry and force-delete fallback.
// Handles operator CRD resources with finalizers to prevent stuck namespaces.
//
// Strategy, in order:
//  1. Normal `kubectl delete ns` with the caller's timeout, then wait for the
//     namespace object to actually disappear.
//  2. On failure: strip finalizers from known operator CRD resources in the
//     namespace and force-delete them, strip the namespace's own finalizers,
//     then force-delete the namespace with a short 10s timeout.
//  3. Wait up to 30s more; if the namespace still exists, log and return nil —
//     the test is not failed, cleanup is assumed to complete eventually.
func DeleteNamespace(name string, timeout string) error {
	// Validate parameters to prevent command injection
	if err := ValidateNamespace(name); err != nil {
		return fmt.Errorf("namespace validation failed: %w", err)
	}
	if err := ValidateTimeout(timeout); err != nil {
		return fmt.Errorf("timeout validation failed: %w", err)
	}

	// Log command for audit and reproducibility
	log.Printf("KUBECTL_CMD: kubectl delete ns %s --timeout=%s", name, timeout)

	// Parse timeout duration for wait logic
	timeoutDuration, err := parseDuration(timeout)
	if err != nil {
		return fmt.Errorf("invalid timeout format: %w", err)
	}

	// First try: normal delete
	cmd := exec.Command("kubectl", "delete", "ns", name, fmt.Sprintf("--timeout=%s", timeout))
	_, err = utils.Run(cmd)
	if err == nil {
		// Wait for namespace to actually disappear
		return waitForNamespaceDeletion(name, timeoutDuration)
	}

	// If normal delete fails or times out, force cleanup CRD resources first
	fmt.Printf("⚠️ Namespace %s deletion failed, attempting force cleanup\n", name)

	// Clean up operator CRD resources that might have finalizers
	crdTypes := []string{
		"vectorpipeline",
		"vectoraggregator",
		"vector",
		"clustervectorpipeline",
		"clustervectoraggregator",
	}

	for _, crdType := range crdTypes {
		// Get all resources of this type
		cmd := exec.Command("kubectl", "get", crdType, "-n", name, "-o", "name")
		output, err := cmd.Output()
		if err != nil {
			continue // Resource type doesn't exist or no resources, skip
		}

		resources := strings.Fields(string(output))
		for _, resource := range resources {
			// Remove finalizers so the API server can complete deletion
			patchCmd := exec.Command("kubectl", "patch", resource, "-n", name,
				"-p", `{"metadata":{"finalizers":[]}}`,
				"--type=merge")
			_ = patchCmd.Run() // Ignore errors

			// Force delete
			deleteCmd := exec.Command("kubectl", "delete", resource, "-n", name,
				"--grace-period=0", "--force")
			_ = deleteCmd.Run() // Ignore errors
		}
	}

	// Remove namespace finalizers
	_ = exec.Command("kubectl", "patch", "ns", name,
		"-p", `{"metadata":{"finalizers":[]}}`,
		"--type=merge").Run()

	// Then force delete namespace with shorter timeout
	log.Printf("KUBECTL_CMD: kubectl delete ns %s --grace-period=0 --force --timeout=10s", name)
	cmd = exec.Command("kubectl", "delete", "ns", name,
		"--grace-period=0", "--force", "--timeout=10s")
	_, _ = utils.Run(cmd)

	// Wait for namespace to actually disappear, even after force delete
	waitErr := waitForNamespaceDeletion(name, 30*time.Second)
	if waitErr != nil {
		fmt.Printf("⚠️ Namespace %s still exists after cleanup, continuing anyway\n", name)
		return nil // Don't fail the test - namespace will be cleaned up eventually
	}

	return nil
}
+
// waitForNamespaceDeletion polls `kubectl get ns` every 2s until the namespace
// is gone or timeout elapses. Any non-zero kubectl exit is treated as
// "namespace not found", i.e. successful deletion.
func waitForNamespaceDeletion(name string, timeout time.Duration) error {
	const pollInterval = 2 * time.Second

	for deadline := time.Now().Add(timeout); time.Now().Before(deadline); time.Sleep(pollInterval) {
		if err := exec.Command("kubectl", "get", "ns", name).Run(); err != nil {
			// Namespace not found - deletion successful
			log.Printf("KUBECTL_CMD: namespace %s successfully deleted", name)
			return nil
		}
	}

	return fmt.Errorf("namespace %s still exists after %v", name, timeout)
}
+
+// parseDuration parses timeout strings like "30s", "5m", "1h"
+func parseDuration(timeout string) (time.Duration, error) {
+ // Extract numeric part and unit
+ if len(timeout) < 2 {
+ return 0, fmt.Errorf("invalid timeout: %s", timeout)
+ }
+
+ unit := timeout[len(timeout)-1:]
+ valueStr := timeout[:len(timeout)-1]
+
+ var value int
+ _, err := fmt.Sscanf(valueStr, "%d", &value)
+ if err != nil {
+ return 0, fmt.Errorf("invalid timeout value: %s", timeout)
+ }
+
+ switch unit {
+ case "s":
+ return time.Duration(value) * time.Second, nil
+ case "m":
+ return time.Duration(value) * time.Minute, nil
+ case "h":
+ return time.Duration(value) * time.Hour, nil
+ default:
+ return 0, fmt.Errorf("invalid timeout unit: %s (must be s, m, or h)", unit)
+ }
+}
+
+// GetPodLogs retrieves logs from a pod
+func (c *Client) GetPodLogs(podName string) (string, error) {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return "", fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceName(podName); err != nil {
+ return "", fmt.Errorf("pod name validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl logs %s -n %s", podName, c.namespace)
+
+ cmd := exec.Command("kubectl", "logs", podName, "-n", c.namespace)
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
+// GetPodLogsSince retrieves logs from a pod since a specific time
+func (c *Client) GetPodLogsSince(podName string, since string) (string, error) {
+ cmd := exec.Command("kubectl", "logs", podName, "-n", c.namespace, "--since", since)
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
+// GetPodLogsTail retrieves the last N lines of logs from a pod
+func (c *Client) GetPodLogsTail(podName string, lines int) (string, error) {
+ cmd := exec.Command("kubectl", "logs", podName, "-n", c.namespace, "--tail", fmt.Sprintf("%d", lines))
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
+// GetPodLogsSinceTime retrieves logs from a pod since a specific time with line limit
+// Uses --since-time for temporal filtering and --tail as a safety limit
+func (c *Client) GetPodLogsSinceTime(podName string, since time.Time, tailLines int) (string, error) {
+ // Format time as RFC3339 for Kubernetes
+ sinceTime := since.Format(time.RFC3339)
+
+ // Use both --since-time and --tail:
+ // --since-time filters logs by timestamp
+ // --tail provides safety limit if too many logs match
+ cmd := exec.Command("kubectl", "logs", podName, "-n", c.namespace,
+ "--since-time", sinceTime,
+ "--tail", fmt.Sprintf("%d", tailLines))
+ output, err := utils.Run(cmd)
+ return string(output), err
+}
+
+// GetPodsByLabel retrieves pod names matching a label selector
+func (c *Client) GetPodsByLabel(labelSelector string) ([]string, error) {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return nil, fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateLabelSelector(labelSelector); err != nil {
+ return nil, fmt.Errorf("label selector validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl get pods -n %s -l %s -o jsonpath={.items[*].metadata.name}", c.namespace, labelSelector)
+
+ cmd := exec.Command("kubectl", "get", "pods", "-n", c.namespace, "-l", labelSelector, "-o", "jsonpath={.items[*].metadata.name}")
+ output, err := utils.Run(cmd)
+ if err != nil {
+ return nil, err
+ }
+
+ podNames := strings.Fields(string(output))
+ return podNames, nil
+}
+
+// WaitForPodReady waits for a pod to become ready
+func (c *Client) WaitForPodReady(podName string, timeout string) error {
+ // Validate parameters to prevent command injection
+ if err := ValidateNamespace(c.namespace); err != nil {
+ return fmt.Errorf("namespace validation failed: %w", err)
+ }
+ if err := ValidateResourceName(podName); err != nil {
+ return fmt.Errorf("pod name validation failed: %w", err)
+ }
+ if err := ValidateTimeout(timeout); err != nil {
+ return fmt.Errorf("timeout validation failed: %w", err)
+ }
+
+ // Log command for audit and reproducibility
+ log.Printf("KUBECTL_CMD: kubectl wait --for=condition=Ready --timeout=%s pod/%s -n %s", timeout, podName, c.namespace)
+
+ cmd := exec.Command("kubectl", "wait",
+ "--for=condition=Ready",
+ fmt.Sprintf("--timeout=%s", timeout),
+ fmt.Sprintf("pod/%s", podName),
+ "-n", c.namespace)
+ _, err := utils.Run(cmd)
+ return err
+}
diff --git a/test/e2e/framework/kubectl/validation.go b/test/e2e/framework/kubectl/validation.go
new file mode 100644
index 00000000..40986df7
--- /dev/null
+++ b/test/e2e/framework/kubectl/validation.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "fmt"
+ "regexp"
+)
+
// ValidateNamespace checks a namespace against RFC 1123 DNS Label rules:
// 1-63 characters, lowercase alphanumerics and '-', starting and ending with
// an alphanumeric. An empty namespace is allowed, since cluster-scoped
// resources legitimately use none.
func ValidateNamespace(namespace string) error {
	if namespace == "" {
		// Allow empty namespace for cluster-scoped resources.
		return nil
	}

	if len(namespace) > 63 {
		return fmt.Errorf("namespace length must be 1-63 characters, got %d", len(namespace))
	}

	// RFC 1123 DNS Label: lowercase alphanumerics and hyphens only, with an
	// alphanumeric at both ends.
	dnsLabel := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)
	if !dnsLabel.MatchString(namespace) {
		return fmt.Errorf("invalid namespace format: %s (must match RFC 1123 DNS Label)", namespace)
	}

	return nil
}
+
// ValidateResourceName validates a Kubernetes resource name against RFC 1123
// DNS Subdomain rules: 1-253 characters, lowercase alphanumerics plus '-' and
// '.', where every dot-separated label starts and ends with an alphanumeric.
func ValidateResourceName(name string) error {
	if name == "" {
		return fmt.Errorf("resource name cannot be empty")
	}

	if len(name) > 253 {
		return fmt.Errorf("resource name length must be 1-253 characters, got %d", len(name))
	}

	// RFC 1123 DNS Subdomain: one or more DNS labels joined by dots.
	dnsSubdomain := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
	if !dnsSubdomain.MatchString(name) {
		return fmt.Errorf("invalid resource name format: %s (must match RFC 1123 DNS Subdomain)", name)
	}

	return nil
}
+
// ValidateResourceType validates a kubectl resource type token, e.g.
// "deployment" or a fully-qualified form like "pipelines.observability.io":
// lowercase alphanumerics with interior dots/hyphens, alphanumeric at both ends.
func ValidateResourceType(resourceType string) error {
	if resourceType == "" {
		return fmt.Errorf("resource type cannot be empty")
	}

	// Allow alphanumerics plus dots/hyphens for API-group-qualified types.
	typePattern := regexp.MustCompile(`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$`)
	if !typePattern.MatchString(resourceType) {
		return fmt.Errorf("invalid resource type format: %s", resourceType)
	}

	return nil
}
+
// ValidateLabelSelector performs a conservative character-set check on a label
// selector to block shell metacharacters. Allowed characters: alphanumerics,
// '.', '_', '-', '/' (label key prefixes), '=', ','. An empty selector is
// valid and means "no filter".
func ValidateLabelSelector(selector string) error {
	if selector == "" {
		// Empty selector is valid (no filter applied).
		return nil
	}

	selectorPattern := regexp.MustCompile(`^[a-zA-Z0-9\.\_\-/=,]+$`)
	if !selectorPattern.MatchString(selector) {
		return fmt.Errorf("invalid label selector format: %s", selector)
	}

	return nil
}
+
// ValidateTimeout validates timeout strings passed to kubectl --timeout flags.
//
// Any non-negative Go duration is accepted ("30s", "5m", "2m0s", "1h30m",
// "100ms"), which matches what kubectl itself parses. The previous regex both
// rejected valid durations such as "100ms" and accepted strings such as "30µ"
// or "30n" that kubectl would refuse.
func ValidateTimeout(timeout string) error {
	if timeout == "" {
		return fmt.Errorf("timeout cannot be empty")
	}

	// time.ParseDuration implements exactly the grammar kubectl uses, so this
	// is both the safest and the most permissive-but-correct validation.
	d, err := time.ParseDuration(timeout)
	if err != nil {
		return fmt.Errorf("invalid timeout format: %s (must be Go duration like '30s', '5m', '2m0s', or '1h30m')", timeout)
	}

	// Negative timeouts are syntactically valid durations but meaningless here.
	if d < 0 {
		return fmt.Errorf("invalid timeout value: %s (must not be negative)", timeout)
	}

	return nil
}
+
// ValidateJSONPath performs basic validation of a kubectl JSONPath expression,
// rejecting shell metacharacters while permitting the characters JSONPath
// needs: identifiers, dots, brackets, braces, quotes, ':', '*', '-',
// whitespace, ',', '@', '?', and '\' (needed for escaped dots in keys such as
// .data['agent\.json']).
func ValidateJSONPath(jsonPath string) error {
	if jsonPath == "" {
		return fmt.Errorf("jsonPath cannot be empty")
	}

	allowed := regexp.MustCompile(`^[\w\.\[\]\{\}'":\*\-\s,@\?\\]+$`)
	if !allowed.MatchString(jsonPath) {
		return fmt.Errorf("invalid jsonPath format: %s", jsonPath)
	}

	return nil
}
diff --git a/test/e2e/framework/kubectl/wait.go b/test/e2e/framework/kubectl/wait.go
new file mode 100644
index 00000000..cfaed942
--- /dev/null
+++ b/test/e2e/framework/kubectl/wait.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "time"
+
+ . "github.com/onsi/gomega"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework/config"
+ "github.com/kaasops/vector-operator/test/utils"
+)
+
+// WaitForDeploymentReady waits for a deployment to be created and ready
+// in the client's namespace. It fails the current spec via Gomega on
+// timeout rather than returning an error.
+func (c *Client) WaitForDeploymentReady(name string) {
+ // First wait for deployment to exist (with reduced timeout)
+ Eventually(func() error {
+ cmd := exec.Command("kubectl", "get", "deployment", name, "-n", c.namespace)
+ _, err := utils.Run(cmd)
+ return err
+ }, config.DeploymentCreateTimeout, config.DefaultPollInterval).Should(Succeed(),
+ "Deployment %s should be created in namespace %s", name, c.namespace)
+
+ // Then wait for it to be available via c.Wait (condition=available)
+ err := c.Wait("deployment", name, "condition=available", config.DeploymentReadyTimeout.String())
+ Expect(err).NotTo(HaveOccurred(),
+ "Deployment %s should become ready in namespace %s", name, c.namespace)
+}
+
+// waitForPipelineCheckResult polls .status.configCheckResult of the named
+// VectorPipeline until it equals want, failing the current spec (with
+// failDesc formatted against name and namespace) once the timeout elapses.
+func (c *Client) waitForPipelineCheckResult(name, want, failDesc string) {
+ Eventually(func() error {
+ result, err := c.GetWithJsonPath("vectorpipeline", name, ".status.configCheckResult")
+ if err != nil {
+ return err
+ }
+ if result != want {
+ return fmt.Errorf("configCheckResult is %q, want %q", result, want)
+ }
+ return nil
+ }, config.PipelineValidTimeout, config.SlowPollInterval).Should(Succeed(),
+ failDesc, name, c.namespace)
+}
+
+// WaitForPipelineValid waits for a VectorPipeline to become valid
+// (configCheckResult == "true").
+func (c *Client) WaitForPipelineValid(name string) {
+ c.waitForPipelineCheckResult(name, "true",
+ "Pipeline %s should become valid in namespace %s")
+}
+
+// WaitForPipelineInvalid waits for a VectorPipeline to become invalid
+// (configCheckResult == "false"); used by negative tests.
+func (c *Client) WaitForPipelineInvalid(name string) {
+ c.waitForPipelineCheckResult(name, "false",
+ "Pipeline %s should become invalid in namespace %s")
+}
+
+// WaitForServiceExists blocks until the named service can be fetched from
+// the client's namespace, failing the spec after config.ServiceCreateTimeout.
+func (c *Client) WaitForServiceExists(name string) {
+ fetch := func() error {
+ _, getErr := c.Get("service", name)
+ return getErr
+ }
+ Eventually(fetch, config.ServiceCreateTimeout, config.SlowPollInterval).Should(Succeed(),
+ "Service %s should be created in namespace %s", name, c.namespace)
+}
+
+// WaitForServiceCount polls services matching labelSelector until exactly
+// expectedCount are reported, failing the spec once timeout elapses.
+// c.GetAll returns whitespace-separated names; splitFields drops empty
+// tokens, so empty output naturally counts as zero services.
+func (c *Client) WaitForServiceCount(labelSelector string, expectedCount int, timeout time.Duration) {
+ countServices := func() (int, error) {
+ raw, err := c.GetAll("service", labelSelector)
+ if err != nil {
+ return 0, err
+ }
+ return len(splitFields(raw)), nil
+ }
+ Eventually(countServices, timeout, config.SlowPollInterval).Should(Equal(expectedCount),
+ "Expected %d services with label %s in namespace %s", expectedCount, labelSelector, c.namespace)
+}
+
+// splitFields splits space-separated fields and filters empty strings
+// (thin wrapper over strings.Fields, kept for call-site readability).
+func splitFields(s string) []string {
+ return strings.Fields(s)
+}
diff --git a/test/e2e/framework/lifecycle.go b/test/e2e/framework/lifecycle.go
new file mode 100644
index 00000000..d7af9475
--- /dev/null
+++ b/test/e2e/framework/lifecycle.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+
+ "github.com/kaasops/vector-operator/test/utils"
+)
+
+// SharedDependencies manages dependencies that are shared across all tests
+type SharedDependencies struct {
+ prometheusInstalled bool // true once InstallPrometheusOperator has been attempted
+ certManagerInstalled bool // true once InstallCertManager has been attempted
+ installTime time.Duration // wall-clock time spent in InstallSharedDependencies
+}
+
+// globalDeps is the package-level install state; nil means "not installed".
+// NOTE(review): access is unsynchronized — fine for a single-process
+// BeforeSuite/AfterSuite, not for parallel suite processes; confirm usage.
+var globalDeps *SharedDependencies
+
+// InstallSharedDependencies installs Prometheus and cert-manager once for all tests
+// This should be called in BeforeSuite
+// Idempotent within a process: a second call is a no-op. Install errors are
+// only logged, and the installed flags are set regardless, so Uninstall will
+// still attempt cleanup afterwards.
+func InstallSharedDependencies() {
+ if globalDeps != nil {
+ GinkgoWriter.Println("⚠️ Shared dependencies already installed, skipping...")
+ return
+ }
+
+ start := time.Now()
+ globalDeps = &SharedDependencies{}
+
+ By("installing Prometheus Operator (shared)")
+ err := utils.InstallPrometheusOperator()
+ if err != nil {
+ // Ignore AlreadyExists errors - dependencies might be already installed
+ GinkgoWriter.Printf("⚠️ Prometheus Operator installation returned error (might already exist): %v\n", err)
+ }
+ globalDeps.prometheusInstalled = true
+
+ By("installing cert-manager (shared)")
+ err = utils.InstallCertManager()
+ if err != nil {
+ // Ignore AlreadyExists errors - dependencies might be already installed
+ GinkgoWriter.Printf("⚠️ cert-manager installation returned error (might already exist): %v\n", err)
+ }
+ globalDeps.certManagerInstalled = true
+
+ globalDeps.installTime = time.Since(start)
+ GinkgoWriter.Printf("✅ Shared dependencies installed in %v\n", globalDeps.installTime)
+}
+
+// UninstallSharedDependencies removes Prometheus and cert-manager.
+// Call from AfterSuite; it is a no-op when InstallSharedDependencies
+// never ran. Resets globalDeps so a later Install starts fresh.
+func UninstallSharedDependencies() {
+ deps := globalDeps
+ if deps == nil {
+ return
+ }
+
+ By("uninstalling Prometheus Operator (shared)")
+ if deps.prometheusInstalled {
+ utils.UninstallPrometheusOperator()
+ }
+
+ By("uninstalling cert-manager (shared)")
+ if deps.certManagerInstalled {
+ utils.UninstallCertManager()
+ }
+
+ GinkgoWriter.Println("✅ Shared dependencies uninstalled")
+ globalDeps = nil
+}
+
+// AreSharedDependenciesInstalled reports whether both shared dependencies
+// have been installed (i.e. install was attempted) in this process.
+func AreSharedDependenciesInstalled() bool {
+ if globalDeps == nil {
+ return false
+ }
+ return globalDeps.prometheusInstalled && globalDeps.certManagerInstalled
+}
diff --git a/test/e2e/framework/recorder/recorder.go b/test/e2e/framework/recorder/recorder.go
new file mode 100644
index 00000000..72de48b1
--- /dev/null
+++ b/test/e2e/framework/recorder/recorder.go
@@ -0,0 +1,258 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recorder
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// TestRecorder records test operations for reproducibility and documentation
+type TestRecorder struct {
+ testName string // set via SetTestName; used in exported headers
+ namespace string // namespace all recorded commands target
+ steps []TestStep // ordered recording, appended by RecordStep
+ startTime time.Time // recorder creation time
+ stepOrder int // monotonically increasing 1-based step counter
+}
+
+// TestStep represents a single operation in a test
+type TestStep struct {
+ Order int
+ Command string // Exact kubectl or shell command
+ Description string // Human-readable description
+ Input string // YAML or other input data
+ Expected string // Expected result
+ WaitFor string // Wait condition (e.g., "condition=available")
+ Timeout string // Timeout for the operation
+}
+
+// NewTestRecorder builds a recorder bound to the given namespace with an
+// empty step list, a zeroed step counter, and the creation time recorded.
+func NewTestRecorder(namespace string) *TestRecorder {
+ r := &TestRecorder{
+ namespace: namespace,
+ startTime: time.Now(),
+ }
+ r.steps = make([]TestStep, 0)
+ return r
+}
+
+// SetTestName sets the test name for this recording
+func (r *TestRecorder) SetTestName(name string) {
+ r.testName = name
+}
+
+// RecordStep records a test step, assigning it the next 1-based order
+// number. The step is stored by value, so later mutation of the caller's
+// copy does not affect the recording.
+func (r *TestRecorder) RecordStep(step TestStep) {
+ r.stepOrder++
+ step.Order = r.stepOrder
+ r.steps = append(r.steps, step)
+}
+
+// GetSteps returns all recorded steps.
+// NOTE(review): this returns the internal slice, not a copy — callers
+// must treat it as read-only.
+func (r *TestRecorder) GetSteps() []TestStep {
+ return r.steps
+}
+
+// ExportAsShellScript exports the recorded steps as an executable shell script
+// (a standalone bash "playbook" with strict-mode settings, helper functions,
+// and one section per recorded step).
+func (r *TestRecorder) ExportAsShellScript() string {
+ var sb strings.Builder
+
+ // Script header
+ sb.WriteString("#!/bin/bash\n")
+ sb.WriteString("# E2E Test Playbook\n")
+ sb.WriteString(fmt.Sprintf("# Test: %s\n", r.testName))
+ sb.WriteString(fmt.Sprintf("# Namespace: %s\n", r.namespace))
+ sb.WriteString(fmt.Sprintf("# Generated: %s\n\n", time.Now().Format(time.RFC3339)))
+
+ // Shell settings for safety
+ sb.WriteString("set -e # Exit on error\n")
+ sb.WriteString("set -u # Exit on undefined variable\n")
+ sb.WriteString("set -o pipefail # Catch errors in pipes\n\n")
+
+ // Variables
+ sb.WriteString(fmt.Sprintf("NAMESPACE='%s'\n", r.namespace))
+ sb.WriteString("KUBECTL='kubectl'\n")
+ sb.WriteString("TMPDIR=$(mktemp -d)\n")
+ sb.WriteString("trap 'rm -rf $TMPDIR' EXIT\n\n")
+
+ // Helper functions
+ sb.WriteString(r.generateHelperFunctions())
+
+ // Main steps
+ sb.WriteString("# Test Steps\n")
+ sb.WriteString("echo '═══════════════════════════════════════════════════════════'\n")
+ sb.WriteString(fmt.Sprintf("echo 'Test: %s'\n", r.testName))
+ sb.WriteString("echo '═══════════════════════════════════════════════════════════'\n\n")
+
+ for _, step := range r.steps {
+ sb.WriteString(fmt.Sprintf("# Step %d: %s\n", step.Order, step.Description))
+ sb.WriteString("echo '───────────────────────────────────────────────────────────'\n")
+ sb.WriteString(fmt.Sprintf("log_info 'Step %d: %s'\n", step.Order, step.Description))
+ sb.WriteString("echo '───────────────────────────────────────────────────────────'\n")
+
+ // If there's input data, save it to a temporary file
+ // The quoted 'EOF' delimiter keeps the input verbatim (no shell
+ // expansion). NOTE(review): an input containing a line that is
+ // exactly "EOF" would terminate the heredoc early — confirm inputs
+ // can never contain such a line.
+ if step.Input != "" {
+ tmpFile := fmt.Sprintf("$TMPDIR/step-%d.yaml", step.Order)
+ sb.WriteString(fmt.Sprintf("cat <<'EOF' > %s\n", tmpFile))
+ sb.WriteString(step.Input)
+ sb.WriteString("\nEOF\n")
+
+ // Modify command to use the temp file
+ if strings.Contains(step.Command, "kubectl apply -f -") {
+ modifiedCmd := strings.Replace(step.Command, "kubectl apply -f -", fmt.Sprintf("kubectl apply -f %s", tmpFile), 1)
+ sb.WriteString(modifiedCmd + "\n")
+ } else {
+ sb.WriteString(step.Command + "\n")
+ }
+ } else {
+ sb.WriteString(step.Command + "\n")
+ }
+
+ // Add expected result as comment
+ if step.Expected != "" {
+ sb.WriteString(fmt.Sprintf("# Expected: %s\n", step.Expected))
+ }
+
+ // Add wait condition if specified
+ if step.WaitFor != "" {
+ sb.WriteString(fmt.Sprintf("# Wait for: %s (timeout: %s)\n", step.WaitFor, step.Timeout))
+ }
+
+ sb.WriteString("\n")
+ }
+
+ // Success message
+ sb.WriteString("echo '═══════════════════════════════════════════════════════════'\n")
+ sb.WriteString("log_success 'Test completed successfully!'\n")
+ sb.WriteString("echo '═══════════════════════════════════════════════════════════'\n")
+
+ return sb.String()
+}
+
+// ExportAsMarkdown exports the recorded steps as Markdown documentation:
+// a header with generation time and namespace, a prerequisites section,
+// then one "### Step N" section per recorded step.
+func (r *TestRecorder) ExportAsMarkdown() string {
+ var sb strings.Builder
+
+ // Document header
+ sb.WriteString(fmt.Sprintf("# Test Plan: %s\n\n", r.testName))
+ sb.WriteString(fmt.Sprintf("**Generated**: %s\n\n", time.Now().Format(time.RFC3339)))
+ sb.WriteString(fmt.Sprintf("**Namespace**: `%s`\n\n", r.namespace))
+
+ // Prerequisites
+ sb.WriteString("## Prerequisites\n\n")
+ sb.WriteString("- Kubernetes cluster with Vector Operator installed\n")
+ sb.WriteString("- kubectl configured with cluster access\n")
+ sb.WriteString("- Appropriate RBAC permissions\n\n")
+
+ // Test steps
+ sb.WriteString("## Test Steps\n\n")
+
+ for _, step := range r.steps {
+ sb.WriteString(fmt.Sprintf("### Step %d: %s\n\n", step.Order, step.Description))
+
+ // Command
+ sb.WriteString("**Command**:\n")
+ sb.WriteString("```bash\n")
+ sb.WriteString(step.Command + "\n")
+ sb.WriteString("```\n\n")
+
+ // Input YAML if present
+ if step.Input != "" {
+ sb.WriteString("**Input YAML**:\n")
+ sb.WriteString("```yaml\n")
+ sb.WriteString(step.Input + "\n")
+ sb.WriteString("```\n\n")
+ }
+
+ // Wait condition if present (Timeout is only rendered alongside it)
+ if step.WaitFor != "" {
+ sb.WriteString(fmt.Sprintf("**Wait Condition**: `%s`\n\n", step.WaitFor))
+ if step.Timeout != "" {
+ sb.WriteString(fmt.Sprintf("**Timeout**: %s\n\n", step.Timeout))
+ }
+ }
+
+ // Expected result
+ if step.Expected != "" {
+ sb.WriteString(fmt.Sprintf("**Expected Result**: %s\n\n", step.Expected))
+ }
+
+ sb.WriteString("---\n\n")
+ }
+
+ return sb.String()
+}
+
+// generateHelperFunctions generates helper shell functions for the script.
+// The body is a raw string literal emitted verbatim into the generated bash
+// playbook: log_info/log_error/log_success plus check_deployment,
+// check_service and wait_for_pods probes. Do not add Go comments inside the
+// backticks — they would become part of the script.
+func (r *TestRecorder) generateHelperFunctions() string {
+ return `# Helper Functions
+log_info() {
+ echo "[INFO] $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+log_error() {
+ echo "[ERROR] $(date '+%Y-%m-%d %H:%M:%S') - $1" >&2
+}
+
+log_success() {
+ echo "[SUCCESS] $(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+check_deployment() {
+ local name=$1
+ local namespace=${2:-$NAMESPACE}
+ log_info "Checking deployment $name in namespace $namespace..."
+ kubectl get deployment "$name" -n "$namespace" &>/dev/null || {
+ log_error "Deployment $name not found!"
+ return 1
+ }
+ log_info "Deployment $name exists"
+}
+
+check_service() {
+ local name=$1
+ local namespace=${2:-$NAMESPACE}
+ log_info "Checking service $name in namespace $namespace..."
+ kubectl get service "$name" -n "$namespace" &>/dev/null || {
+ log_error "Service $name not found!"
+ return 1
+ }
+ log_info "Service $name exists"
+}
+
+wait_for_pods() {
+ local label=$1
+ local namespace=${2:-$NAMESPACE}
+ local timeout=${3:-120s}
+ log_info "Waiting for pods with label $label in namespace $namespace..."
+ kubectl wait --for=condition=Ready pods -l "$label" -n "$namespace" --timeout="$timeout" || {
+ log_error "Pods with label $label did not become ready within $timeout"
+ return 1
+ }
+ log_info "Pods are ready"
+}
+
+`
+}
+
+// Clear resets the recording: all steps are dropped and the order counter
+// returns to zero. Namespace, test name and start time are preserved.
+func (r *TestRecorder) Clear() {
+ r.stepOrder = 0
+ r.steps = []TestStep{}
+}
diff --git a/test/e2e/framework/resources.go b/test/e2e/framework/resources.go
new file mode 100644
index 00000000..9e02fe13
--- /dev/null
+++ b/test/e2e/framework/resources.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "github.com/kaasops/vector-operator/test/e2e/framework/assertions"
+)
+
+// Pipeline returns a pipeline resource wrapper for custom matchers,
+// scoped to the framework's namespace.
+func (f *Framework) Pipeline(name string) *assertions.PipelineResource {
+ return assertions.NewPipelineResource(f.namespace, name)
+}
+
+// ClusterPipeline returns a cluster-scoped pipeline resource wrapper for
+// custom matchers. The empty namespace string signals cluster scope to
+// assertions.NewPipelineResource — presumably mapping to the
+// ClusterVectorPipeline CRD; confirm in the assertions package.
+func (f *Framework) ClusterPipeline(name string) *assertions.PipelineResource {
+ return assertions.NewPipelineResource("", name)
+}
+
+// Service returns a service resource wrapper for custom matchers,
+// scoped to the framework's namespace.
+func (f *Framework) Service(name string) *assertions.ServiceResource {
+ return assertions.NewServiceResource(f.namespace, name)
+}
diff --git a/test/e2e/normal_mode_e2e_test.go b/test/e2e/normal_mode_e2e_test.go
new file mode 100644
index 00000000..1f250574
--- /dev/null
+++ b/test/e2e/normal_mode_e2e_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/kaasops/vector-operator/test/e2e/framework"
+ "github.com/kaasops/vector-operator/test/e2e/framework/config"
+)
+
+// Normal Mode tests verify that the operator works correctly for standard pipelines.
+// These tests cover basic Vector Agent and Aggregator functionality.
+// The suite is Ordered: specs share one uniquely-named framework/namespace
+// and run sequentially, so earlier specs' resources (e.g. "normal-agent")
+// are visible to later ones.
+var _ = Describe("Normal Mode", Label(config.LabelSmoke, config.LabelFast), Ordered, func() {
+ f := framework.NewUniqueFramework("test-normal-mode")
+
+ BeforeAll(func() {
+ f.Setup()
+ })
+
+ AfterAll(func() {
+ f.Teardown()
+ f.PrintMetrics()
+ })
+
+ Context("VectorPipeline basics", func() {
+ It("should create and validate a basic pipeline with agent", func() {
+ By("deploying Vector Agent")
+ f.ApplyTestData("normal-mode/agent.yaml")
+
+ // Give controller time to process Vector CR Create event and create daemonset
+ // Normal mode requires slightly more time as it involves more resources
+ // NOTE(review): fixed sleep is a flakiness risk — prefer polling for
+ // the DaemonSet if a wait condition exists; TODO confirm.
+ time.Sleep(5 * time.Second)
+
+ By("creating a VectorPipeline")
+ f.ApplyTestData("normal-mode/pipeline-basic.yaml")
+
+ By("waiting for pipeline to become valid")
+ f.WaitForPipelineValid("basic-pipeline")
+
+ By("verifying agent processes the pipeline configuration")
+ Eventually(func() error {
+ // Check that agent config contains the pipeline components
+ return f.VerifyAgentHasPipeline("normal-agent", "basic-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+ })
+
+ It("should handle pipeline with transforms and multiple sinks", func() {
+ By("creating a complex pipeline with transforms")
+ f.ApplyTestData("normal-mode/pipeline-complex.yaml")
+
+ By("waiting for pipeline to become valid")
+ f.WaitForPipelineValid("complex-pipeline")
+
+ By("verifying pipeline has expected components")
+ // Pipeline should have sources, transforms, and sinks all in agent
+ Eventually(func() error {
+ return f.VerifyAgentHasPipeline("normal-agent", "complex-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+ })
+ })
+
+ Context("VectorAggregator basics", func() {
+ It("should deploy aggregator and process pipelines", func() {
+ By("deploying VectorAggregator")
+ f.ApplyTestData("normal-mode/aggregator.yaml")
+ f.WaitForDeploymentReady("normal-aggregator-aggregator")
+
+ By("creating a pipeline with aggregator role")
+ f.ApplyTestData("normal-mode/pipeline-aggregator-role.yaml")
+
+ By("waiting for pipeline to become valid")
+ f.WaitForPipelineValid("aggregator-pipeline")
+
+ By("verifying pipeline has aggregator role")
+ role := f.GetPipelineStatus("aggregator-pipeline", "role")
+ Expect(role).To(Equal("aggregator"))
+
+ By("verifying aggregator processes the pipeline")
+ Eventually(func() error {
+ return f.VerifyAggregatorHasPipeline("normal-aggregator", "aggregator-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+ })
+ })
+
+ Context("Multiple pipelines in normal mode", func() {
+ It("should handle multiple pipelines without conflicts", func() {
+ By("creating 3 pipelines in normal mode")
+ for i := 1; i <= 3; i++ {
+ f.ApplyTestDataWithVars("normal-mode/pipeline-template.yaml",
+ map[string]string{"{{INDEX}}": fmt.Sprintf("pipeline-%d", i)})
+ }
+
+ By("waiting for all pipelines to become valid")
+ f.WaitForPipelineValid("pipeline-1")
+ f.WaitForPipelineValid("pipeline-2")
+ f.WaitForPipelineValid("pipeline-3")
+
+ By("verifying all pipelines are in agent configuration")
+ Eventually(func() error {
+ if err := f.VerifyAgentHasPipeline("normal-agent", "pipeline-1"); err != nil {
+ return err
+ }
+ if err := f.VerifyAgentHasPipeline("normal-agent", "pipeline-2"); err != nil {
+ return err
+ }
+ return f.VerifyAgentHasPipeline("normal-agent", "pipeline-3")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+ })
+ })
+
+ Context("Pipeline deletion in normal mode", func() {
+ It("should clean up pipeline from agent config when deleted", func() {
+ By("creating a pipeline")
+ f.ApplyTestData("normal-mode/pipeline-deletable.yaml")
+ f.WaitForPipelineValid("deletable-pipeline")
+
+ By("verifying pipeline is in agent config")
+ Eventually(func() error {
+ return f.VerifyAgentHasPipeline("normal-agent", "deletable-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+
+ By("deleting the pipeline")
+ f.DeleteResource("vectorpipeline", "deletable-pipeline")
+
+ By("verifying pipeline is removed from agent config")
+ Eventually(func() bool {
+ err := f.VerifyAgentHasPipeline("normal-agent", "deletable-pipeline")
+ return err != nil // Should return error when pipeline not found
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(BeTrue())
+ })
+ })
+
+ Context("Kubernetes logs source with label selectors", func() {
+ It("should collect logs from pods matching label selector", func() {
+ By("deploying a test pod with specific labels")
+ f.ApplyTestData("normal-mode/test-app-pod.yaml")
+ f.WaitForPodReady("test-app")
+
+ By("creating pipeline with kubernetes_logs source and label selector")
+ f.ApplyTestData("normal-mode/pipeline-kubernetes-logs.yaml")
+ f.WaitForPipelineValid("k8s-logs-pipeline")
+
+ By("verifying agent has kubernetes_logs source")
+ Eventually(func() error {
+ return f.VerifyAgentHasPipeline("normal-agent", "k8s-logs-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+
+ By("verifying pipeline role is Agent")
+ role := f.GetPipelineStatus("k8s-logs-pipeline", "role")
+ Expect(role).To(Equal("agent"), "kubernetes_logs pipeline should have agent role")
+ })
+ })
+
+ Context("Namespace isolation", func() {
+ It("should only collect logs from the pipeline's namespace", func() {
+ By("creating a separate namespace")
+ f.ApplyTestDataWithoutNamespaceReplacement("normal-mode/namespace-isolation-ns.yaml")
+
+ By("deploying Vector agent in isolated namespace")
+ // Note: In real scenario, the same Vector DaemonSet serves all namespaces
+ // But pipelines are namespace-scoped
+
+ By("deploying pods in both namespaces")
+ f.ApplyTestData("normal-mode/namespace-isolation-pod-main.yaml")
+ f.ApplyTestDataWithoutNamespaceReplacement("normal-mode/namespace-isolation-pod-isolated.yaml")
+ f.WaitForPodReady("main-namespace-pod")
+ f.WaitForPodReadyInNamespace("isolated-pod", "test-normal-mode-isolated")
+
+ By("creating pipeline in isolated namespace")
+ f.ApplyTestDataWithoutNamespaceReplacement("normal-mode/namespace-isolation-pipeline.yaml")
+ f.WaitForPipelineValidInNamespace("isolated-pipeline", "test-normal-mode-isolated")
+
+ By("verifying namespace isolation in configuration")
+ // The agent config should have extra_namespace_label_selector set to the pipeline's namespace
+ Eventually(func() error {
+ return f.VerifyAgentHasPipelineInNamespace("normal-agent", "isolated-pipeline", "test-normal-mode-isolated")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+ })
+ })
+
+ Context("ClusterVectorPipeline", func() {
+ It("should collect logs from multiple namespaces", func() {
+ By("creating ClusterVectorPipeline")
+ f.ApplyTestDataWithoutNamespaceReplacement("normal-mode/cluster-pipeline.yaml")
+ f.WaitForClusterPipelineValid("cluster-wide-pipeline")
+
+ By("deploying test pods in different namespaces with matching labels")
+ f.ApplyTestData("normal-mode/cluster-pipeline-pod-ns1.yaml")
+ f.ApplyTestDataWithoutNamespaceReplacement("normal-mode/cluster-pipeline-pod-ns2.yaml")
+ f.WaitForPodReady("cluster-monitored-pod-1")
+ f.WaitForPodReadyInNamespace("cluster-monitored-pod-2", "test-normal-mode-isolated")
+
+ By("verifying agent processes the ClusterVectorPipeline")
+ Eventually(func() error {
+ return f.VerifyAgentHasClusterPipeline("normal-agent", "cluster-wide-pipeline")
+ }, config.ServiceCreateTimeout, config.DefaultPollInterval).Should(Succeed())
+
+ By("verifying pipeline role is Agent")
+ role := f.GetClusterPipelineStatus("cluster-wide-pipeline", "role")
+ Expect(role).To(Equal("agent"), "ClusterVectorPipeline with kubernetes_logs should have agent role")
+ })
+ })
+})
diff --git a/test/e2e/scripts/README.md b/test/e2e/scripts/README.md
new file mode 100644
index 00000000..1669ae19
--- /dev/null
+++ b/test/e2e/scripts/README.md
@@ -0,0 +1,40 @@
+# E2E Test Scripts
+
+Utilities for working with e2e test results and the test environment.
+
+## Available Scripts
+
+### generate_report.py
+
+Generates an interactive HTML pivot grid report from e2e test results.
+
+**Usage:**
+```bash
+# From project root
+make test-report
+
+# Or directly
+cd test/e2e/results
+python3 ../scripts/generate_report.py
+```
+
+**What it does:**
+- Scans all `run-*` directories in `test/e2e/results/`
+- Parses test metadata and results from each run
+- Generates `test_results_report.html` with interactive pivot grid
+- Shows test stability across multiple runs (flaky tests, always-failing tests, etc.)
+
+**Requirements:**
+- Python 3.6+
+- Test results in `test/e2e/results/run-YYYY-MM-DD-HHMMSS/` format
+
+**Output:**
+- `test/e2e/results/test_results_report.html` - Interactive HTML report
+
+## Adding New Scripts
+
+When adding new test utilities:
+1. Place the script in this directory
+2. Update this README with usage instructions
+3. Add a Makefile target if appropriate (see `make help`)
+4. Ensure the script has proper error handling and help text
diff --git a/test/e2e/scripts/generate_report.py b/test/e2e/scripts/generate_report.py
new file mode 100644
index 00000000..d02c1bce
--- /dev/null
+++ b/test/e2e/scripts/generate_report.py
@@ -0,0 +1,4554 @@
+#!/usr/bin/env python3
+"""
+Generate HTML Pivot Grid Report for E2E Test Results (Enhanced V2)
+
+Features:
+- Interactive pivot grid showing test results across multiple runs
+- Trend analysis charts (Pass Rate, Duration)
+- Advanced Log Viewer with ANSI support and filtering
+- Deep Flakiness Analysis (Score, Patterns)
+- Run Comparison (New Failures, Fixed Tests)
+- Smart artifact matching
+- Filtering and Search
+"""
+
+import json
+import sys
+import html
+import re
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Any, Optional, Set
+from dataclasses import dataclass, field
+
+# --- SVG Icons ---
+
+def svg_icon(name: str, size: int = 16, color: str = 'currentColor') -> str:
+ """Generate inline SVG icons.
+
+ Returns the markup registered for ``name``, or '' for unknown names.
+ NOTE(review): in this revision every icon maps to an empty f-string and
+ the ``size``/``color`` parameters are never used — the SVG payloads look
+ stripped. Confirm against the intended asset set before relying on this.
+ """
+ icons = {
+ 'search': f'',
+ 'copy': f'',
+ 'download': f'',
+ 'error': f'',
+ 'warning': f'',
+ 'info': f'',
+ 'bug': f'',
+ 'chevron-up': f'',
+ 'chevron-down': f'',
+ 'arrow-up': f'',
+ 'arrow-down': f'',
+ 'wrap': f'',
+ 'sun': f'',
+ 'moon': f'',
+ }
+ return icons.get(name, '')
+
+def format_duration(seconds: float) -> str:
+ """Format duration in human-readable format"""
+ if not seconds or seconds < 0:
+ return 'N/A'
+
+ hours = int(seconds // 3600)
+ minutes = int((seconds % 3600) // 60)
+ secs = int(seconds % 60)
+
+ if hours > 0:
+ return f"{hours}h {minutes}m {secs}s"
+ elif minutes > 0:
+ return f"{minutes}m {secs}s"
+ else:
+ return f"{secs}s"
+
+# --- Data Structures ---
+
+@dataclass
+class TestResult:
+ """One test case's outcome within a single run."""
+ name: str # short display name
+ full_name: str # fully-qualified name (containers + leaf)
+ leaf_text: str # innermost test description
+ state: str # outcome label; exact value set depends on the producer — TODO confirm
+ runtime: float # duration in seconds
+ failure_message: str = ""
+ labels: List[str] = field(default_factory=list)
+ container_hierarchy: List[str] = field(default_factory=list)
+ start_time: str = ""
+ end_time: str = ""
+ artifact_metadata: Optional[Dict[str, Any]] = None
+
+@dataclass
+class TestRun:
+ """One complete e2e run: aggregate counters plus per-test results."""
+ run_id: str
+ start_time: str # ISO-8601 timestamp; parsed by date_str
+ total_tests: int
+ passed_tests: int
+ failed_tests: int
+ environment: Dict[str, Any]
+ total_runtime: float
+ test_output_log: str
+ tests: List[TestResult]
+ git_commit: str = ""
+ git_branch: str = ""
+ git_dirty: str = ""
+ description: str = ""
+
+ @property
+ def date_str(self) -> str:
+ # Raises ValueError if start_time is not a valid ISO-8601 string.
+ return datetime.fromisoformat(self.start_time).strftime('%Y-%m-%d %H:%M')
+
+@dataclass
+class PivotRow:
+ """One row of the pivot grid: a single test aggregated across runs."""
+ test_name: str
+ full_test_name: str
+ leaf_text: str
+ container_hierarchy: List[str]
+ runs: Dict[str, Any] = field(default_factory=dict) # run_id -> result dict
+
+ # Stats
+ total_runs: int = 0
+ pass_count: int = 0
+ fail_count: int = 0
+ skip_count: int = 0
+ pass_rate: float = 0.0
+ total_runtime: float = 0.0
+ avg_runtime: float = 0.0
+ min_runtime: float = float('inf') # inf so the first observed runtime always lowers it
+ max_runtime: float = 0.0
+
+ # Flakiness
+ is_flaky: bool = False
+ flakiness_score: float = 0.0
+ flakiness_pattern: str = 'stable'
+# --- Templates ---
+
+class ReportTemplates:
+ """Holds HTML, CSS, and JS templates."""
+
+ CSS = """
+ :root {
+ /* Light theme colors */
+ --bg-primary: #ffffff;
+ --bg-secondary: #f8fafc;
+ --bg-tertiary: #fafbfc;
+ --bg-hover: #e0e7ff;
+ --cell-hover-bg: #eff6ff;
+ --cell-active-bg: #dbeafe;
+ --text-primary: #0f172a;
+ --text-secondary: #64748b;
+ --text-tertiary: #94a3b8;
+ --border-color: #e2e8f0;
+ --border-secondary: #cbd5e1;
+ --modal-backdrop: rgba(0,0,0,0.4);
+ --accent-color: #3b82f6;
+
+ /* Status colors */
+ --success-bg: #d1fae5;
+ --success-text: #065f46;
+ --error-bg: #fee2e2;
+ --error-text: #991b1b;
+ --warning-bg: #fef3c7;
+ --warning-text: #92400e;
+ --info-bg: #dbeafe;
+ --info-text: #1e40af;
+
+ /* Log viewer colors */
+ --log-bg: #1e293b;
+ --log-text: #e2e8f0;
+ --log-border: #334155;
+ --log-controls-bg: #334155;
+ --log-input-bg: #1e293b;
+ --log-input-border: #475569;
+ }
+
+ [data-theme="dark"] {
+ /* Dark theme colors */
+ --bg-primary: #1e293b;
+ --bg-secondary: #0f172a;
+ --bg-tertiary: #1e293b;
+ --bg-hover: #334155;
+ --cell-hover-bg: rgba(59, 130, 246, 0.2);
+ --cell-active-bg: rgba(59, 130, 246, 0.3);
+ --text-primary: #f1f5f9;
+ --text-secondary: #cbd5e1;
+ --text-tertiary: #94a3b8;
+ --border-color: #334155;
+ --border-secondary: #475569;
+ --modal-backdrop: rgba(0,0,0,0.7);
+ --accent-color: #60a5fa;
+
+ /* Status colors */
+ --success-bg: #064e3b;
+ --success-text: #a7f3d0;
+ --error-bg: #7f1d1d;
+ --error-text: #fca5a5;
+ --warning-bg: #78350f;
+ --warning-text: #fcd34d;
+ --info-bg: #1e3a8a;
+ --info-text: #93c5fd;
+
+ /* Log viewer colors */
+ --log-bg: #0f172a;
+ --log-text: #e2e8f0;
+ --log-border: #1e293b;
+ }
+
+ /* Row hover effect */
+ tbody tr {
+ transition: background-color 0.15s ease;
+ }
+ tbody tr:hover {
+ background-color: var(--bg-hover);
+ }
+ tbody tr:hover .test-name-cell,
+ tbody tr:hover .stats-cell {
+ background-color: var(--bg-hover);
+ }
+
+ * { margin: 0; padding: 0; box-sizing: border-box; }
+ body {
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+ background: var(--bg-secondary);
+ color: var(--text-primary);
+ min-height: 100vh;
+ }
+
+ /* SVG Icons */
+ svg {
+ display: inline-block;
+ vertical-align: middle;
+ flex-shrink: 0;
+ }
+ button svg, .filter-label svg, .log-actions-item svg {
+ margin-right: 6px;
+ }
+
+ .container { background: var(--bg-primary); min-height: 100vh; display: flex; flex-direction: column; }
+
+ /* Header & Tabs */
+ .header {
+ padding: 20px 40px;
+ border-bottom: 1px solid var(--border-color);
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ background: var(--bg-primary);
+ }
+ .header h1 { font-size: 22px; font-weight: 600; margin-bottom: 4px; }
+
+ .tabs {
+ display: flex;
+ padding: 0 40px;
+ background: var(--bg-primary);
+ border-bottom: 1px solid var(--border-color);
+ gap: 24px;
+ }
+ .tab-btn {
+ padding: 16px 4px;
+ background: none;
+ border: none;
+ border-bottom: 2px solid transparent;
+ color: var(--text-secondary);
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s;
+ }
+ .tab-btn:hover { color: var(--text-primary); }
+ .tab-btn.active {
+ color: var(--accent-color);
+ border-bottom-color: var(--accent-color);
+ }
+
+ .tab-content { display: none; padding: 24px 40px; }
+ .tab-content.active { display: block; }
+
+ /* Summary Cards */
+ .summary-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
+ gap: 16px;
+ margin-bottom: 24px;
+ }
+ .card {
+ background: var(--bg-primary);
+ padding: 20px;
+ border-radius: 8px;
+ border: 1px solid var(--border-color);
+ text-align: center;
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ min-height: 100px;
+ }
+ .card .label { font-size: 13px; color: var(--text-secondary); text-transform: uppercase; font-weight: 600; margin-bottom: 8px; }
+ .card .value { font-size: 32px; font-weight: 700; color: var(--text-primary); text-align: center; display: block; }
+ .card.passed .value { color: #10b981; }
+ .card.failed .value { color: #ef4444; }
+
+ /* Tooltips */
+ .tooltip {
+ position: relative;
+ cursor: help;
+ }
+ .card.tooltip {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ min-height: 100px;
+ }
+ .pass-rate.tooltip {
+ display: inline-block;
+ }
+ .badge.tooltip {
+ position: relative;
+ display: inline-block;
+ }
+ .tooltip .tooltiptext {
+ visibility: hidden;
+ width: 250px;
+ background-color: #1f2937;
+ color: #fff;
+ text-align: left;
+ border-radius: 6px;
+ padding: 8px 12px;
+ position: absolute;
+ z-index: 1000;
+ top: 100%;
+ margin-top: 8px;
+ left: 50%;
+ margin-left: -125px;
+ opacity: 0;
+ transition: opacity 0.3s;
+ font-size: 12px;
+ line-height: 1.4;
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.3);
+ pointer-events: none;
+ }
+ .tooltip .tooltiptext::after {
+ content: "";
+ position: absolute;
+ bottom: 100%;
+ left: 50%;
+ margin-left: -5px;
+ border-width: 5px;
+ border-style: solid;
+ border-color: transparent transparent #1f2937 transparent;
+ }
+ .tooltip:hover .tooltiptext {
+ visibility: visible;
+ opacity: 1;
+ }
+
+ /* Charts */
+ .charts-container {
+ display: grid;
+ grid-template-columns: 1fr 1fr;
+ gap: 24px;
+ margin-bottom: 32px;
+ }
+ .chart-wrapper {
+ background: var(--bg-primary);
+ padding: 20px;
+ border-radius: 12px;
+ border: 1px solid var(--border-color);
+ height: 350px;
+ }
+
+ /* Flaky Section */
+ .flaky-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
+ gap: 16px;
+ margin-top: 16px;
+ }
+ .flaky-card {
+ text-align: left;
+ cursor: pointer;
+ transition: transform 0.2s;
+ }
+ .flaky-card:hover { transform: translateY(-2px); border-color: var(--accent-color); }
+ .flaky-header { display: flex; justify-content: space-between; margin-bottom: 8px; }
+ .flaky-name { font-weight: 500; margin-bottom: 8px; font-size: 14px; overflow: hidden; text-overflow: ellipsis; }
+ .flaky-stats { font-size: 12px; color: var(--text-secondary); }
+
+ /* Table Styles */
+ .table-wrapper {
+ overflow-x: auto;
+ border: 1px solid var(--border-color);
+ border-radius: 8px;
+ }
+ table { width: 100%; border-collapse: collapse; font-size: 14px; }
+ th, td {
+ padding: 12px 16px;
+ border-bottom: 1px solid var(--border-color);
+ text-align: left;
+ white-space: nowrap;
+ }
+ th {
+ background: var(--bg-secondary);
+ font-weight: 600;
+ color: var(--text-secondary);
+ position: sticky;
+ top: 0;
+ z-index: 10;
+ }
+ th.run-header {
+ cursor: pointer;
+ transition: all 0.2s ease;
+ }
+ th.run-header:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ transform: scale(1.02);
+ }
+ th:first-child, td:first-child {
+ position: sticky;
+ left: 0;
+ background: var(--bg-primary);
+ z-index: 11;
+ border-right: 1px solid var(--border-color);
+ min-width: 500px;
+ max-width: 700px;
+ white-space: normal;
+ }
+ th:first-child { background: var(--bg-secondary); z-index: 12; }
+
+ /* Breadcrumb Test Names */
+ .test-name-cell {
+ padding: 10px 12px !important;
+ line-height: 1.5;
+ position: relative;
+ }
+ .test-name-wrapper {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ }
+ .copy-test-name-btn {
+ opacity: 0;
+ transition: opacity 0.2s ease;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ border-radius: 4px;
+ padding: 4px 8px;
+ cursor: pointer;
+ font-size: 11px;
+ color: var(--text-secondary);
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ white-space: nowrap;
+ }
+ .copy-test-name-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--text-tertiary);
+ }
+ .copy-test-name-btn:active {
+ transform: scale(0.95);
+ }
+ tbody tr:hover .copy-test-name-btn {
+ opacity: 1;
+ }
+ .test-breadcrumb {
+ display: flex;
+ align-items: center;
+ flex-wrap: wrap;
+ gap: 6px;
+ font-size: 13px;
+ }
+ .breadcrumb-item {
+ display: inline-flex;
+ align-items: center;
+ }
+ .breadcrumb-container {
+ color: var(--text-secondary);
+ font-weight: 500;
+ }
+ .breadcrumb-container.level-0 {
+ color: var(--text-primary);
+ font-weight: 600;
+ }
+ .breadcrumb-separator {
+ color: var(--text-tertiary);
+ margin: 0 6px;
+ font-weight: 300;
+ user-select: none;
+ }
+ .breadcrumb-leaf {
+ color: var(--text-primary);
+ font-weight: 400;
+ }
+
+ .result-cell {
+ text-align: center;
+ cursor: pointer !important;
+ transition: all 0.2s ease;
+ position: relative;
+ }
+ .result-cell:hover {
+ background: var(--cell-hover-bg) !important;
+ transform: translateY(-1px);
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+ }
+ .result-cell:active {
+ transform: translateY(0);
+ }
+ .result-cell * {
+ cursor: pointer !important;
+ pointer-events: none;
+ }
+ .badge {
+ padding: 4px 8px;
+ border-radius: 4px;
+ font-size: 12px;
+ font-weight: 600;
+ text-transform: uppercase;
+ pointer-events: none;
+ }
+ .badge.passed { background: var(--success-bg); color: var(--success-text); }
+ .badge.failed { background: var(--error-bg); color: var(--error-text); }
+ .badge.skipped { background: var(--warning-bg); color: var(--warning-text); }
+
+ /* Runtime Badges */
+ .runtime { margin-left: 6px; font-size: 11px; pointer-events: none; }
+ .runtime-fast { color: #10b981; }
+ .runtime-medium { color: #f59e0b; }
+ .runtime-slow { color: #ef4444; }
+
+ /* Stats Column */
+ .stats-col, .stats-cell {
+ position: sticky;
+ left: 500px; /* after test name */
+ background: var(--bg-primary);
+ border-right: 1px solid var(--border-color);
+ min-width: 120px;
+ text-align: center;
+ font-size: 12px;
+ z-index: 11;
+ }
+ .stats-col { z-index: 12; background: var(--bg-secondary); }
+ .pass-rate { padding: 4px 8px; border-radius: 4px; margin-bottom: 4px; display: inline-block; font-weight: bold; }
+ .rate-high { background: var(--success-bg); color: var(--success-text); }
+ .rate-medium { background: var(--warning-bg); color: var(--warning-text); }
+ .rate-low { background: var(--error-bg); color: var(--error-text); }
+ .counts { color: var(--text-secondary); margin-bottom: 2px; }
+ .avg-time { color: var(--text-tertiary); font-size: 10px; }
+
+ /* Filters */
+ .filters {
+ display: flex;
+ gap: 16px;
+ margin-bottom: 20px;
+ flex-wrap: wrap;
+ }
+ .filter-input {
+ padding: 8px 12px;
+ border: 1px solid var(--border-secondary);
+ border-radius: 6px;
+ background: var(--bg-primary);
+ color: var(--text-primary);
+ min-width: 200px;
+ }
+
+ /* Modal */
+ .modal {
+ display: none;
+ position: fixed;
+ top: 0; left: 0; width: 100%; height: 100%;
+ background: var(--modal-backdrop);
+ z-index: 1000;
+ backdrop-filter: blur(2px);
+ }
+ .modal-content {
+ background: var(--bg-primary);
+ width: 90%; max-width: 1200px;
+ margin: 20px auto 30px;
+ border-radius: 12px;
+ max-height: calc(100vh - 50px);
+ display: flex;
+ flex-direction: column;
+ box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.25);
+ }
+ .modal-header {
+ padding: 20px 30px;
+ border-bottom: 1px solid var(--border-color);
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ flex-shrink: 0;
+ }
+ .modal-header h2 {
+ font-size: 18px;
+ margin: 0;
+ }
+ .modal-body {
+ padding: 0;
+ overflow-y: auto;
+ background: var(--bg-secondary);
+ flex: 1;
+ }
+ .close-btn {
+ font-size: 28px;
+ cursor: pointer;
+ color: var(--text-secondary);
+ transition: color 0.2s;
+ }
+ .close-btn:hover {
+ color: var(--text-primary);
+ }
+
+ /* Modal Tabs */
+ .modal-tabs {
+ display: flex;
+ background: var(--bg-primary);
+ border-bottom: 1px solid var(--border-color);
+ padding: 0 30px;
+ gap: 24px;
+ }
+ .modal-tab {
+ padding: 14px 4px;
+ background: none;
+ border: none;
+ border-bottom: 2px solid transparent;
+ color: var(--text-secondary);
+ font-weight: 500;
+ font-size: 14px;
+ cursor: pointer;
+ transition: all 0.2s;
+ }
+ .modal-tab:hover {
+ color: var(--text-primary);
+ }
+ .modal-tab.active {
+ color: var(--accent-color);
+ border-bottom-color: var(--accent-color);
+ }
+ .modal-tab-content {
+ display: none;
+ padding: 30px;
+ }
+ .modal-tab-content.active {
+ display: block;
+ }
+
+ /* Test Detail Sections */
+ .test-detail-section {
+ background: var(--bg-primary);
+ border-radius: 8px;
+ padding: 20px;
+ margin-bottom: 16px;
+ border: 1px solid var(--border-color);
+ }
+ .test-detail-section h4 {
+ margin: 0 0 16px 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: var(--text-primary);
+ text-transform: uppercase;
+ letter-spacing: 0.5px;
+ }
+ .detail-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
+ gap: 16px;
+ }
+ .detail-item {
+ display: flex;
+ flex-direction: column;
+ }
+ .detail-label {
+ font-size: 11px;
+ color: var(--text-tertiary);
+ text-transform: uppercase;
+ letter-spacing: 0.5px;
+ margin-bottom: 4px;
+ }
+ .detail-value {
+ font-size: 14px;
+ color: var(--text-primary);
+ font-weight: 500;
+ }
+ .detail-value code {
+ background: var(--bg-secondary);
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-size: 12px;
+ }
+
+ /* Artifact List */
+ .artifact-list {
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ }
+ .artifact-item {
+ background: var(--bg-secondary);
+ border: 1px solid var(--border-color);
+ border-radius: 6px;
+ padding: 16px;
+ }
+ .artifact-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 12px;
+ }
+ .artifact-name {
+ font-weight: 600;
+ font-size: 14px;
+ color: var(--text-primary);
+ }
+ .artifact-meta {
+ font-size: 11px;
+ color: var(--text-tertiary);
+ }
+
+ /* Error Box */
+ .error-box {
+ background: var(--error-bg);
+ border: 1px solid var(--error-text);
+ border-radius: 8px;
+ padding: 16px;
+ margin-top: 16px;
+ }
+ .error-box h4 {
+ margin: 0 0 12px 0;
+ color: var(--error-text);
+ font-size: 14px;
+ }
+ .error-box pre {
+ margin: 0;
+ color: var(--error-text);
+ font-size: 12px;
+ white-space: pre-wrap;
+ word-break: break-word;
+ }
+
+ /* Log Viewer */
+ .log-viewer {
+ background: var(--log-bg);
+ color: var(--log-text);
+ padding: 0;
+ border-radius: 8px;
+ font-family: 'JetBrains Mono', monospace;
+ font-size: 13px;
+ max-height: 600px;
+ overflow-y: auto;
+ line-height: 1.6;
+ counter-reset: line-number;
+ }
+ .log-viewer.with-line-numbers {
+ padding-left: 0;
+ }
+ .log-line {
+ display: flex;
+ padding: 2px 0;
+ position: relative;
+ }
+ .log-line:hover {
+ background: rgba(59, 130, 246, 0.15);
+ }
+ .log-line-number {
+ counter-increment: line-number;
+ flex-shrink: 0;
+ width: 50px;
+ padding: 0 12px;
+ text-align: right;
+ color: var(--text-tertiary);
+ user-select: none;
+ border-right: 1px solid var(--border-color);
+ font-size: 11px;
+ line-height: 1.6;
+ }
+ .log-line-number::before {
+ content: counter(line-number);
+ }
+ .log-line-number:hover {
+ color: var(--text-secondary);
+ cursor: pointer;
+ }
+ .log-line-content {
+ flex: 1;
+ padding: 0 12px;
+ white-space: pre-wrap;
+ word-break: break-word;
+ }
+
+ /* Log syntax highlighting */
+ .log-viewer .log-passed {
+ color: #10b981;
+ font-weight: 600;
+ }
+ .log-viewer .log-failed {
+ color: #ef4444;
+ font-weight: 600;
+ background: rgba(239, 68, 68, 0.1);
+ padding: 2px 4px;
+ border-radius: 2px;
+ }
+ .log-viewer .log-step {
+ color: #3b82f6;
+ font-weight: 600;
+ }
+ .log-viewer .log-error {
+ color: #f97316;
+ font-weight: 600;
+ background: rgba(249, 115, 22, 0.1);
+ padding: 2px 4px;
+ border-radius: 2px;
+ }
+ .log-viewer .log-timestamp {
+ color: #6366f1;
+ opacity: 0.8;
+ font-size: 0.95em;
+ }
+ .log-viewer .log-duration {
+ color: #8b5cf6;
+ font-weight: 500;
+ }
+ .log-viewer .log-command {
+ color: #06b6d4;
+ font-style: italic;
+ }
+ .log-viewer .log-test-name {
+ color: #fbbf24;
+ font-weight: 500;
+ }
+ .log-viewer .log-warning {
+ color: #f59e0b;
+ font-weight: 500;
+ }
+
+ /* Test separators in logs */
+ .log-test-separator {
+ border-top: 2px solid var(--border-color);
+ margin: 20px 0 16px 0;
+ padding-top: 16px;
+ position: relative;
+ }
+ .log-test-separator::before {
+ content: '━━━━';
+ position: absolute;
+ top: -13px;
+ left: 0;
+ background: var(--log-bg);
+ padding-right: 10px;
+ color: var(--border-color);
+ font-size: 14px;
+ letter-spacing: 2px;
+ }
+ .log-test-header {
+ font-weight: 600;
+ color: var(--accent-color);
+ font-size: 14px;
+ margin-bottom: 8px;
+ padding: 8px 12px;
+ background: var(--bg-hover);
+ border-left: 3px solid var(--accent-color);
+ border-radius: 4px;
+ }
+ .log-test-header .test-status {
+ float: right;
+ font-size: 12px;
+ padding: 2px 8px;
+ border-radius: 3px;
+ font-weight: 600;
+ }
+ .log-test-header .test-status.passed {
+ background: #d1fae5;
+ color: #065f46;
+ }
+ .log-test-header .test-status.failed {
+ background: #fee2e2;
+ color: #991b1b;
+ }
+
+ .log-controls {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ margin-bottom: 12px;
+ padding: 10px 12px;
+ background: linear-gradient(to bottom, var(--bg-primary), var(--bg-hover));
+ border: 1px solid var(--border-color);
+ border-radius: 8px;
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
+ flex-wrap: wrap;
+ }
+ .log-controls > strong {
+ margin-right: 8px;
+ font-size: 13px;
+ color: var(--text-primary);
+ }
+ .toolbar-separator {
+ width: 1px;
+ height: 24px;
+ background: var(--border-color);
+ margin: 0 4px;
+ }
+ .log-btn {
+ padding: 4px 12px;
+ background: var(--log-controls-bg);
+ border: 1px solid var(--log-border);
+ color: var(--log-text);
+ border-radius: 4px;
+ cursor: pointer;
+ }
+ .log-btn.active { background: var(--accent-color); color: white; }
+
+ /* Control groups */
+ .log-actions-menu,
+ .log-filters-menu {
+ position: relative;
+ display: inline-flex;
+ margin-right: 6px;
+ }
+ .log-search-controls {
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ padding: 4px 8px;
+ background: var(--bg-hover);
+ border-radius: 6px;
+ }
+ .log-nav-buttons {
+ display: flex;
+ gap: 4px;
+ padding: 4px 8px;
+ background: var(--bg-hover);
+ border-radius: 6px;
+ }
+ .log-display-controls {
+ display: flex;
+ gap: 6px;
+ align-items: center;
+ padding: 4px 8px;
+ background: var(--bg-hover);
+ border-radius: 6px;
+ }
+ .log-search-input {
+ padding: 4px 8px;
+ border: 1px solid var(--border-color);
+ border-radius: 4px;
+ font-size: 11px;
+ width: 180px;
+ outline: none;
+ transition: all 0.2s ease;
+ }
+ .log-search-input:focus {
+ border-color: var(--accent-color);
+ box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.1);
+ }
+ .log-search-btn {
+ padding: 4px 8px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 13px;
+ transition: all 0.2s ease;
+ min-width: 28px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
+ .log-search-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--accent-color);
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ transform: translateY(-1px);
+ }
+ .log-search-btn:active {
+ transform: translateY(0);
+ box-shadow: none;
+ }
+ .log-search-btn:disabled {
+ opacity: 0.4;
+ cursor: not-allowed;
+ }
+ .search-counter {
+ font-size: 11px;
+ color: var(--text-secondary);
+ min-width: 40px;
+ text-align: center;
+ }
+ /* Search result highlighting */
+ .search-highlight {
+ background: #bfdbfe;
+ color: #1e3a8a;
+ border-radius: 2px;
+ padding: 0 2px;
+ }
+ /* NOTE(review): theming in this file toggles the `data-theme` attribute on
+ <html> (see toggleTheme in the JS section and [data-theme="dark"] at the
+ top of this sheet), so a bare `.theme-dark` class selector never matches.
+ The attribute selector is added; the class selector is kept in case some
+ caller outside this view still adds the class. */
+ [data-theme="dark"] .search-highlight,
+ .theme-dark .search-highlight {
+ background: #1e3a8a;
+ color: #bfdbfe;
+ }
+ .search-highlight-current {
+ background: #3b82f6;
+ color: white;
+ border-radius: 2px;
+ padding: 0 2px;
+ font-weight: 600;
+ box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.3);
+ }
+ [data-theme="dark"] .search-highlight-current,
+ .theme-dark .search-highlight-current {
+ background: #60a5fa;
+ color: #0f172a;
+ }
+
+ /* Actions and Filters menus */
+ .log-actions-btn {
+ padding: 4px 8px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 16px;
+ transition: all 0.2s ease;
+ min-width: 32px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
+ .log-actions-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--accent-color);
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ transform: translateY(-1px);
+ }
+ .log-actions-btn:active {
+ transform: translateY(0);
+ box-shadow: none;
+ }
+ .log-actions-dropdown {
+ display: none;
+ position: absolute;
+ top: 100%;
+ left: 0;
+ margin-top: 4px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ border-radius: 6px;
+ box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
+ z-index: 1000;
+ min-width: 160px;
+ overflow: hidden;
+ }
+ .log-actions-dropdown.show {
+ display: block;
+ }
+ .log-actions-item {
+ padding: 8px 12px;
+ cursor: pointer;
+ font-size: 12px;
+ color: var(--text-primary);
+ transition: background 0.15s ease;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ }
+ .log-actions-item:hover {
+ background: var(--bg-hover);
+ }
+ .log-actions-divider {
+ height: 1px;
+ background: var(--border-color);
+ margin: 4px 0;
+ }
+
+ /* Filters menu */
+ .log-filters-btn {
+ padding: 4px 10px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 11px;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ }
+ .log-filters-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--accent-color);
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ transform: translateY(-1px);
+ }
+ .log-filters-btn:active {
+ transform: translateY(0);
+ box-shadow: none;
+ }
+ .log-filters-dropdown {
+ display: none;
+ position: absolute;
+ top: 100%;
+ left: 0;
+ margin-top: 4px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ border-radius: 6px;
+ box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
+ z-index: 1000;
+ min-width: 180px;
+ padding: 8px 0;
+ overflow: hidden;
+ }
+ .log-filters-dropdown.show {
+ display: block;
+ }
+ .log-filters-header {
+ padding: 4px 12px 8px;
+ font-size: 11px;
+ font-weight: 600;
+ color: var(--text-secondary);
+ text-transform: uppercase;
+ border-bottom: 1px solid var(--border-color);
+ margin-bottom: 4px;
+ }
+ .log-filter-item {
+ padding: 6px 12px;
+ cursor: pointer;
+ font-size: 12px;
+ color: var(--text-primary);
+ transition: background 0.15s ease;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ }
+ .log-filter-item:hover {
+ background: var(--bg-hover);
+ }
+ .log-filter-item input[type="checkbox"] {
+ margin: 0;
+ cursor: pointer;
+ }
+ .filter-label {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ flex: 1;
+ }
+ .filter-count {
+ margin-left: auto;
+ font-size: 11px;
+ color: var(--text-secondary);
+ background: var(--bg-hover);
+ padding: 2px 6px;
+ border-radius: 10px;
+ }
+ .log-filters-actions {
+ display: flex;
+ gap: 6px;
+ padding: 8px 12px 4px;
+ border-top: 1px solid var(--border-color);
+ margin-top: 4px;
+ }
+ .log-filter-action-btn {
+ flex: 1;
+ padding: 4px 8px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 11px;
+ transition: all 0.2s ease;
+ }
+ .log-filter-action-btn:hover {
+ background: var(--bg-hover);
+ border-color: var(--accent-color);
+ }
+ .log-filter-action-btn.primary {
+ background: var(--accent-color);
+ color: white;
+ border-color: var(--accent-color);
+ }
+ .log-filter-action-btn.primary:hover {
+ background: #2563eb;
+ }
+
+ /* Navigation buttons */
+ .log-nav-btn {
+ padding: 4px 10px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 11px;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ }
+ .log-nav-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--accent-color);
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ transform: translateY(-1px);
+ }
+ .log-nav-btn:active {
+ transform: translateY(0);
+ box-shadow: none;
+ }
+ .log-nav-btn:disabled {
+ opacity: 0.4;
+ cursor: not-allowed;
+ }
+ .log-nav-btn:disabled:hover {
+ background: var(--bg-primary);
+ color: var(--text-secondary);
+ border-color: var(--border-color);
+ }
+ .log-nav-btn .nav-icon {
+ font-size: 12px;
+ }
+ .log-nav-btn.error-nav {
+ border-color: #ef4444;
+ color: #ef4444;
+ }
+ .log-nav-btn.error-nav:hover {
+ background: #fef2f2;
+ box-shadow: 0 1px 3px rgba(239, 68, 68, 0.2);
+ transform: translateY(-1px);
+ }
+
+ /* Display controls */
+ .log-zoom-control {
+ display: flex;
+ gap: 2px;
+ align-items: center;
+ }
+ .log-zoom-btn {
+ padding: 2px 6px;
+ background: var(--bg-primary);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ border-radius: 3px;
+ cursor: pointer;
+ font-size: 12px;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ }
+ .log-zoom-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ border-color: var(--accent-color);
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ transform: scale(1.05);
+ }
+ .log-zoom-btn:active {
+ transform: scale(0.95);
+ box-shadow: none;
+ }
+ .log-zoom-value {
+ font-size: 11px;
+ color: var(--text-tertiary);
+ min-width: 35px;
+ text-align: center;
+ }
+ .log-viewer.font-sm { font-size: 11px; }
+ .log-viewer.font-md { font-size: 13px; }
+ .log-viewer.font-lg { font-size: 15px; }
+ .log-viewer.font-xl { font-size: 17px; }
+ .log-viewer.wrap-enabled .log-line-content {
+ white-space: pre-wrap;
+ word-break: break-word;
+ }
+ .log-viewer.wrap-disabled .log-line-content {
+ white-space: pre;
+ overflow-x: auto;
+ }
+
+ /* Floating Controls Panel (Google Maps style) */
+ /* NOTE(review): dark-mode overrides below originally used `.theme-dark`,
+ but the visible theming mechanism sets the `data-theme` attribute on
+ <html>, so those rules never matched. Each dark override now also
+ targets [data-theme="dark"]; the class selector is kept for backward
+ compatibility. */
+ .log-floating-controls {
+ position: absolute;
+ bottom: 16px;
+ right: 16px;
+ display: flex;
+ flex-direction: column;
+ gap: 10px;
+ z-index: 100;
+ pointer-events: none;
+ opacity: 0.3;
+ transition: opacity 0.2s ease;
+ }
+ .log-floating-controls:hover {
+ opacity: 1;
+ }
+ .log-floating-controls > * {
+ pointer-events: auto;
+ }
+ .floating-control-group {
+ background: rgba(255, 255, 255, 0.95);
+ border: none;
+ border-radius: 2px;
+ padding: 4px;
+ box-shadow: rgba(0, 0, 0, 0.3) 0px 1px 4px -1px;
+ display: flex;
+ flex-direction: column;
+ gap: 0;
+ align-items: center;
+ }
+ [data-theme="dark"] .floating-control-group,
+ .theme-dark .floating-control-group {
+ background: rgba(30, 41, 59, 0.75);
+ box-shadow: rgba(0, 0, 0, 0.4) 0px 2px 6px 0px;
+ backdrop-filter: blur(8px);
+ }
+ .floating-control-divider {
+ width: 20px;
+ height: 1px;
+ background: #e5e7eb;
+ margin: 4px 0;
+ }
+ [data-theme="dark"] .floating-control-divider,
+ .theme-dark .floating-control-divider {
+ background: #4a5568;
+ }
+ .floating-zoom-control {
+ display: flex;
+ flex-direction: column;
+ gap: 0;
+ align-items: center;
+ }
+ .floating-zoom-btn {
+ width: 28px;
+ height: 28px;
+ padding: 0;
+ background: transparent;
+ border: none;
+ color: #5f6368;
+ border-radius: 2px;
+ cursor: pointer;
+ font-size: 18px;
+ font-weight: 400;
+ line-height: 1;
+ transition: background 0.1s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
+ .floating-zoom-btn:hover {
+ background: #f1f3f4;
+ }
+ [data-theme="dark"] .floating-zoom-btn,
+ .theme-dark .floating-zoom-btn {
+ color: #e2e8f0;
+ }
+ [data-theme="dark"] .floating-zoom-btn:hover,
+ .theme-dark .floating-zoom-btn:hover {
+ background: #4a5568;
+ }
+ .floating-zoom-btn:active {
+ background: #e8eaed;
+ }
+ [data-theme="dark"] .floating-zoom-btn:active,
+ .theme-dark .floating-zoom-btn:active {
+ background: #2d3748;
+ }
+ .floating-zoom-value {
+ font-size: 10px;
+ color: #5f6368;
+ font-weight: 400;
+ text-align: center;
+ min-width: 28px;
+ padding: 2px 0;
+ }
+ [data-theme="dark"] .floating-zoom-value,
+ .theme-dark .floating-zoom-value {
+ color: #cbd5e0;
+ }
+ .floating-wrap-btn {
+ width: 28px;
+ height: 28px;
+ padding: 0;
+ background: transparent;
+ border: none;
+ color: #5f6368;
+ border-radius: 2px;
+ cursor: pointer;
+ font-size: 14px;
+ line-height: 1;
+ transition: background 0.1s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
+ .floating-wrap-btn:hover {
+ background: #f1f3f4;
+ }
+ [data-theme="dark"] .floating-wrap-btn,
+ .theme-dark .floating-wrap-btn {
+ color: #e2e8f0;
+ }
+ [data-theme="dark"] .floating-wrap-btn:hover,
+ .theme-dark .floating-wrap-btn:hover {
+ background: #4a5568;
+ }
+ .floating-wrap-btn:active {
+ background: #e8eaed;
+ }
+ [data-theme="dark"] .floating-wrap-btn:active,
+ .theme-dark .floating-wrap-btn:active {
+ background: #2d3748;
+ }
+ .floating-wrap-btn.active {
+ background: #e8f0fe;
+ color: #1a73e8;
+ }
+ [data-theme="dark"] .floating-wrap-btn.active,
+ .theme-dark .floating-wrap-btn.active {
+ background: #3b82f6;
+ color: white;
+ }
+ .log-line-number-clickable {
+ position: relative;
+ cursor: pointer;
+ }
+ .log-line-number-clickable:hover {
+ background: rgba(59, 130, 246, 0.1);
+ }
+
+ /* pprof Visualization - Compact */
+ .pprof-compact-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ margin-bottom: 12px;
+ padding: 8px 12px;
+ background: var(--bg-secondary);
+ border-radius: 6px;
+ flex-wrap: wrap;
+ gap: 8px;
+ }
+ .pprof-stats {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-size: 13px;
+ color: var(--text-primary);
+ flex-wrap: wrap;
+ }
+ .pprof-stat strong {
+ color: #3b82f6;
+ }
+ .pprof-stat-sep {
+ color: var(--text-secondary);
+ font-size: 10px;
+ }
+ .pprof-help-btn {
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ font-size: 11px;
+ color: var(--text-secondary);
+ background: transparent;
+ border: 1px solid var(--border-color);
+ padding: 4px 8px;
+ border-radius: 4px;
+ cursor: pointer;
+ transition: all 0.2s;
+ }
+ .pprof-help-btn:hover {
+ background: var(--bg-hover);
+ color: var(--text-primary);
+ }
+ .pprof-help-popup {
+ display: none;
+ position: absolute;
+ right: 0;
+ top: 100%;
+ margin-top: 4px;
+ width: 320px;
+ padding: 12px;
+ background: #1f2937;
+ color: #fff;
+ border-radius: 8px;
+ font-size: 12px;
+ line-height: 1.5;
+ box-shadow: 0 4px 12px rgba(0,0,0,0.3);
+ z-index: 100;
+ }
+ .pprof-help-popup.visible {
+ display: block;
+ }
+ .pprof-help-popup code {
+ background: rgba(255,255,255,0.1);
+ padding: 2px 6px;
+ border-radius: 3px;
+ font-size: 11px;
+ }
+ .pprof-compact-header {
+ position: relative;
+ }
+ .pprof-bars {
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+ }
+ .pprof-bar-item {
+ display: grid;
+ grid-template-columns: 150px 1fr;
+ gap: 8px;
+ align-items: center;
+ }
+ .pprof-bar-label {
+ font-size: 11px;
+ color: var(--text-secondary);
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ font-family: monospace;
+ }
+ .pprof-bar-container {
+ position: relative;
+ height: 18px;
+ background: var(--bg-secondary);
+ border-radius: 3px;
+ overflow: hidden;
+ }
+ .pprof-bar {
+ height: 100%;
+ border-radius: 3px;
+ background: linear-gradient(90deg, #3b82f6, #8b5cf6);
+ }
+ .pprof-bar-value {
+ position: absolute;
+ right: 6px;
+ top: 50%;
+ transform: translateY(-50%);
+ font-size: 10px;
+ font-weight: 600;
+ color: var(--text-primary);
+ }
+ .pprof-stacks {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ }
+ .pprof-stack-item {
+ background: var(--bg-secondary);
+ border-radius: 4px;
+ overflow: hidden;
+ }
+ .pprof-stack-header {
+ display: flex;
+ align-items: center;
+ padding: 6px 10px;
+ cursor: pointer;
+ gap: 8px;
+ transition: background 0.2s;
+ }
+ .pprof-stack-header:hover {
+ background: var(--bg-hover);
+ }
+ .pprof-stack-count {
+ background: #10b981;
+ color: white;
+ padding: 1px 6px;
+ border-radius: 8px;
+ font-size: 10px;
+ font-weight: 600;
+ min-width: 32px;
+ text-align: center;
+ }
+ .pprof-stack-name {
+ flex: 1;
+ font-size: 11px;
+ font-family: monospace;
+ color: var(--text-secondary);
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ }
+ .pprof-stack-toggle {
+ color: var(--text-secondary);
+ font-size: 9px;
+ transition: transform 0.2s;
+ }
+ .pprof-stack-item.expanded .pprof-stack-toggle {
+ transform: rotate(90deg);
+ }
+ .pprof-stack-frames {
+ display: none;
+ padding: 0 10px 8px 48px;
+ font-size: 10px;
+ font-family: monospace;
+ color: var(--text-secondary);
+ }
+ .pprof-stack-item.expanded .pprof-stack-frames {
+ display: block;
+ }
+ .pprof-frame {
+ padding: 2px 0;
+ border-left: 2px solid var(--border-color);
+ padding-left: 10px;
+ margin-left: 2px;
+ }
+ .pprof-section {
+ margin-bottom: 24px;
+ }
+ .pprof-section-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ margin-bottom: 12px;
+ }
+ .pprof-section-title {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ font-size: 14px;
+ font-weight: 600;
+ color: var(--text-primary);
+ }
+ .pprof-help-link {
+ font-size: 11px;
+ color: #3b82f6;
+ text-decoration: none;
+ display: flex;
+ align-items: center;
+ gap: 4px;
+ }
+ .pprof-help-link:hover {
+ text-decoration: underline;
+ }
+ .pprof-raw-toggle {
+ font-size: 11px;
+ color: var(--text-secondary);
+ background: var(--bg-secondary);
+ border: 1px solid var(--border-color);
+ padding: 4px 10px;
+ border-radius: 4px;
+ cursor: pointer;
+ margin-top: 12px;
+ }
+ .pprof-raw-toggle:hover {
+ background: var(--bg-hover);
+ }
+ .pprof-raw-content {
+ display: none;
+ margin-top: 12px;
+ }
+ .pprof-raw-content.visible {
+ display: block;
+ }
+
+ /* Comparison */
+ .comparison-run-info {
+ display: grid;
+ grid-template-columns: 1fr 1fr;
+ gap: 20px;
+ margin-top: 20px;
+ }
+ .run-info-card {
+ background: var(--bg-primary);
+ padding: 15px;
+ border-radius: 8px;
+ border: 1px solid var(--border-color);
+ }
+ .run-info-card h4 {
+ margin: 0 0 10px 0;
+ color: var(--text-primary);
+ font-size: 14px;
+ }
+ .run-info-details {
+ font-size: 12px;
+ color: var(--text-secondary);
+ }
+ .run-info-details > div {
+ margin: 5px 0;
+ }
+ .comparison-grid {
+ display: grid;
+ grid-template-columns: 1fr 1fr 1fr;
+ gap: 20px;
+ margin-top: 20px;
+ }
+ .comparison-col {
+ background: var(--bg-primary);
+ padding: 20px;
+ border-radius: 8px;
+ border: 1px solid var(--border-color);
+ }
+
+ /* Utility */
+ .hidden { display: none !important; }
+ .theme-toggle {
+ background: var(--bg-hover);
+ border: 1px solid var(--border-color);
+ padding: 8px 12px;
+ border-radius: 6px;
+ cursor: pointer;
+ color: var(--text-primary);
+ }
+
+ @media print {
+ .theme-toggle, .tabs, .filters, .log-controls, .close-btn { display: none !important; }
+ .container { display: block; }
+ .tab-content { display: block !important; padding: 0; }
+ .card, .chart-wrapper, .table-wrapper { border: 1px solid #ddd; break-inside: avoid; }
+ body { background: white; color: black; }
+ * { box-shadow: none !important; }
+ }
+ """
+
+ JS = """
+ // ============================================================
+ // SECTION: Icons & Utilities
+ // ============================================================
+
// Returns inline SVG markup for a named UI icon, sized `size` px and tinted
// `color`; returns '' for unknown icon names.
// NOTE(review): every icon template literal below is empty in this view --
// the SVG markup appears to have been stripped during extraction; confirm
// the real icon strings against the original file.
function svgIcon(name, size = 16, color = 'currentColor') {
    const icons = {
        'search': ``,
        'copy': ``,
        'download': ``,
        'error': ``,
        'warning': ``,
        'info': ``,
        'bug': ``,
        'chevron-up': ``,
        'chevron-down': ``,
        'arrow-up': ``,
        'arrow-down': ``,
        'wrap': ``,
        'sun': ``,
        'moon': ``,
    };
    // Unknown names degrade to an empty string rather than undefined,
    // so callers can interpolate the result directly into innerHTML.
    return icons[name] || '';
}
+
+ // ============================================================
+ // SECTION: Global State & Navigation
+ // ============================================================
+
// Id of the currently selected top-level tab; kept in sync by switchTab().
let currentTab = 'dashboard';

// Activates the tab button and content pane identified by tabId.
// Relies on each tab button carrying onclick="switchTab('<id>')" verbatim:
// the attribute selector below matches that exact literal string.
function switchTab(tabId) {
    // Deactivate every button and pane, then re-activate the chosen pair.
    document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
    document.querySelectorAll('.tab-content').forEach(c => c.classList.remove('active'));

    document.querySelector(`[onclick="switchTab('${tabId}')"]`).classList.add('active');
    document.getElementById(tabId).classList.add('active');
    currentTab = tabId;
}
+
// Theme: flips html[data-theme] between 'dark' and 'light', persists the
// choice to localStorage (read back on load), and refreshes the toggle
// button's icon/tooltip.
function toggleTheme() {
    const html = document.documentElement;
    const current = html.getAttribute('data-theme');
    const next = current === 'dark' ? 'light' : 'dark';
    html.setAttribute('data-theme', next);
    localStorage.setItem('theme', next);
    updateThemeButton();
}
+
// Syncs the #themeToggle button's icon and tooltip with the active theme
// (sun icon offered in dark mode, moon in light mode).  No-op when the
// button is not present in the DOM.
function updateThemeButton() {
    const theme = document.documentElement.getAttribute('data-theme');
    const btn = document.getElementById('themeToggle');
    if (btn) {
        btn.innerHTML = theme === 'dark' ? svgIcon('sun', 18) : svgIcon('moon', 18);
        btn.title = theme === 'dark' ? 'Switch to light mode' : 'Switch to dark mode';
    }
}
+
// Init: restore the saved theme (falling back to the OS-level
// prefers-color-scheme preference), then render the dashboard charts.
// NOTE(review): renderCharts is defined outside this excerpt; confirm it
// exists before this listener fires.
document.addEventListener('DOMContentLoaded', () => {
    const savedTheme = localStorage.getItem('theme') ||
        (window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light');
    document.documentElement.setAttribute('data-theme', savedTheme);
    updateThemeButton();

    renderCharts();
});
+
// Filtering: shows/hides #resultsTable rows based on the four filter
// controls (free-text search, status, stability, label).  Each row carries
// its pivot record serialized as JSON in its data-json attribute.
function filterTable() {
    const search = document.getElementById('searchInput').value.toLowerCase();
    const status = document.getElementById('statusFilter').value;
    const stability = document.getElementById('stabilityFilter').value;
    const label = document.getElementById('labelFilter').value;

    const rows = document.querySelectorAll('#resultsTable tbody tr');

    rows.forEach(row => {
        const rowData = JSON.parse(row.dataset.json);
        const name = rowData.test_name.toLowerCase();

        // Search: case-insensitive substring match on the test name.
        const matchesSearch = name.includes(search);

        // Status: the row matches if ANY of its runs has the chosen state.
        let matchesStatus = status === 'all';
        if (!matchesStatus) {
            matchesStatus = Object.values(rowData.runs).some(r => r && r.state === status);
        }

        // Stability buckets: flaky = mixed pass/fail; stable = not flaky and
        // never failed; always-failing = zero passes.
        let matchesStability = true;
        if (stability === 'flaky') matchesStability = rowData.is_flaky;
        if (stability === 'stable') matchesStability = !rowData.is_flaky && rowData.fail_count === 0;
        if (stability === 'always-failing') matchesStability = rowData.pass_count === 0;

        // Label: match if any run recorded the selected label (simplified --
        // labels are stored per run, not per row).
        let matchesLabel = label === 'all';
        if (!matchesLabel) {
            matchesLabel = Object.values(rowData.runs).some(r => r && r.labels && r.labels.includes(label));
        }

        // A row must satisfy every active filter to remain visible.
        if (matchesSearch && matchesStatus && matchesStability && matchesLabel) {
            row.style.display = '';
        } else {
            row.style.display = 'none';
        }
    });
}
+
// Modal Tabs: activates one tab button + content pane inside the test
// details modal, using the same onclick-attribute matching trick as
// switchTab() above.
function switchModalTab(tabId) {
    // Update tab buttons
    document.querySelectorAll('.modal-tab').forEach(btn => btn.classList.remove('active'));
    document.querySelector(`[onclick="switchModalTab('${tabId}')"]`).classList.add('active');

    // Update tab content
    document.querySelectorAll('.modal-tab-content').forEach(content => content.classList.remove('active'));
    document.getElementById(tabId).classList.add('active');
}
+
+ // ============================================================
+ // SECTION: Modal & Test Details
+ // ============================================================
+
+ function showTestDetails(data) {
+ const modal = document.getElementById('testModal');
+ const modalBody = document.getElementById('modalContent');
+
+ // Check if this is pivot data (flaky tests) or single test data
+ const isPivotData = data.state === undefined;
+
+ // Build tabs - only Summary tab for pivot data
+ let html = `
+
+
+ `;
+
+ if (!isPivotData) {
+ html += `
+
+
+
+ `;
+ }
+
+ html += `
+
+ `;
+
+ // Summary Tab
+ html += `
+
+
+
Test Information
+
+
+
Test Name
+
${escapeHtml(data.test_name)}
+
+ `;
+
+ // Single test data (from clicking on a result cell)
+ if (data.state !== undefined) {
+ html += `
+
+ `;
+ }
+ }
+
+ if (data.failure_message) {
+ html += `
+
+
Failure Message
+
${escapeHtml(data.failure_message)}
+
+ `;
+ }
+
+ html += `
`; // Close summary tab
+
+ // Queue for pprof rendering - declared at function scope for access after DOM update
+ let pprofRenderQueue = [];
+
+ // Only show these tabs for single test data (not pivot data)
+ if (!isPivotData) {
+ // Artifacts Tab
+ html += `
`;
+ if (data.artifact_metadata?.relative_path) {
+ html += `
+
+
Artifact Location
+
${data.artifact_metadata.relative_path}
+
+ `;
+ }
+ if (data.artifact_metadata?.artifacts) {
+ const artifacts = data.artifact_metadata.artifacts;
+ if (artifacts.log_files?.length > 0) {
+ html += `
+
+
Log Files (${artifacts.log_files.length})
+
+ ${artifacts.log_files.map(f => `
📄 ${f}
`).join('')}
+
+
+ `;
+ }
+ if (artifacts.resource_files?.length > 0) {
+ html += `
+
+ ${formatBytes(parsed.liveSize)} live
+ •
+ ${parsed.liveObjects?.toLocaleString() || 0} objects
+ •
+ ${formatBytes(parsed.totalSize)} total allocated
+
+
+
+ How to interpret heap profile:
+ • Live memory — currently allocated and in use
+ • Top allocators — functions allocating most memory
+ • Look for unexpected large allocations or memory leaks
+ Deep analysis:
+ go tool pprof heap.pb.gz
+ Commands: top, web, list funcName
+
+ How to interpret goroutine profile:
+ • Count (Nx) — number of goroutines with same stack
+ • High counts may indicate goroutine leaks
+ • Click stack to see full call trace
+ What to look for:
+ • Blocked goroutines (waiting on channels/mutexes)
+ • Unexpected goroutine accumulation over time
+ • Goroutines stuck in infinite loops
+
';
+ }
+
// Initialize on load: populate the run-comparison info panels.
// NOTE(review): updateRunInfo is defined elsewhere in this script (outside
// this excerpt); confirm it exists, as this listener will throw otherwise.
document.addEventListener('DOMContentLoaded', function() {
    updateRunInfo();
});
+ """
+
+# --- Report Generator ---
+
+class ReportGenerator:
+ def __init__(self, results_dir: Path):
+ self.results_dir = results_dir
+ self.runs: List[TestRun] = []
+ self.pivot_data: List[PivotRow] = []
+ self.all_labels: Set[str] = set()
+
    def parse_results(self):
        """Parse every ``run-*/`` directory under results_dir into TestRun objects.

        For each run directory this reads:
          * ``artifacts/metadata.json`` -- run-level metadata (ids, counts, git info)
          * ``reports/report.json``     -- the spec-level test report
            (NOTE(review): the SpecReports / LeafNodeText / ContainerHierarchyTexts
            fields look like Ginkgo's JSON report schema -- confirm against the
            report producer)
          * ``reports/test-output.log`` -- raw console output, tail-truncated

        Populates self.runs (sorted newest-first), self.all_labels, and finally
        self.pivot_data via _build_pivot_data().  Unreadable runs are reported
        to stdout and skipped rather than aborting the whole report.
        """
        if not self.results_dir.exists():
            return

        for run_dir in sorted(self.results_dir.glob("run-*")):
            if not run_dir.is_dir():
                continue

            metadata_file = run_dir / "artifacts" / "metadata.json"
            report_file = run_dir / "reports" / "report.json"

            # A run is only usable when both halves are present.
            if not metadata_file.exists() or not report_file.exists():
                continue

            try:
                with open(metadata_file, 'r') as f:
                    metadata = json.load(f)
                with open(report_file, 'r') as f:
                    report = json.load(f)
            except Exception as e:
                # Best-effort: one corrupt run must not abort the whole report.
                print(f"Error reading {run_dir}: {e}")
                continue

            # Parse tests.  The report is expected to be a JSON array whose
            # first element carries the SpecReports list.
            tests = []
            spec_reports = report[0].get('SpecReports', []) if isinstance(report, list) else []

            # Build artifact index once per run for O(1) lookup
            artifact_index = self._build_artifact_index(run_dir)

            for spec in spec_reports:
                # Entries without LeafNodeText carry no runnable test
                # (presumably container/setup nodes) -- skip them.
                if not spec.get('LeafNodeText'):
                    continue

                hierarchy = spec.get('ContainerHierarchyTexts', [])
                leaf_text = spec['LeafNodeText']
                # Full name = container texts + leaf joined by spaces.  This
                # string is also the key used for artifact lookup below, so it
                # must match the artifact metadata's 'name' field.
                full_name = ' '.join(hierarchy + [leaf_text]) if hierarchy else leaf_text

                # Collect labels from every container level plus the leaf node.
                labels = []
                for l_list in spec.get('ContainerHierarchyLabels', []):
                    if isinstance(l_list, list): labels.extend(l_list)
                if spec.get('LeafNodeLabels'): labels.extend(spec.get('LeafNodeLabels'))
                self.all_labels.update(labels)

                # Find artifacts using pre-built index
                artifact_meta = self._find_artifacts(artifact_index, full_name)

                tests.append(TestResult(
                    name=full_name,
                    full_name=full_name,
                    leaf_text=leaf_text,
                    state=spec['State'],
                    # RunTime is divided by 1e9 -- presumably nanoseconds to
                    # seconds; confirm against the report producer.
                    runtime=spec['RunTime'] / 1e9,
                    failure_message=spec.get('FailureMessage', ''),
                    labels=labels,
                    container_hierarchy=hierarchy,
                    start_time=spec.get('StartTime', ''),
                    artifact_metadata=artifact_meta
                ))

            # Calculate total runtime from tests if not in metadata
            total_runtime = sum(t.runtime for t in tests)

            # Read test output log, keeping at most the last 10,000 lines.
            test_output_log = ""
            log_file = run_dir / "reports" / "test-output.log"
            if log_file.exists():
                try:
                    with open(log_file, 'r', encoding='utf-8', errors='replace') as f:
                        lines = f.readlines()
                    if len(lines) > 10000:
                        # NOTE(review): '\\n' puts a literal backslash-n in the
                        # truncation header -- confirm a real newline was not
                        # intended here.
                        test_output_log = f"Log truncated. Showing last 10,000 of {len(lines)} lines.\\n\\n" + "".join(lines[-10000:])
                    else:
                        test_output_log = "".join(lines)
                except Exception as e:
                    test_output_log = f"Error reading log file: {e}"

            # Metadata values win; otherwise fall back to counts derived from
            # the parsed specs themselves.
            self.runs.append(TestRun(
                run_id=str(metadata.get('run_id', run_dir.name)),
                start_time=metadata.get('start_time', datetime.now().isoformat()),
                total_tests=metadata.get('total_tests', len(tests)),
                passed_tests=metadata.get('passed_tests', len([t for t in tests if t.state == 'passed'])),
                failed_tests=metadata.get('failed_tests', len([t for t in tests if t.state == 'failed'])),
                environment=metadata.get('environment', {}),
                total_runtime=total_runtime,
                test_output_log=test_output_log,
                tests=tests,
                git_commit=metadata.get('git_commit', ''),
                git_branch=metadata.get('git_branch', ''),
                git_dirty=metadata.get('git_dirty', '')
            ))

        # Sort runs by time (newest first)
        self.runs.sort(key=lambda r: r.start_time, reverse=True)
        self._build_pivot_data()
+
+ def _build_artifact_index(self, run_dir: Path) -> Dict[str, Path]:
+ """Build name -> artifact_dir mapping for O(1) lookup."""
+ index = {}
+ artifacts_dir = run_dir / "artifacts"
+ if not artifacts_dir.exists():
+ return index
+ for artifact_dir in artifacts_dir.glob("*"):
+ if not artifact_dir.is_dir():
+ continue
+ meta_file = artifact_dir / "metadata.json"
+ if meta_file.exists():
+ try:
+ with open(meta_file) as f:
+ data = json.load(f)
+ if name := data.get('name'):
+ index[name.strip()] = artifact_dir
+ except:
+ pass
+ return index
+
+ def _find_artifacts(self, artifact_index: Dict[str, Path], test_name: str) -> Optional[Dict[str, Any]]:
+ """Locate artifact metadata for a specific test using pre-built index."""
+ artifact_dir = artifact_index.get(test_name.strip())
+ if not artifact_dir:
+ return None
+
+ meta_file = artifact_dir / "metadata.json"
+ if not meta_file.exists():
+ return None
+
+ try:
+ with open(meta_file) as f:
+ data = json.load(f)
+
+ data['relative_path'] = str(artifact_dir.relative_to(self.results_dir))
+ data['file_contents'] = {}
+
+ # Read log files (last 500 lines)
+ for log in data.get('artifacts', {}).get('log_files', []):
+ lp = artifact_dir / log
+ if lp.exists():
+ try:
+ with open(lp) as lf:
+ lines = lf.readlines()
+ content = ''.join(lines[-500:])
+ data['file_contents'][log] = {
+ 'content': content,
+ 'type': 'log',
+ 'truncated': len(lines) > 500,
+ 'total_lines': len(lines)
+ }
+ except Exception as e:
+ data['file_contents'][log] = {'content': str(e), 'type': 'error'}
+
+ # Read resource files (limit 50KB)
+ for res in data.get('artifacts', {}).get('resource_files', [])[:10]:
+ rp = artifact_dir / res
+ if rp.exists():
+ try:
+ with open(rp) as rf:
+ content = rf.read()
+ if len(content) > 51200: content = content[:51200] + '\\n... (truncated)'
+ data['file_contents'][res] = {'content': content, 'type': 'resource'}
+ except Exception as e:
+ data['file_contents'][res] = {'content': str(e), 'type': 'error'}
+
+ # Read event files
+ for evt in data.get('artifacts', {}).get('event_files', []):
+ ep = artifact_dir / evt
+ if ep.exists():
+ try:
+ with open(ep) as ef:
+ data['file_contents'][evt] = {'content': ef.read(), 'type': 'events'}
+ except Exception as e:
+ data['file_contents'][evt] = {'content': str(e), 'type': 'error'}
+
+ return data
+ except:
+ pass
+ return None
+
+ def _build_pivot_data(self):
+ """Build the pivot table data structure and analyze flakiness."""
+ all_names = sorted({t.full_name for run in self.runs for t in run.tests})
+
+ for name in all_names:
+ # Get container hierarchy from first occurrence of test
+ test_obj = None
+ for run in self.runs:
+ test_obj = next((t for t in run.tests if t.full_name == name), None)
+ if test_obj:
+ break
+
+ row = PivotRow(
+ test_name=name,
+ full_test_name=name,
+ leaf_text=test_obj.leaf_text if test_obj else name.split(' ')[-1],
+ container_hierarchy=test_obj.container_hierarchy if test_obj else []
+ )
+
+ results_sequence = []
+
+ for run in self.runs:
+ result = next((t for t in run.tests if t.full_name == name), None)
+ if result:
+ row.runs[run.run_id] = {
+ 'state': result.state,
+ 'runtime': result.runtime,
+ 'failure_message': result.failure_message,
+ 'labels': result.labels,
+ 'artifact_metadata': result.artifact_metadata
+ }
+ row.total_runs += 1
+ row.total_runtime += result.runtime
+ row.min_runtime = min(row.min_runtime, result.runtime)
+ row.max_runtime = max(row.max_runtime, result.runtime)
+
+ if result.state == 'passed':
+ row.pass_count += 1
+ results_sequence.append('P')
+ elif result.state == 'failed':
+ row.fail_count += 1
+ results_sequence.append('F')
+ else:
+ row.skip_count += 1
+ results_sequence.append('S')
+ else:
+ row.runs[run.run_id] = None
+
+ if row.total_runs > 0:
+ row.pass_rate = (row.pass_count / row.total_runs) * 100
+ row.avg_runtime = row.total_runtime / row.total_runs
+
+ # Flakiness Analysis
+ if row.total_runs >= 2 and row.pass_count > 0 and row.fail_count > 0:
+ row.is_flaky = True
+ row.flakiness_score = 100 - 2 * abs(row.pass_rate - 50)
+
+ # Pattern detection
+ if len(results_sequence) >= 3:
+ is_alternating = True
+ for i in range(len(results_sequence) - 1):
+ if results_sequence[i] == results_sequence[i+1]:
+ is_alternating = False
+ break
+ if is_alternating: row.flakiness_pattern = 'alternating'
+
+ self.pivot_data.append(row)
+
+ # Sort by failure count
+ self.pivot_data.sort(key=lambda x: (x.fail_count, -x.pass_rate), reverse=True)
+
+ def _generate_chart_data(self) -> str:
+ """Generate JSON data for Chart.js."""
+ chronological_runs = sorted(self.runs, key=lambda r: r.start_time)
+ labels = [f"Run {r.run_id}" for r in chronological_runs]
+
+ pass_rates = []
+ durations = []
+ runs_data = []
+
+ for r in chronological_runs:
+ rate = (r.passed_tests / r.total_tests * 100) if r.total_tests > 0 else 0
+ pass_rates.append(round(rate, 1))
+ durations.append(round(r.total_runtime, 1))
+
+ # Add run metadata for comparison
+ runs_data.append({
+ 'run_id': r.run_id,
+ 'timestamp': r.start_time,
+ 'total': r.total_tests,
+ 'passed': r.passed_tests,
+ 'failed': r.failed_tests,
+ 'runtime': round(r.total_runtime, 1)
+ })
+
+ data = {
+ "passRate": {
+ "labels": labels,
+ "datasets": [{
+ "label": "Pass Rate (%)",
+ "data": pass_rates,
+ "borderColor": "#10b981",
+ "backgroundColor": "rgba(16, 185, 129, 0.1)",
+ "fill": True
+ }]
+ },
+ "duration": {
+ "labels": labels,
+ "datasets": [{
+ "label": "Total Duration (s)",
+ "data": durations,
+ "borderColor": "#3b82f6",
+ "backgroundColor": "rgba(59, 130, 246, 0.1)",
+ "fill": True
+ }]
+ },
+ "runs": runs_data
+ }
+ return json.dumps(data)
+
+ def generate_html(self, output_file: Path):
+ """Generate the full HTML report."""
+
+ # Serialize pivot data for JS
+ pivot_json = json.dumps([
+ {
+ 'test_name': r.test_name,
+ 'is_flaky': r.is_flaky,
+ 'pass_count': r.pass_count,
+ 'fail_count': r.fail_count,
+ 'total_runs': r.total_runs,
+ 'flakiness_score': r.flakiness_score,
+ 'runs': r.runs
+ } for r in self.pivot_data
+ ], default=str)
+
+ html_content = f"""
+
+
+
+
+ E2E Test Report
+
+
+
+
+
+
+
+
E2E Test Results
+
Generated on {datetime.now().strftime('%Y-%m-%d %H:%M')}
+
+
+
+
+
+
+
+
+
+ {f'' if len(self.runs) >= 2 else ''}
+
+
+
+
+
+
+
Total Runs
+
{len(self.runs)}
+
+
+
Total Tests
+
{len(self.pivot_data)}
+
+
+
Flaky Tests
+
{len([r for r in self.pivot_data if r.is_flaky])}
+ Tests that show inconsistent results across runs - sometimes passing, sometimes failing. These may indicate timing issues, race conditions, or environmental dependencies.
+
+
+
Always Failing
+
{len([r for r in self.pivot_data if r.fail_count == r.total_runs and r.total_runs > 0])}
+ Tests that failed in every single run. These are consistently broken and require immediate attention.
+
+
+
Avg Runtime
+
{format_duration(sum(r.total_runtime for r in self.runs) / len(self.runs)) if self.runs else 'N/A'}
+ Average total runtime across all test runs. Helps track performance trends over time.
+
+
+
Pass Rate Trend
+
+ {self._get_pass_rate_trend()}
+
+ Pass rate change compared to the previous run. ↑ indicates improvement, ↓ indicates more failures, → means stable.
+
+
+
+
+
Latest Run Summary
+
+
+
Pass Rate
+
{self._get_latest_pass_rate()}%
+
+
+
Failures
+
{self.runs[0].failed_tests if self.runs else 0}
+
+
+
Runtime
+
{format_duration(self.runs[0].total_runtime) if self.runs else 'N/A'}
+
+
+
+
+ {self.runs[0].git_branch if self.runs and self.runs[0].git_branch else 'unknown'}
+
+ {f'●' if self.runs and self.runs[0].git_dirty else ''}
+
+ {svg_icon('copy', 12)} {self.runs[0].git_commit[:7] if self.runs and self.runs[0].git_commit else 'unknown'}
+
+