     - cron: '0 6 * * *'
   workflow_dispatch:

+# Explicit permissions are required for GITHUB_TOKEN to pull from GHCR
+permissions:
+  contents: read
+  packages: read
+
 jobs:
   helm-install:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - name: Checkout
-        uses: actions/checkout@v6
+      - name: Set up Helm
+        uses: azure/setup-helm@v4.3.1
+
+      - name: Log in to Container Registry
+        run: |
+          echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
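+          # This login is what lets the 'helm install oci://ghcr.io/...' step below pull the chart; the packages: read permission above scopes GITHUB_TOKEN for that pull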

       - name: Create Kind cluster
         uses: helm/kind-action@v1
@@ -101,17 +110,29 @@ jobs:
           EOF
           kubectl wait --for=condition=ready pod -l app=redis -n s3proxy --timeout=120s

-      - name: Build Helm dependencies
-        run: helm dependency build manifests/
+      - name: Create K8s Image Pull Secret & Patch Namespace
+        run: |
+          # 1. Create the secret using the workflow token
+          kubectl create secret docker-registry ghcr-login \
+            --docker-server=ghcr.io \
+            --docker-username=${{ github.actor }} \
+            --docker-password=${{ secrets.GITHUB_TOKEN }} \
+            --namespace s3proxy \
+            --dry-run=client -o yaml | kubectl apply -f -
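+          # (client-side dry-run piped into 'kubectl apply' keeps this step idempotent if the secret already exists)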
+
+          # 2. Patch the default service account to automatically use this secret
+          # This acts as a fail-safe if the Helm 'imagePullSecrets' set doesn't propagate
+          kubectl patch serviceaccount default -n s3proxy -p '{"imagePullSecrets": [{"name": "ghcr-login"}]}'

-      - name: Install chart from GHCR image
+      - name: Install chart from GHCR
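+        # The chart is now pulled from the GHCR OCI registry instead of being built from the in-repo manifests/ directory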
         run: |
           OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
-          helm install s3proxy manifests/ \
+          helm install s3proxy oci://ghcr.io/${OWNER}/charts/s3proxy-python --version 0.0.0-latest \
             --namespace s3proxy \
             --set image.repository=ghcr.io/${OWNER}/s3proxy-python \
             --set image.tag=latest \
             --set image.pullPolicy=Always \
+            --set "imagePullSecrets[0].name=ghcr-login" \
             --set s3.host="http://minio:9000" \
             --set secrets.encryptKey=test-encryption-key-for-ci \
             --set secrets.awsAccessKeyId=minioadmin \
@@ -128,7 +149,6 @@ jobs:
         run: |
           kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=s3proxy-python -n s3proxy --timeout=120s
           kubectl get pods -n s3proxy
-          # Verify we have 3 s3proxy pods
           POD_COUNT=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python --no-headers | grep Running | wc -l)
           if [ "$POD_COUNT" -lt 3 ]; then
             echo "Expected 3 s3proxy pods, got $POD_COUNT"
@@ -139,7 +159,7 @@ jobs:
       - name: Check health endpoint
         run: |
           kubectl port-forward svc/s3proxy-python 4433:4433 -n s3proxy &
-          sleep 3
+          sleep 5
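+          # give the backgrounded port-forward a moment to bind before curling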
           curl -sf http://localhost:4433/healthz && echo "Health check passed"

       - name: Run S3 smoke test
@@ -201,11 +221,14 @@ jobs:
           echo "=== Pod Status ==="
           kubectl get pods -n s3proxy -o wide
           echo ""
+          echo "=== Describe Failed Pods ==="
+          kubectl describe pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python
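+          # describe surfaces scheduling and ImagePullBackOff events that pod logs alone won't show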
+          echo ""
           echo "=== S3Proxy Logs ==="
           kubectl logs -l app.kubernetes.io/name=s3proxy-python -n s3proxy --tail=100
           echo ""
           echo "=== MinIO Logs ==="
           kubectl logs -l app=minio -n s3proxy --tail=50
           echo ""
           echo "=== Events ==="
-          kubectl get events -n s3proxy --sort-by=.lastTimestamp
+          kubectl get events -n s3proxy --sort-by=.lastTimestamp