    - cron: '0 6 * * *'
  workflow_dispatch:

+# Explicit permissions are required for GITHUB_TOKEN to pull from GHCR
+permissions:
+  contents: read
+  packages: read
+
jobs:
  helm-install:
    runs-on: ubuntu-latest
@@ -105,6 +110,20 @@ jobs:
          EOF
          kubectl wait --for=condition=ready pod -l app=redis -n s3proxy --timeout=120s

+      - name: Create K8s Image Pull Secret & Patch Namespace
+        run: |
+          # 1. Create the secret using the workflow token
+          kubectl create secret docker-registry ghcr-login \
+            --docker-server=ghcr.io \
+            --docker-username=${{ github.actor }} \
+            --docker-password=${{ secrets.GITHUB_TOKEN }} \
+            --namespace s3proxy \
+            --dry-run=client -o yaml | kubectl apply -f -
+
+          # 2. Patch the default service account to automatically use this secret
+          # This acts as a fail-safe if the Helm 'imagePullSecrets' set doesn't propagate
+          kubectl patch serviceaccount default -n s3proxy -p '{"imagePullSecrets": [{"name": "ghcr-login"}]}'
+
      - name: Install chart from GHCR
        run: |
          OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
@@ -113,6 +132,7 @@ jobs:
            --set image.repository=ghcr.io/${OWNER}/s3proxy-python \
            --set image.tag=latest \
            --set image.pullPolicy=Always \
+            --set "imagePullSecrets[0].name=ghcr-login" \
            --set s3.host="http://minio:9000" \
            --set secrets.encryptKey=test-encryption-key-for-ci \
            --set secrets.awsAccessKeyId=minioadmin \
@@ -129,7 +149,6 @@ jobs:
        run: |
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=s3proxy-python -n s3proxy --timeout=120s
          kubectl get pods -n s3proxy
-          # Verify we have 3 s3proxy pods
          POD_COUNT=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python --no-headers | grep Running | wc -l)
          if [ "$POD_COUNT" -lt 3 ]; then
            echo "Expected 3 s3proxy pods, got $POD_COUNT"
@@ -140,7 +159,7 @@ jobs:
      - name: Check health endpoint
        run: |
          kubectl port-forward svc/s3proxy-python 4433:4433 -n s3proxy &
-          sleep 3
+          sleep 5
          curl -sf http://localhost:4433/healthz && echo "Health check passed"

      - name: Run S3 smoke test
@@ -202,11 +221,14 @@ jobs:
          echo "=== Pod Status ==="
          kubectl get pods -n s3proxy -o wide
          echo ""
+          echo "=== Describe Failed Pods ==="
+          kubectl describe pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python
+          echo ""
          echo "=== S3Proxy Logs ==="
          kubectl logs -l app.kubernetes.io/name=s3proxy-python -n s3proxy --tail=100
          echo ""
          echo "=== MinIO Logs ==="
          kubectl logs -l app=minio -n s3proxy --tail=50
          echo ""
          echo "=== Events ==="
-          kubectl get events -n s3proxy --sort-by=.lastTimestamp
+          kubectl get events -n s3proxy --sort-by=.lastTimestamp
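Note: the --set "imagePullSecrets[0].name=ghcr-login" flag only takes effect if the chart actually renders an imagePullSecrets value into the pod spec. A minimal sketch of the chart-side wiring this assumes (the standard "helm create" scaffold pattern; the actual templates in this chart may differ):

# values.yaml
imagePullSecrets: []

# templates/deployment.yaml, inside spec.template.spec
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
  {{- toYaml . | nindent 8 }}
{{- end }}

If the chart lacks such a block, the ServiceAccount patch above is the part that actually lets the kubelet authenticate to ghcr.io, since pods using the default ServiceAccount inherit its imagePullSecrets.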