-
Notifications
You must be signed in to change notification settings - Fork 3
308 lines (261 loc) · 11.1 KB
/
kind.yml
File metadata and controls
308 lines (261 loc) · 11.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
# CI workflow: builds the basic-docker binary, brings up a kind
# (Kubernetes-in-Docker) cluster, and verifies capsule attachment end to end.
name: Test Capsule Attachment with kind
on:
  push:
    branches: [ main ]
    paths-ignore:
      - '**.md'  # docs-only changes don't need a cluster run
  pull_request:
    branches: [ main ]
    paths-ignore:
      - '**.md'
  workflow_dispatch:  # allow manual runs from the Actions tab
jobs:
  test-with-kind:
    runs-on: ubuntu-latest
    timeout-minutes: 15  # hard cap so a wedged cluster can't hang the queue
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '^1.24'
cache: true
- name: Build binary app
run: |
# Build with output path specified
go build -v -o basic-docker
# Verify binary exists and is executable
ls -la basic-docker
chmod +x basic-docker
# Move to a directory in PATH (alternative approach)
sudo mv basic-docker /usr/local/bin/
which basic-docker
      - name: Create kind cluster
        uses: helm/kind-action@v1.5.0
        with:
          cluster_name: capsule-test
          wait: 120s  # let the action block until the control plane is up
      - name: Wait for kind to be ready
        run: |
          # Sanity-check API reachability, then wait for node readiness.
          kubectl cluster-info
          kubectl wait --for=condition=Ready nodes --all --timeout=90s
          kubectl get nodes
      # Seeds the cluster with everything the later verification steps inspect:
      # the ResourceCapsule CRD, a versioned ConfigMap capsule, a sample
      # ResourceCapsule object, and a target nginx Deployment.
      - name: Create test resources
        run: |
          # Create test namespace
          kubectl create namespace capsule-test
          # Install ResourceCapsule CRD
          kubectl apply -f k8s/crd-resourcecapsule.yaml
          # Wait for CRD to be established
          kubectl wait --for condition=established --timeout=30s crd/resourcecapsules.capsules.docker.io
          # Create test ConfigMap capsule
          cat <<EOF | kubectl apply -f - -n capsule-test
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: test-config-1.0
            labels:
              capsule.docker.io/name: test-config
              capsule.docker.io/version: "1.0"
          data:
            config.yml: |
              testKey: testValue
              environment: test
          EOF
          # Create test ResourceCapsule CRD
          cat <<EOF | kubectl apply -f - -n capsule-test
          apiVersion: capsules.docker.io/v1
          kind: ResourceCapsule
          metadata:
            name: test-crd-capsule
          spec:
            data:
              config.yaml: |
                testKey: testValue
                environment: test
            version: "1.0"
            capsuleType: configmap
            rollback:
              enabled: true
          EOF
          # Create test Deployment
          cat <<EOF | kubectl apply -f - -n capsule-test
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: test-app
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: test-app
            template:
              metadata:
                labels:
                  app: test-app
              spec:
                containers:
                  - name: nginx
                    image: nginx:alpine
                    ports:
                      - containerPort: 80
          EOF
          # Wait for deployment to be ready
          kubectl wait --for=condition=Available deployment/test-app -n capsule-test --timeout=60s
      # Smoke-tests the CLI built earlier against the live cluster.
      - name: Run capsule attachment tests
        id: verify-binary
        continue-on-error: true  # exploratory; later steps judge the outcome
        run: |
          # Create dummy path for testing
          mkdir -p /tmp/capsules
          echo "test-config data" > /tmp/capsules/test-config
          # Use the binary that should now be in PATH
          basic-docker k8s-capsule create test-config 1.0 /tmp/capsules/test-config
      # Probes the ResourceCapsule CRD via both the CLI and kubectl; every
      # fallible command is guarded with `|| echo ...` so the step survives
      # the default `bash -e` shell and still writes its output variable.
      - name: Test CRD functionality
        id: test-crd
        continue-on-error: true
        run: |
          echo "::group::Testing ResourceCapsule CRD functionality"
          # Test CRD listing
          echo "Testing k8s-crd list command:"
          basic-docker k8s-crd list || echo "CRD list command failed (expected in test environment)"
          # Check if the ResourceCapsule CRD was created
          echo "Checking ResourceCapsule CRD in cluster:"
          kubectl get resourcecapsules -n capsule-test || echo "No ResourceCapsules found (expected if CRD not working)"
          # Check if the test ResourceCapsule was created
          echo "Checking test ResourceCapsule:"
          kubectl get resourcecapsule test-crd-capsule -n capsule-test -o yaml || echo "Test ResourceCapsule not found"
          # Test status of the ResourceCapsule
          STATUS=$(kubectl get resourcecapsule test-crd-capsule -n capsule-test -o jsonpath='{.status.phase}' 2>/dev/null || echo "Unknown")
          echo "ResourceCapsule status: $STATUS"
          echo "::endgroup::"
          if [[ "$STATUS" == "Active" ]]; then
            echo "crd_test_success=true" >> $GITHUB_OUTPUT
          else
            echo "crd_test_success=false" >> $GITHUB_OUTPUT
          fi
- name: Test Go CRD tests
id: go-crd-tests
continue-on-error: true
run: |
echo "::group::Running Go tests for CRD functionality"
export KUBECONFIG=$HOME/.kube/config
export TEST_NAMESPACE=capsule-test
go test -v -run TestResourceCapsule
TEST_RESULT=$?
echo "Go CRD test exit code: $TEST_RESULT"
echo "::endgroup::"
if [ $TEST_RESULT -eq 0 ]; then
echo "✅ Go CRD tests passed successfully!"
echo "go_crd_tests_success=true" >> $GITHUB_OUTPUT
else
echo "⚠️ Go CRD tests failed, but continuing"
echo "go_crd_tests_success=false" >> $GITHUB_OUTPUT
fi
- name: Test API methods with Go tests
id: go-tests
continue-on-error: true
run: |
echo "::group::Running Go tests for AttachCapsuleToDeployment"
export KUBECONFIG=$HOME/.kube/config
export TEST_NAMESPACE=capsule-test
go test -v -run TestAttachCapsuleToDeployment
TEST_RESULT=$?
echo "Go test exit code: $TEST_RESULT"
echo "::endgroup::"
if [ $TEST_RESULT -eq 0 ]; then
echo "✅ Go tests passed successfully!"
echo "go_tests_success=true" >> $GITHUB_OUTPUT
else
echo "⚠️ Go tests failed, but continuing with manual tests"
echo "go_tests_success=false" >> $GITHUB_OUTPUT
fi
- name: Verify deployment configuration
id: verify-deployment
continue-on-error: true
run: |
echo "::group::Verifying deployment configuration"
# Check if the volume was added to the deployment
VOLUMES=$(kubectl get deployment test-app -n capsule-test -o jsonpath='{.spec.template.spec.volumes[*].name}')
echo "Deployment volumes: $VOLUMES"
VERIFICATION_RESULT=0
if [[ $VOLUMES == *"capsule-test-config-1.0"* ]]; then
echo "✅ Capsule volume successfully attached to deployment!"
else
echo "❌ Capsule volume not found in deployment"
VERIFICATION_RESULT=1
fi
# Check volume source type (should be ConfigMap)
VOLUME_CM_NAME=$(kubectl get deployment test-app -n capsule-test -o jsonpath='{.spec.template.spec.volumes[?(@.name=="capsule-test-config-1.0")].configMap.name}')
if [[ "$VOLUME_CM_NAME" == "test-config-1.0" ]]; then
echo "✅ Volume correctly references ConfigMap!"
else
echo "❌ Volume doesn't reference correct ConfigMap: $VOLUME_CM_NAME"
VERIFICATION_RESULT=1
fi
echo "::endgroup::"
if [ $VERIFICATION_RESULT -eq 0 ]; then
echo "deployment_verified=true" >> $GITHUB_OUTPUT
else
echo "deployment_verified=false" >> $GITHUB_OUTPUT
fi
- name: Verify pod configuration
id: verify-pod
continue-on-error: true
run: |
echo "::group::Verifying pod configuration"
# Restart the deployment to pick up changes
kubectl rollout restart deployment/test-app -n capsule-test
kubectl rollout status deployment/test-app -n capsule-test --timeout=60s
# Get the new pod name
POD_NAME=$(kubectl get pods -n capsule-test -l app=test-app -o jsonpath="{.items[0].metadata.name}")
echo "Pod name: $POD_NAME"
# Verify the volume mount exists in the pod
MOUNTS=$(kubectl get pod $POD_NAME -n capsule-test -o jsonpath='{.spec.containers[0].volumeMounts[*].name}')
echo "Pod volume mounts: $MOUNTS"
VERIFICATION_RESULT=0
if [[ $MOUNTS == *"capsule-test-config-1.0"* ]]; then
echo "✅ Capsule volume mount successfully added to pod!"
else
echo "❌ Capsule volume mount not found in pod"
VERIFICATION_RESULT=1
fi
# Verify mount path
MOUNT_PATH=$(kubectl get pod $POD_NAME -n capsule-test -o jsonpath='{.spec.containers[0].volumeMounts[?(@.name=="capsule-test-config-1.0")].mountPath}')
echo "Mount path: $MOUNT_PATH"
if [[ "$MOUNT_PATH" == "/capsules/test-config/1.0" ]]; then
echo "✅ Volume mounted at correct path!"
else
echo "❌ Volume mounted at incorrect path: $MOUNT_PATH"
VERIFICATION_RESULT=1
fi
# Try to access the capsule data from the pod
echo "Attempting to access capsule in pod:"
kubectl exec $POD_NAME -n capsule-test -- ls -la /capsules/test-config/1.0/ || {
echo "⚠️ Could not access capsule path in pod"
VERIFICATION_RESULT=1
}
# Try to view the actual config content
echo "Attempting to view config content:"
kubectl exec $POD_NAME -n capsule-test -- cat /capsules/test-config/1.0/config.yml || {
echo "⚠️ Could not view config content"
}
echo "::endgroup::"
if [ $VERIFICATION_RESULT -eq 0 ]; then
echo "pod_verified=true" >> $GITHUB_OUTPUT
else
echo "pod_verified=false" >> $GITHUB_OUTPUT
fi
      # Diagnostics: only runs when an earlier (non-continue-on-error) step
      # failed; dumps cluster state and pod logs for debugging.
      - name: Display logs on failure
        if: failure()
        run: |
          echo "==== kubectl get all ===="
          kubectl get all -n capsule-test
          echo "==== Deployment YAML ===="
          kubectl get deployment test-app -n capsule-test -o yaml
          echo "==== Pod logs ===="
          POD_NAME=$(kubectl get pods -n capsule-test -l app=test-app -o jsonpath="{.items[0].metadata.name}" 2>/dev/null || echo "no-pod-found")
          if [ "$POD_NAME" != "no-pod-found" ]; then
            kubectl logs $POD_NAME -n capsule-test
          fi