-
Notifications
You must be signed in to change notification settings - Fork 144
Expand file tree
/
Copy path.gitlab-ci.yml
More file actions
784 lines (722 loc) · 27 KB
/
.gitlab-ci.yml
File metadata and controls
784 lines (722 loc) · 27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
#
# A dCache build/deploy/test pipeline file.
#
# The following environment variables injected by gitlab CI
#
# DCACHE_ORG_PGP_KEY: GPG key used to sign RPM and DEB packages
# DCACHE_ORG_KEY_NAME: GPG key name
# DCACHE_ORG_PGP_KEY_PASS: GPG key password
#
# PKG_UPLOAD_URL: URL to upload dCache release packages
# PKG_UPLOAD_USER: user name to use for authorization
# PKG_UPLOAD_PASS: password
# PKG_UPLOAD_OPTIONS: additional options to curl
#
# DOCKER_HUB_USER: user name on docker hub
# DOCKER_HUB_ACCESS_KEY: Access Key or Password of the docker user
#
#
#
# KUBECONFIG: env file that contains kubernetes configuration to access the cluster
#
# dCache deployment in kubernetes managed by helm chart
# https://gitlab.desy.de/dcache/dcache-helm
#
#
# The kubernetes based jobs don't use directly any job artefact, thus pulling artefact
# is explicitly disabled by default.
# Pipeline stage ordering; a later stage starts only after the previous one succeeds.
stages:
  # build rpm, tar, deb, oci container
  - build
  # sign rpm, deb
  - sign
  # create k8s namespace
  - testenv_pre
  # deploy 3rd party infrastructure services
  - test_infra
  # deploy dCache helm chart
  - test_deploy
  # run tests
  - testing
  # clean k8s namespace, collect logs
  - testenv_post
  # on release, upload rpm, deb, tar, container
  - upload

# Variables shared by all jobs.
variables:
  # Non-interactive maven invocation with a workspace-local repository
  # (the .m2/repository path is what .build_cache caches).
  MAVEN_CLI_OPTS: "--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true -Dmaven.repo.local=.m2/repository"
  # Per-pipeline kubernetes namespace; created in testenv_pre, deleted in testenv_post.
  K8S_NAMESPACE: dcb-$CI_PIPELINE_ID
  CHECK_TIMEOUT: --timeout=300s
  HELM_OPTS: --replace --timeout 10m0s
  AUTOCA_URL: https://ci.dcache.org/ca
  DCACHE_HELM_REPO: https://gitlab.desy.de/api/v4/projects/7648/packages/helm/test
  # Golden-release version deployed alongside the current build for compatibility testing.
  DCACHE_COMPATIBILITY_VERSION: 9.2.40
  JACOCO_VERSION: 0.8.14  # must match maven plugin version
# let's debug nodes where job is running
# Print runner/node details at the start of every job, to ease debugging
# of node-specific failures. Jobs that define their own before_script
# (e.g. get_jacoco_cli) override this.
before_script:
  - |
    set +x
    echo "============== GitLab Agent =============="
    uname -a
    date
    echo "Runner : $CI_RUNNER_DESCRIPTION"
    echo "Runner id : $CI_RUNNER_ID"
    echo "Runner version : $CI_RUNNER_VERSION"
    echo "Runner revision : $CI_RUNNER_REVISION"
    echo "Runner tags : $CI_RUNNER_TAGS"
    echo "=========================================="

# Retry jobs that fail for infrastructure reasons (not test failures).
default:
  retry:
    max: 2
    when:
      - runner_system_failure
      - api_failure
#
# default tags and image for testing stages/kubernetes/helm
#
# Route jobs to the kubernetes/dcache-dev runners; artifact download is
# disabled (dependencies: []) because k8s jobs use container images, not
# job artifacts.
.kubernetes_tags:
  tags:
    - kubernetes
    - dcache-dev
  dependencies: []

# kubectl/helm tool image with its default entrypoint.
.kubernetes_image:
  extends: .kubernetes_tags
  image: dtzar/helm-kubectl:latest

# Same tool image with the entrypoint cleared, so the runner's shell
# wrapper is used as the command.
.helm_image:
  extends: .kubernetes_tags
  image:
    name: dtzar/helm-kubectl:latest
    entrypoint: ['']
#
# default cache configuration for maven build jobs
# Cache downloaded dependencies and plugins between builds.
# To keep cache across branches add 'key: "$CI_JOB_NAME"'
#
.build_cache:
  cache:
    # Cache key is derived from pom.xml content, so the cache refreshes
    # whenever dependencies change; the job-name prefix keeps the build
    # flavours (rpm/deb/tar/...) on separate caches.
    key:
      files:
        - pom.xml
      prefix: "$CI_JOB_NAME"
    paths:
      - ./.m2/repository
#
# default rules for upload stage
#
.upload_rules:
  rules:
    # Upload/release jobs run only on tagged pipelines.
    - if: $CI_COMMIT_TAG
# rules for running coverage
.no_release_rules:
  rules:
    # Coverage jobs run only on non-release (untagged) pipelines.
    - if: $CI_COMMIT_TAG == null
# cache and image configuration for jacoco report jobs
.jacoco_base:
  image: eclipse-temurin:21-jre
  extends: .no_release_rules
  cache:
    # Read-only cache of the JaCoCo CLI; populated by get_jacoco_cli,
    # which overrides the policy to pull-push.
    key: "jacoco-cli-${JACOCO_VERSION}"
    paths:
      - "jacoco-${JACOCO_VERSION}/"
    policy: pull
# Build the FHS-layout RPM package (tests are skipped; they run in
# dedicated test jobs).
rpm:
  stage: build
  image: dcache/maven-java21-rpm-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/fhs -P rpm clean package
  artifacts:
    paths:
      - "packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm"
    expire_in: 2 days

# Build the FHS-layout Debian package.
deb:
  stage: build
  image: dcache/maven-java21-deb-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/fhs -P deb clean package
  artifacts:
    paths:
      - "packages/fhs/target/dcache_*.deb"
    expire_in: 2 days

# Build the tarball package; the OCI image is assembled from it.
# needed for minimal pipeline run
tar:
  stage: build
  image: dcache/maven-java21-tar-build
  extends: .build_cache
  script:
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar clean package
  artifacts:
    paths:
      - "packages/tar/target/dcache-*.tar.gz"
    expire_in: 2 days
# run tests with coverage profile
# Run the test suite with the code-coverage profile (non-release
# pipelines only); .exec files and compiled classes feed the
# generate_jacoco_report job.
run_ut_with_coverage:
  stage: build
  image: maven:3.9.12-eclipse-temurin-21
  extends:
    - .build_cache
    - .no_release_rules
  script:
    - mvn $MAVEN_CLI_OPTS -P code-coverage clean verify -DskipTests=false -Drun.slow.tests
  artifacts:
    reports:
      junit:
        - "**/target/surefire-reports/TEST-*.xml"
        - "**/target/failsafe-reports/TEST-*.xml"
    paths:
      # pass .exec files and compiled classes to the next stage
      - "**/target/coverage-reports/jacoco-ut.exec"
      - "**/target/classes/"
    expire_in: 1 day

# Run the test suite without coverage (tagged/release pipelines only,
# via .upload_rules).
run_unit_tests:
  stage: build
  image: maven:3.9.12-eclipse-temurin-21
  extends:
    - .build_cache
    - .upload_rules
  script:
    - mvn $MAVEN_CLI_OPTS clean verify -DskipTests=false -Drun.slow.tests
  artifacts:
    reports:
      junit:
        - "**/target/surefire-reports/TEST-*.xml"
        - "**/target/failsafe-reports/TEST-*.xml"
    expire_in: 1 day
# Static analysis with SpotBugs on the default branch; per-module
# gl-code-quality-report.json files are merged into one GitLab
# code-quality report.
spotbugs:
  stage: build
  image: dcache/maven-java21-tar-build
  extends: .build_cache
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
  script:
    - dnf -y -q install jq
    # Fixed: -DskipTests was passed twice on this command line.
    - mvn $MAVEN_CLI_OPTS -DskipTests -am -pl packages/tar package com.github.spotbugs:spotbugs-maven-plugin:4.8.3.0:spotbugs verify
    - find . -name gl-code-quality-report.json -print | xargs cat | jq -s "add" > merged-gl-code-quality-report.json
  artifacts:
    reports:
      codequality:
        - merged-gl-code-quality-report.json
    paths:
      - merged-gl-code-quality-report.json
    expire_in: 2 days
# needed for minimal pipeline run
container:
  stage: build
  # For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
  # Only debug/*-debug versions of the Kaniko image are known to work within Gitlab CI
  image: ghcr.io/kaniko-build/dist/chainguard-dev-kaniko/executor:latest-debug
  needs:
    - tar
    - job: get_jacoco_cli
      # optional: the jacoco job exists only on non-release pipelines
      optional: true
  dependencies:
    - tar
    - get_jacoco_cli
  script:
    # Release builds are tagged with the git tag and ship without the
    # JaCoCo agent; test builds use the short SHA and embed it.
    - |-
      tag=$CI_COMMIT_SHORT_SHA
      jacoco_enabled=true
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        tag=$CI_COMMIT_TAG
        jacoco_enabled=false
      fi
    - mkdir maven
    - tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
    - cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
    - mkdir -p /kaniko/.docker
    # Credentials for pushing to the GitLab project registry.
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
    - >
      /kaniko/executor
      --label dcache.build=testing
      --context $CI_PROJECT_DIR
      --dockerfile $CI_PROJECT_DIR/Dockerfile
      --build-arg JACOCO_ENABLED=${jacoco_enabled}
      --build-arg JACOCO_VERSION=${JACOCO_VERSION}
      --destination $CI_REGISTRY_IMAGE:$tag
#
# check packaged libraries: sort, drop the version and check for non unique dependencies
#
Check for duplicate dependencies:
  stage: build
  image: almalinux:10-minimal
  needs:
    - tar
  dependencies:
    - tar
  script:
    - microdnf install -y tar gzip
    - mkdir /dcache
    - tar -C /dcache --strip-components=1 -xzf packages/tar/target/dcache-*.tar.gz
    # Strip the version suffix from each packaged jar name; any name that
    # is then non-unique means the same library is packaged twice.
    # (netty-tcnative-boringssl-static is excluded as a known exception.)
    - ls -1 /dcache/share/classes/ | grep -v netty-tcnative-boringssl-static | sort | sed -e 's/\-[0-9].*$//' | uniq -D > duplicate-dependencies.txt
    # Fail the job if any duplicates were found, printing them first.
    - if [ -s duplicate-dependencies.txt ]; then cat duplicate-dependencies.txt; exit 1; fi
# Sign the RPM with the dcache.org GPG key and verify the signature.
sign_rpm:
  stage: sign
  image: almalinux:10-minimal
  needs: ["rpm"]
  script:
    - microdnf install -y rpm-sign gpg
    # DCACHE_ORG_PGP_KEY holds the base64-encoded private key.
    - echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
    - gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
    - gpg -a --export "$DCACHE_ORG_KEY_NAME" > RPM-GPG-KEY
    - rpmsign --addsign --define "_signature gpg" --define "_gpg_name $DCACHE_ORG_KEY_NAME" --define "_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase $DCACHE_ORG_PGP_KEY_PASS" packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
    # Verify the signature with the freshly exported public key.
    - rpmkeys --import RPM-GPG-KEY
    - rpm --checksig -v packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
  artifacts:
    paths:
      - packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm

# Sign the Debian package with the same key using debsigs.
sign_deb:
  stage: sign
  image: ubuntu:22.04
  needs: ["deb"]
  script:
    - apt-get -qq update
    - apt-get -qq install debsigs gpg
    - echo $DCACHE_ORG_PGP_KEY | base64 -d -i > secret.gpg
    - gpg --quiet --batch --yes --allow-secret-key-import --passphrase="$DCACHE_ORG_PGP_KEY_PASS" --import secret.gpg
    # Configure gpg for unattended (loopback pinentry) signing.
    - echo $DCACHE_ORG_PGP_KEY_PASS > $HOME/.gnupg/gpg-passphrase
    - echo "passphrase-file $HOME/.gnupg/gpg-passphrase" >> "$HOME/.gnupg/gpg.conf"
    - echo 'allow-loopback-pinentry' >> "$HOME/.gnupg/gpg-agent.conf"
    - echo 'pinentry-mode loopback' >> "$HOME/.gnupg/gpg.conf"
    - echo 'use-agent' >> "$HOME/.gnupg/gpg.conf"
    # Restart the agent so it picks up the new configuration.
    - echo RELOADAGENT | gpg-connect-agent
    - debsigs --sign=origin --verify --check -v -k "$DCACHE_ORG_KEY_NAME" packages/fhs/target/dcache_*.deb
  artifacts:
    paths:
      - packages/fhs/target/dcache_*.deb
# Smoke-test that the built RPM installs cleanly on EL9.
RPM test install on EL9:
  stage: test_deploy
  image: almalinux:9
  script:
    - dnf --nogpgcheck install -y packages/fhs/target/rpmbuild/RPMS/noarch/dcache*.rpm
#install_deb:
#  stage: test_deploy
#  image: ubuntu:21.10
#  script:
#    - apt-get update
#    - DEBIAN_FRONTEND=noninteractive apt install -y -f ./packages/fhs/target/dcache_*.deb
# Upload the signed RPM to the package store, under a major.minor directory.
upload_rpm:
  stage: upload
  image: almalinux:10-minimal
  dependencies:
    - sign_rpm
  extends: .upload_rules
  script:
    - RPM_NAME=`ls packages/fhs/target/rpmbuild/RPMS/noarch/ | grep dcache`
    # major.minor (e.g. 9.2) extracted from the package file name.
    - VERSION=`echo $RPM_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
    - curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/rpmbuild/RPMS/noarch/$RPM_NAME "$PKG_UPLOAD_URL/$VERSION/$RPM_NAME"

# Upload the signed Debian package.
upload_deb:
  stage: upload
  image: almalinux:10-minimal
  dependencies:
    - sign_deb
  extends: .upload_rules
  script:
    - DEB_NAME=`ls packages/fhs/target/ | grep dcache`
    - VERSION=`echo $DEB_NAME | cut -d'_' -f 2 | cut -d'.' -f 1,2`
    - curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/fhs/target/$DEB_NAME "$PKG_UPLOAD_URL/$VERSION/$DEB_NAME"

# Upload the (unsigned) tarball package.
upload_tar:
  stage: upload
  image: almalinux:10-minimal
  dependencies:
    - tar
  extends: .upload_rules
  script:
    - TAR_NAME=`ls packages/tar/target/ | grep dcache`
    - VERSION=`echo $TAR_NAME | cut -d'-' -f 2 | cut -d'.' -f 1,2`
    - curl $PKG_UPLOAD_OPTIONS -u $PKG_UPLOAD_USER:$PKG_UPLOAD_PASS --upload-file packages/tar/target/$TAR_NAME "$PKG_UPLOAD_URL/$VERSION/$TAR_NAME"
# Push the release image to Docker Hub, tagged with both the full release
# tag and the major.minor "branch latest" tag.
upload_container:
  stage: upload
  # For latest releases see https://github.com/GoogleContainerTools/kaniko/releases
  # Only debug/*-debug versions of the Kaniko image are known to work within Gitlab CI
  image: ghcr.io/kaniko-build/dist/chainguard-dev-kaniko/executor:latest-debug
  dependencies:
    - tar
  extends: .upload_rules
  script:
    - |-
      tag=$CI_COMMIT_TAG
      branch_latest=`echo $CI_COMMIT_TAG | cut -d '.' -f 1,2`
    - mkdir maven
    - tar -C maven --strip-components=1 -xzvf packages/tar/target/dcache-*.tar.gz
    - cp $CI_PROJECT_DIR/packages/tar/src/main/container/* .
    - mkdir -p /kaniko/.docker
    # Docker Hub credentials (unlike the testing build, which pushes to
    # the GitLab registry).
    - echo "{\"auths\":{\"https://index.docker.io/v1/\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_ACCESS_KEY\"}}}" > /kaniko/.docker/config.json
    - >
      /kaniko/executor
      --label dcache.build=GA
      --context $CI_PROJECT_DIR
      --dockerfile $CI_PROJECT_DIR/Dockerfile
      --destination dcache/dcache:$tag --destination dcache/dcache:$branch_latest
#
# This jobs needs that the number of changes to fetch from GitLab when cloning a repository is high enough to generate
# the changelog.
# Generate release notes from the git log via an LLM CLI. Requires the
# GitLab fetch depth to be large enough to cover the whole release range.
AI Generated Release Notes:
  image: almalinux:10-minimal
  stage: upload
  extends: .upload_rules
  # Best effort: a release must not fail on notes generation.
  allow_failure: true
  dependencies:
    - sign_deb
    - sign_rpm
    - tar
  script:
    - microdnf install -y git-core
    - git fetch --refetch --all --tags
    - .ci/generate-changelog.sh >> release-$CI_COMMIT_TAG.md
    - curl -L -o chatgpt https://github.com/kardolus/chatgpt-cli/releases/latest/download/chatgpt-linux-amd64 && chmod +x chatgpt
    # Previous release tag: the start of the changelog range.
    - LAST_TAG=$(.ci/get-git-version.sh $CI_COMMIT_TAG)
    # Feed the commit log (release-plugin commits excluded) to the LLM and
    # append its summary to the release notes file.
    - |-
      git log --grep=maven-release-plugin --invert-grep $LAST_TAG..$CI_COMMIT_TAG | \
      OPENAI_API_KEY=$LLM_API_KEY \
      OPENAI_URL=$LLM_API_ENDPOINT \
      OPENAI_MODEL=$LLM_MODEL \
      OPENAI_COMPLETIONS_PATH=$LLM_COMPLETIONS_PATH \
      OPENAI_ROLE="You are a helpful tech writer working on release notes of the dCache project, ignoring commits with messages starting with '[maven-release-plugin]'." \
      ./chatgpt "$LLM_PROMPT" | \
      tee -a release-$CI_COMMIT_TAG.md
  artifacts:
    paths:
      - release-*.md
#
# Prepare releases download table in html format
#
# Prepare the releases download table; generate-releases-xml.sh writes an
# XML fragment named release-<tag>.xml.
Release Notes Table:
  image: almalinux:10-minimal
  stage: upload
  extends: .upload_rules
  allow_failure: true
  dependencies:
    - sign_deb
    - sign_rpm
    - tar
  script:
    - microdnf install -y git-core
    - git fetch --refetch --all --tags
    - .ci/generate-releases-xml.sh >> release-$CI_COMMIT_TAG.xml
  artifacts:
    paths:
      # Fixed: was "release1-*.md", which matched nothing — the script
      # above produces release-$CI_COMMIT_TAG.xml.
      - release-*.xml
#
# prepare kubernetes env for the build
#
# needed for minimal pipeline run
Prepare k8s environment:
  stage: testenv_pre
  extends: .kubernetes_image
  script:
    # Per-pipeline namespace; all test deployments below live in it and
    # "Clean k8s environment" deletes it.
    - kubectl create namespace ${K8S_NAMESPACE}
#
# collect all logs
#
# Collect namespace events, pod logs and the kafka billing topic before
# the namespace is destroyed. Runs even when earlier stages failed.
Collect container logs:
  stage: testenv_post
  extends: .kubernetes_image
  when: always
  allow_failure: true
  script:
    - kubectl -n $K8S_NAMESPACE events | tee $K8S_NAMESPACE-events.log
    # Logs of all running pods, concatenated.
    - kubectl -n $K8S_NAMESPACE get pods | grep Running | awk '{print $1}' | xargs -n1 kubectl -n $K8S_NAMESPACE logs | tee $K8S_NAMESPACE.log
    # Dump the billing kafka topic from the beginning.
    - kubectl -n $K8S_NAMESPACE run -i --rm --restart=Never -q --image=edenhill/kcat:1.7.1 kcat -- kcat -C -t billing -b billing-kafka:9092 -p 0 -e -q -o beginning | tee $K8S_NAMESPACE-billing.json
  artifacts:
    name: "logs-$CI_PIPELINE_ID"
    paths:
      # Fixed: the events log was collected above but never uploaded.
      - "$K8S_NAMESPACE-events.log"
      - "$K8S_NAMESPACE.log"
      - "$K8S_NAMESPACE-billing.json"
#
# dispose kubernetes resources
#
Clean k8s environment:
  stage: testenv_post
  extends: .kubernetes_image
  needs:
    - Collect container logs
    - job: Extract Coverage
      # optional: coverage jobs exist only on non-release pipelines
      optional: true
  # Always dispose the namespace, even after failures.
  when: always
  script:
    - kubectl delete namespace ${K8S_NAMESPACE} --grace-period=1 --ignore-not-found=true
#
# infrastructure required to run dCache
#
# Database backend for dCache services.
Deploy PostgreSQL:
  stage: test_infra
  extends: .kubernetes_image
  script:
    - kubectl -n ${K8S_NAMESPACE} apply -f .ci/k8s/postgresql-deployment.yaml
    - kubectl -n ${K8S_NAMESPACE} wait --for=condition=Ready pod/postgres-0
#
# infrastructure required to run dCache
#
Deploy Zookeeper:
  stage: test_infra
  extends: .kubernetes_image
  script:
    - kubectl -n ${K8S_NAMESPACE} apply -f .ci/k8s/zookeeper-deployment.yaml
    - kubectl -n ${K8S_NAMESPACE} wait --for=condition=Ready pod/zookeeper-0
#
# infrastructure required to run dCache
#
Deploy Kafka:
  stage: test_infra
  extends: .kubernetes_image
  script:
    - kubectl -n ${K8S_NAMESPACE} apply -f .ci/k8s/kafka-deployment.yaml
    - kubectl -n ${K8S_NAMESPACE} wait --for=condition=Ready pod/kafka-0
    # Pre-create the billing topic consumed by "Collect container logs".
    - kubectl -n ${K8S_NAMESPACE} exec kafka-0 -- /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic billing
#
# MinIO standing in as the tape (HSM) backend, with a single "hsm" bucket.
#
Deploy MinIO as Tape:
  stage: test_infra
  extends: .helm_image
  script:
    - helm repo add minio https://charts.min.io/
    - helm repo update
    - helm -n ${K8S_NAMESPACE} install ${HELM_OPTS} --set-string rootUser=dcache --set-string rootPassword=let-me-in --set replicas=1 --set persistence.enabled=false --set mode=standalone --set buckets[0].name=hsm,buckets[0].policy=none,buckets[0].purge=false --wait tape minio/minio
#
# OIDC provided for token-based testing
#
Deploy Keycloak:
  stage: test_infra
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/k8s/keycloack-deployment.yaml
# FIXME: add readiness check
#  - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready deployment.apps/keycloak; do sleep 1; done
#
# Init dCache database with old version. On start new version will perform migration
#
# Initialize the dCache database schema with an old version; the current
# build then performs the schema migration on startup.
Init dCache DB with old version:
  stage: test_deploy
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/k8s/dcache-deployment.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod dcache-db-migration-0; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE exec dcache-db-migration-0 -- /opt/dcache/bin/dcache database update
#
# Deploy the dCache build under test via the helm chart.
#
Helm-Deploy Current dCache build:
  stage: test_deploy
  extends: .helm_image
  needs:
    - Init dCache DB with old version
  script:
    # Same tag selection as the container build job: git tag on releases,
    # short SHA otherwise.
    - |-
      tag=$CI_COMMIT_SHORT_SHA
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        tag=$CI_COMMIT_TAG
      fi
    - helm repo add dcache ${DCACHE_HELM_REPO}
    - helm repo update
    - >
      helm -n ${K8S_NAMESPACE} upgrade --install store dcache/dcache
      --wait
      --timeout=20m
      --set image.registry=${CI_REGISTRY}
      --set image.repository=${CI_PROJECT_PATH}
      --set image.tag=${tag}
      --set dcache.hsm.enabled=true
      --set dcache.qos.enabled=true
      --set services.srm.enabled=false
      --values .ci/custom-helm-config.yaml
# Deploy pools from the latest golden release next to the current build,
# to exercise cross-version compatibility.
Helm-Deploy Latest dCache Golden Release:
  stage: test_deploy
  extends: .helm_image
  needs:
    - Helm-Deploy Current dCache build
  script:
    - helm repo add dcache ${DCACHE_HELM_REPO}
    - helm repo update
    - >
      helm -n ${K8S_NAMESPACE} upgrade --install old-store dcache/dcache
      --wait
      --set image.registry=${CI_REGISTRY}
      --set image.repository=${CI_PROJECT_PATH}
      --set image.tag=${DCACHE_COMPATIBILITY_VERSION}
      --set "dcache.pools={d,f}"
      --set dcache.door.enabled=false
# Pull JaCoCo runtime coverage (.exec) and the agent-woven classes out of
# every running dCache pod of the "store" release, for the report job.
Extract Coverage:
  stage: testenv_post
  extends:
    - .kubernetes_image
    - .no_release_rules
  dependencies:
    - get_jacoco_cli
  needs:
    - job: get_jacoco_cli
      artifacts: true
    # Wait for the last test job so coverage is complete.
    - job: NFS4.x protocol compliance tests
  script:
    - JACOCO_CLI_PATH="jacoco-${JACOCO_VERSION}/lib/jacococli.jar"
    - PODS=$(kubectl -n $K8S_NAMESPACE get pods -l app.kubernetes.io/instance=store -o jsonpath='{.items[*].metadata.name}')
    # For each pod: copy the CLI in, dump coverage over TCP from the
    # in-process JaCoCo agent (port 6300), then copy .exec and woven
    # classes back out. Failures per pod are logged but not fatal.
    - |
      for POD in $PODS; do
        echo "Collecting coverage and woven classes from $POD..."
        mkdir -p "./integration-results/classes-dump/${POD}"
        kubectl -n $K8S_NAMESPACE cp "$JACOCO_CLI_PATH" "${POD}:/tmp/jacococli.jar"
        POD_IP=$(kubectl -n $K8S_NAMESPACE get pod $POD -o jsonpath='{.status.podIP}')
        if kubectl -n $K8S_NAMESPACE exec $POD -- \
            /usr/bin/java -jar /tmp/jacococli.jar dump \
            --address $POD_IP --port 6300 --destfile /tmp/jacoco-it.exec; then
          kubectl -n $K8S_NAMESPACE cp "${POD}:/tmp/jacoco-it.exec" "./integration-results/${POD}.exec"
          echo "Pulling woven classes from $POD..."
          if kubectl -n $K8S_NAMESPACE cp "${POD}:/opt/dcache/var/log/jacoco-dump/." "./integration-results/classes-dump/${POD}/"; then
            echo "Successfully collected woven classes from $POD"
          else
            echo "Warning: Could not find dumped classes on $POD"
          fi
        else
          echo "Failed to dump coverage from $POD"
        fi
      done
  artifacts:
    paths:
      - integration-results/*.exec
      - integration-results/classes-dump/
    when: always
# Grid middleware tests from a cvmfs-equipped EL9 worker-node pod.
Grid EL9 WN tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod grid-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh grid-tester:/init-el9-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-grid-tests.sh grid-tester:/run-grid-tests.sh
    - kubectl -n $K8S_NAMESPACE exec grid-tester -- /bin/sh /run-grid-tests.sh
    # Pull the junit results back for GitLab's test report.
    - kubectl -n $K8S_NAMESPACE cp grid-tester:/xunit .
  artifacts:
    reports:
      junit:
        - "xunit*.xml"

# REST API tests driven by the IntelliJ HTTP client against the frontend.
Frontend test suite:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/frontend.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod http-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE cp .ci/poolEndpoint.http http-tester:/poolEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/qosEndpoint.http http-tester:/qosEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/qosPolicyEndpoint.http http-tester:/qosPolicyEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/migrationEndpoint.http http-tester:/migrationEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/userEndpoint.http http-tester:/userEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/bulkRequestsEndpoint.http http-tester:/bulkRequestsEndpoint.http
    - kubectl -n $K8S_NAMESPACE cp .ci/spacemanagerEndpoint.http http-tester:/spacemanagerEndpoint.http
    # README.md is used as an upload fixture by the endpoint tests.
    - kubectl -n $K8S_NAMESPACE cp README.md http-tester:/README.md
    - kubectl -n $K8S_NAMESPACE cp http-client.private.env.json http-tester:/http-client.private.env.json
    - kubectl -n $K8S_NAMESPACE exec http-tester -- java --add-opens=java.base/java.util=ALL-UNNAMED $IJHTTP_JAVA_OPTS -cp /intellij-http-client/\* com.intellij.httpClient.cli.HttpClientMain -e Test -D /poolEndpoint.http /qosEndpoint.http /qosPolicyEndpoint.http /migrationEndpoint.http /userEndpoint.http /bulkRequestsEndpoint.http /spacemanagerEndpoint.http -p /http-client.private.env.json --insecure --report=/httpTests
    - kubectl -n $K8S_NAMESPACE cp http-tester:/httpTests/report.xml pool-report.xml
  artifacts:
    reports:
      junit:
        - "*-report.xml"

# xrootd protocol tests with GSI authentication.
gsi_xroot_tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/wn-with-cvmfs-xroot.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod xroot-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh xroot-tester:/init-el9-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-xroot-tests.sh xroot-tester:/run-xroot-tests.sh
    - kubectl -n $K8S_NAMESPACE exec xroot-tester -- /bin/sh /run-xroot-tests.sh

# WebDAV tests with X.509 client authentication.
webdav_with_x509_tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE apply -f .ci/webdav-wn-cvmfs.yaml
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod webdav-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE cp .ci/init-el9-ui.sh webdav-tester:/init-el9-ui.sh
    - kubectl -n $K8S_NAMESPACE cp .ci/run-webdav-tests.sh webdav-tester:/run-webdav-tests.sh
    - kubectl -n $K8S_NAMESPACE exec webdav-tester -- /bin/sh /run-webdav-tests.sh
# pynfs protocol compliance suites against the NFS door. Known-failing
# tests are excluded via the no<NAME> arguments. Each pynfs run ends with
# "exit 0" so the junit XML is always produced; pass/fail is decided
# afterwards from the error/failure counters in the reports.
NFS4.x protocol compliance tests:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE run pynfs-tester --image=dcache/pynfs:0.5 --restart=Never --command -- sleep 3600
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod pynfs-tester; do sleep 1; done
    # Seed the test tree first (result ignored).
    - kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --maketree store-door-svc:/data OPEN5; exit 0"
    # NFSv4.0 suite.
    - |-
      kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.0.sh --xml=/xunit-report-v40.xml \
      --noinit store-door-svc:/data all \
      noACC2a noACC2b noACC2c noACC2d noACC2f noACC2r noACC2s \
      noCID1 noCID2 noCID4a noCID4b noCID4c noCID4d noCID4e \
      noCLOSE8 noCLOSE9 noCLOSE10 noCLOSE12 noCLOSE6 \
      noCMT1aa noCMT1b noCMT1c noCMT1d noCMT1e noCMT1f noCMT2a noCMT2b noCMT2c noCMT2d noCMT2f \
      noCMT2s noCMT3 noCMT4 noCR12 noLKT1 noLKT2a noLKT2b noLKT2c noLKT2d noLKT2f noLKT2s noLKT3 \
      noLKT4 noLKT6 noLKT7 noLKT8 noLKT9 noLKU7 \
      noLKUNONE noLOCK13 noLOCKRNG noLOCKCHGU noLOCKCHGD noRLOWN3 \
      noOPCF1 noOPCF6 noOPDG2 noOPDG3 noOPDG6 noOPDG7 noOPEN15 noOPEN18 noOPEN2 noOPEN20 noOPEN22 \
      noOPEN23 noOPEN24 noOPEN26 noOPEN27 noOPEN28 noOPEN3 noOPEN30 noOPEN4 noRENEW3 noRD1 noRD10 \
      noRD2 noRD3 noRD5 noRD5a noRD6 noRD7a noRD7b noRD7c noRD7d noRD7f noRD7s noRDDR12 noRDDR11 \
      noRPLY1 noRPLY10 noRPLY12 \
      noRPLY14 noRPLY2 noRPLY3 noRPLY5 noRPLY6 noRPLY7 noRPLY8 noRPLY9 \
      noSEC7 noWRT1 noWRT11 noWRT13 noWRT14 noWRT15 noWRT18 noWRT19 noWRT1b noWRT2 \
      noWRT3 noWRT6a noWRT6b noWRT6c noWRT6d noWRT6f noWRT6s noWRT8 noWRT9; \
      exit 0"
    # NFSv4.2 suite (minorversion=2), including xattr tests.
    - |-
      kubectl -n $K8S_NAMESPACE exec pynfs-tester -- /bin/bash -c "/run-nfs4.1.sh --minorversion=2 --xml=/xunit-report-v41.xml \
      --noinit store-door-svc:/data all xattr \
      noCOUR2 noCSESS25 noCSESS26 noCSESS27 noCSESS28 noCSESS29 noCSID3 noCSID4 noCSID9 noEID5f \
      noEID50 noOPEN31 noSEQ6 noRECC3 noSEQ7 noSEQ10b noSEQ2 noXATT11 noXATT10 noALLOC1 noALLOC2 noALLOC3; \
      exit 0"
    - kubectl -n $K8S_NAMESPACE cp pynfs-tester:/xunit-report-v40.xml xunit-report-v40.xml
    - kubectl -n $K8S_NAMESPACE cp pynfs-tester:/xunit-report-v41.xml xunit-report-v41.xml
    # Sum the errors/failures attributes of all testsuite elements.
    - nfs40_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
    - nfs40_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v40.xml)) ))
    - nfs41_errors=$(( $(echo 0$(sed -n 's/.*testsuite .*errors=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
    - nfs41_failures=$(( $(echo 0$(sed -n 's/.*testsuite .*failures=\"\([0-9]*\)\".*/+\1/p' xunit-report-v41.xml)) ))
    # Non-zero total fails the job.
    - exit $(( $nfs40_errors + $nfs41_errors + $nfs40_failures + $nfs41_failures ))
  environment: testing
  artifacts:
    reports:
      junit:
        - "xunit*.xml"
# Token-based (OIDC) authentication test against the Keycloak deployment.
Run OIDC test:
  stage: testing
  extends: .kubernetes_image
  script:
    - kubectl -n $K8S_NAMESPACE run oidc-tester --image=almalinux:9 --restart=Never --command -- sleep 3600
    - while ! kubectl -n $K8S_NAMESPACE wait --for=condition=Ready pod oidc-tester; do sleep 1; done
    - kubectl -n $K8S_NAMESPACE cp .ci/run-oidc-test.sh oidc-tester:/run-oidc-test.sh
    - kubectl -n $K8S_NAMESPACE exec oidc-tester -- /bin/sh /run-oidc-test.sh
#Job to find JaCoCo CLI in cache or download it
# Find the JaCoCo CLI in the cache or download it (get-jacoco.sh).
get_jacoco_cli:
  stage: build
  extends: .jacoco_base
  cache:
    # Override .jacoco_base's pull-only policy so this job can populate
    # the cache on a miss.
    key: "jacoco-cli-${JACOCO_VERSION}"
    paths:
      - "jacoco-${JACOCO_VERSION}/"
    policy: pull-push
  # Replaces the global debug before_script; the temurin image is
  # debian-based, hence apt-get.
  before_script:
    - apt-get update && apt-get install -y unzip wget
  script:
    - ./get-jacoco.sh
  artifacts:
    paths:
      - "jacoco-${JACOCO_VERSION}/lib/*.jar"
    expire_in: 1 week

# Merge unit-test and integration .exec files into the final JaCoCo
# coverage report (generate-jacoco-report.sh).
generate_jacoco_report:
  stage: testenv_post
  extends: .jacoco_base
  needs:
    - job: run_ut_with_coverage
      artifacts: true
    - job: get_jacoco_cli
    - job: Extract Coverage
      artifacts: true
  script:
    - ./generate-jacoco-report.sh
  artifacts:
    paths:
      - target/coverage-reports/site/
    reports:
      coverage_report:
        coverage_format: jacoco
        path: target/coverage-reports/jacoco.xml
    expire_in: 1 week