-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcluster.sh
More file actions
executable file
·201 lines (177 loc) · 7.03 KB
/
cluster.sh
File metadata and controls
executable file
·201 lines (177 loc) · 7.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
#!/bin/bash
#
# cluster.sh — control script for the database-backup e2e test cluster.
#
# Usage: ./cluster.sh <command>   (run with no arguments for the command list)
#
# Starts a docker-compose stack (Kind cluster + s3proxy + MinIO), runs the
# per-database backup tests inside the `db-test` service container, and tears
# everything down afterwards. Requires docker with the compose plugin.
set -euo pipefail

# Operate from the script's own directory so docker-compose.yml resolves
# regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

readonly COMPOSE_FILE="docker-compose.yml"

case "${1:-help}" in
  up)
    echo "=== Starting database test cluster ==="
    docker compose -f "$COMPOSE_FILE" up --build -d
    echo "Waiting for cluster to be ready..."
    # Stream compose logs until the readiness marker appears. Process
    # substitution (rather than the old detached `logs -f &` pipeline) keeps
    # the loop in this shell and lets us kill the log follower explicitly
    # instead of leaking it after break.
    while IFS= read -r line; do
      printf '%s\n' "$line"
      if [[ "$line" == *"Cluster is ready"* ]]; then
        break
      fi
    done < <(docker compose -f "$COMPOSE_FILE" logs -f)
    # $! holds the pid of the last process substitution (bash >= 4.4).
    kill "$!" 2>/dev/null || true
    echo ""
    echo "=========================================="
    echo "Cluster is running in background."
    echo ""
    echo "Admin dashboard: http://localhost:4433/admin/"
    echo "Login: minioadmin / minioadmin"
    echo ""
    echo "Run tests:"
    echo "  ./cluster.sh postgres"
    echo "  ./cluster.sh elasticsearch"
    echo "  ./cluster.sh scylla"
    echo ""
    echo "Or open a shell:"
    echo "  ./cluster.sh shell"
    echo ""
    echo "Cleanup when done:"
    echo "  ./cluster.sh down"
    echo "=========================================="
    ;;
  status)
    # Advertised in the help text; shows the compose services' state.
    docker compose -f "$COMPOSE_FILE" ps
    ;;
  logs)
    # Advertised in the help text; follows the cluster logs until Ctrl-C.
    docker compose -f "$COMPOSE_FILE" logs -f
    ;;
  shell)
    # Advertised in the help text; interactive shell in the test container.
    docker compose -f "$COMPOSE_FILE" exec db-test bash
    ;;
  postgres)
    echo "=== Running PostgreSQL (CloudNativePG) test ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./postgres/test.sh
    ;;
  elasticsearch|es)
    echo "=== Running Elasticsearch (ECK) test ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./elasticsearch/test.sh
    ;;
  scylla)
    echo "=== Running ScyllaDB test ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./scylla/test.sh
    ;;
  clickhouse|ch)
    echo "=== Running ClickHouse test ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./clickhouse/test.sh
    ;;
  s3-compat|s3)
    echo "=== Running S3 Compatibility (Ceph s3-tests) ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./s3-compatibility/test.sh
    ;;
  all)
    echo "=== Running all database tests ==="
    docker compose -f "$COMPOSE_FILE" exec db-test ./postgres/test.sh
    docker compose -f "$COMPOSE_FILE" exec db-test ./elasticsearch/test.sh
    docker compose -f "$COMPOSE_FILE" exec db-test ./scylla/test.sh
    docker compose -f "$COMPOSE_FILE" exec db-test ./clickhouse/test.sh
    echo "=== All tests completed ==="
    ;;
  load-test)
    echo "=== Running S3 load test (3 concurrent 10MB uploads) ==="
    # NOTE: the single-quoted script below runs inside the db-test container;
    # its $vars are expanded by that container's shell, and the \$-escaped
    # vars one level deeper, inside the kubectl-run pod's shell.
    docker compose -f "$COMPOSE_FILE" exec db-test sh -c '
      PODS=$(kubectl get pods -n s3proxy -l app=s3proxy-python -o jsonpath="{.items[*].metadata.name}")
      POD_COUNT=$(echo $PODS | wc -w)
      echo "Found $POD_COUNT s3proxy pods: $PODS"
      mkdir -p /tmp/lb-test
      for pod in $PODS; do
        kubectl logs $pod -n s3proxy 2>/dev/null | wc -l > /tmp/lb-test/$pod.start
      done
      echo "=== Creating test pod with AWS CLI ==="
      kubectl run s3-load-test -n s3proxy --rm -i --restart=Never \
        --image=amazon/aws-cli:latest \
        --env="AWS_ACCESS_KEY_ID=minioadmin" \
        --env="AWS_SECRET_ACCESS_KEY=minioadmin" \
        --env="AWS_DEFAULT_REGION=us-east-1" \
        --command -- /bin/sh -c "
          aws --endpoint-url http://s3-gateway.s3proxy s3 mb s3://load-test-bucket 2>/dev/null || true
          echo \"Generating 10MB test files...\"
          mkdir -p /tmp/testfiles
          for i in 1 2 3; do
            dd if=/dev/urandom of=/tmp/testfiles/file-\$i.bin bs=1M count=10 2>/dev/null &
          done
          wait
          ls -lh /tmp/testfiles/
          echo \"=== Starting concurrent uploads ===\"
          START=\$(date +%s)
          for i in 1 2 3; do
            aws --endpoint-url http://s3-gateway.s3proxy s3 cp /tmp/testfiles/file-\$i.bin s3://load-test-bucket/file-\$i.bin &
          done
          wait
          END=\$(date +%s)
          echo \"Upload complete in \$((END - START))s\"
          echo \"=== Verifying uploads ===\"
          aws --endpoint-url http://s3-gateway.s3proxy s3 ls s3://load-test-bucket/
          echo \"=== Downloading and verifying ===\"
          mkdir -p /tmp/downloads
          for i in 1 2 3; do
            aws --endpoint-url http://s3-gateway.s3proxy s3 cp s3://load-test-bucket/file-\$i.bin /tmp/downloads/file-\$i.bin &
          done
          wait
          md5sum /tmp/testfiles/*.bin > /tmp/orig.md5
          md5sum /tmp/downloads/*.bin > /tmp/down.md5
          ORIG_SUMS=\$(cat /tmp/orig.md5 | while read sum name; do echo \$sum; done | sort)
          DOWN_SUMS=\$(cat /tmp/down.md5 | while read sum name; do echo \$sum; done | sort)
          if [ \"\$ORIG_SUMS\" = \"\$DOWN_SUMS\" ]; then
            echo \"✓ Checksums match - round-trip successful\"
          else
            echo \"✗ Checksum mismatch!\"
            exit 1
          fi
        "
      echo ""
      echo "=== Checking load balancing ==="
      sleep 2
      PODS_HIT=0
      for pod in $PODS; do
        START_LINE=$(cat /tmp/lb-test/$pod.start 2>/dev/null || echo "0")
        REQUEST_COUNT=$(kubectl logs $pod -n s3proxy 2>/dev/null | tail -n +$((START_LINE + 1)) | grep -c -E "GET|POST|PUT|HEAD" || echo "0")
        if [ "$REQUEST_COUNT" -gt 0 ]; then
          PODS_HIT=$((PODS_HIT + 1))
          echo "✓ Pod $pod: received $REQUEST_COUNT requests"
        else
          echo "  Pod $pod: received 0 requests"
        fi
      done
      rm -rf /tmp/lb-test
      if [ "$PODS_HIT" -ge 2 ]; then
        echo "✓ Load balancing verified - traffic distributed across $PODS_HIT pods"
      else
        echo "⚠ Traffic went to only $PODS_HIT pod(s)"
      fi
    '
    ;;
  down)
    echo "=== Cleaning up ==="
    docker compose -f "$COMPOSE_FILE" down 2>/dev/null || true
    # Remove all e2e volumes EXCEPT registry cache
    docker volume ls -q --filter name=e2e_ | grep -v e2e_registry-data | xargs -r docker volume rm 2>/dev/null || true
    # Remove anonymous volumes (64-char hex names from Kind/Docker)
    docker volume ls -q | grep -E '^[a-f0-9]{64}$' | xargs -r docker volume rm 2>/dev/null || true
    # Delete Kind cluster containers directly
    docker rm -f db-backup-test-control-plane db-backup-test-worker db-backup-test-worker2 db-backup-test-worker3 2>/dev/null || true
    # Clean up Kind network
    docker network rm kind 2>/dev/null || true
    echo "Cleanup complete (registry cache preserved)"
    ;;
  *)
    echo "Usage: $0 <command>"
    echo ""
    echo "Commands:"
    echo "  up            - Start Kind cluster + s3proxy + MinIO"
    echo "  down          - Stop and cleanup everything"
    echo "  status        - Show cluster status"
    echo "  logs          - Show cluster logs"
    echo "  shell         - Open shell in test container"
    echo ""
    echo "Tests:"
    echo "  load-test     - Run S3 load test (upload/download verification)"
    echo "  s3-compat     - Run S3 compatibility tests (Ceph s3-tests)"
    echo "  postgres      - Run PostgreSQL (CloudNativePG) backup test"
    echo "  elasticsearch - Run Elasticsearch (ECK) backup test"
    echo "  scylla        - Run ScyllaDB backup test"
    echo "  clickhouse    - Run ClickHouse backup test"
    echo "  all           - Run all database backup tests"
    echo ""
    echo "Example:"
    echo "  ./cluster.sh up"
    echo "  ./cluster.sh load-test"
    echo "  ./cluster.sh down"
    echo ""
    exit 1
    ;;
esac