# Source: workflow file for run "Bump softprops/action-gh-release from 2.2.2 to 3.0.0" (#143)
---
# CI workflow: builds/tests all Go and Python services, lints shell scripts,
# validates YAML configs, enforces pinning/supply-chain/doc-consistency checks,
# and runs a hardened gate on release/stable branches.
name: CI

on:
  push:
    branches: [main, 'release/**', stable]
  pull_request:
  workflow_dispatch:

# One in-flight run per ref; newer pushes cancel older runs.
concurrency:
  group: ci-${{ github.ref }}
  cancel-in-progress: true

# Minimal permissions by default
permissions:
  contents: read

jobs:
  go-build-and-test:
    name: Go Build & Test
    runs-on: ubuntu-latest
    permissions:
      contents: read
    strategy:
      matrix:
        service: [airlock, registry, tool-firewall, gpu-integrity-watch, mcp-firewall, policy-engine, runtime-attestor, integrity-monitor, incident-recorder]
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
          cache-dependency-path: services/${{ matrix.service }}/go.sum
      - name: Build
        working-directory: services/${{ matrix.service }}
        run: CGO_ENABLED=0 go build -ldflags="-s -w" -o /dev/null .
      - name: Test
        working-directory: services/${{ matrix.service }}
        run: go test -v -race -count=1 ./...
      - name: Vet
        working-directory: services/${{ matrix.service }}
        run: go vet ./...

  python-test:
    name: Python Test & Lint
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - name: Install dependencies (pinned)
        run: pip install -r requirements-ci.txt
      - name: Lint (syntax check)
        run: |
          python -m py_compile services/ui/ui/app.py
          python -m py_compile services/diffusion-worker/app.py
          python -m py_compile services/common/audit_chain.py
          python -m py_compile services/common/auth.py
          python -m py_compile services/common/mlock_helper.py
          python -m py_compile services/agent/agent/app.py
          python -m py_compile services/agent/agent/models.py
          python -m py_compile services/agent/agent/policy.py
          python -m py_compile services/agent/agent/planner.py
          python -m py_compile services/agent/agent/executor.py
          python -m py_compile services/agent/agent/storage.py
          python -m py_compile services/agent/agent/capabilities.py
          python -m py_compile services/agent/agent/sandbox.py
      - name: Ruff lint
        run: ruff check services/ tests/ --select E,F,W --ignore E501,E402
      - name: Bandit security scan
        run: |
          # Fail on high-severity + high-confidence findings.
          # Medium/low findings are reported as warnings.
          bandit -r services/ -ll --skip B101,B404,B603 -f json -o /tmp/bandit.json || true
          python3 -c "
          import json, sys
          with open('/tmp/bandit.json') as f:
              data = json.load(f)
          high = [r for r in data.get('results', [])
                  if r['issue_severity'] == 'HIGH' and r['issue_confidence'] == 'HIGH']
          for r in data.get('results', []):
              sev = r['issue_severity']
              msg = f\"{r['filename']}:{r['line_number']}: [{sev}] {r['issue_text']}\"
              if sev == 'HIGH':
                  print(f'::error ::{msg}')
              else:
                  print(f'::warning ::{msg}')
          if high:
              print(f'FAIL: {len(high)} high-severity/high-confidence finding(s)')
              sys.exit(1)
          print('OK: no high-severity/high-confidence findings')
          "
      - name: Mypy type check (security-sensitive services)
        run: |
          mypy --ignore-missing-imports \
            services/common/ \
            services/agent/agent/ \
            services/quarantine/quarantine/ \
            services/ui/ui/
      - name: Test (unit + integration)
        env:
          PYTHONPATH: services
        run: python -m pytest tests/ -v --ignore=tests/test_adversarial.py --ignore=tests/test_m5_acceptance.py -x
      - name: Test (adversarial + acceptance)
        env:
          PYTHONPATH: services
        run: python -m pytest tests/test_adversarial.py tests/test_m5_acceptance.py -v --tb=short

  shellcheck:
    name: Shell Script Lint
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Lint shell scripts
        run: |
          shellcheck -s bash \
            files/system/usr/libexec/secure-ai/*.sh \
            files/scripts/build-services.sh \
            files/scripts/generate-mok.sh \
            files/scripts/first-boot-check.sh \
            files/scripts/verify-release.sh

  policy-validate:
    name: Validate YAML configs
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - name: Install pyyaml
        run: pip install pyyaml
      - name: Validate YAML files
        # Exit code is the number of files that failed to parse (0 = success).
        run: |
          python -c "
          import yaml, sys, glob
          errors = 0
          for pattern in ['files/system/etc/secure-ai/**/*.yaml', 'recipes/*.yml']:
              for f in glob.glob(pattern, recursive=True):
                  try:
                      with open(f) as fh:
                          yaml.safe_load(fh)
                      print(f'OK: {f}')
                  except Exception as e:
                      print(f'FAIL: {f}: {e}')
                      errors += 1
          sys.exit(errors)
          "

  image-ref-consistency:
    name: Image Reference Consistency
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Check for wrong image references
        run: |
          CANONICAL="ghcr.io/secai-hub/secai_os"
          # Known wrong patterns (container image refs only)
          WRONG_PATTERNS=(
            "ghcr.io/sec_ai/secai_os"
          )
          ERRORS=0
          for pattern in "${WRONG_PATTERNS[@]}"; do
            # Exclude files that legitimately contain wrong patterns as string
            # literals for detection purposes (this CI job + the consistency test)
            MATCHES=$(grep -rn \
              --include='*.sh' --include='*.py' --include='*.yaml' \
              --include='*.yml' --include='*.md' --include='*.json' \
              --exclude='ci.yml' \
              --exclude='test_image_ref_consistency.py' \
              "$pattern" . 2>/dev/null \
              | grep -v '.git/' | grep -v 'node_modules/' \
              || true)
            if [ -n "$MATCHES" ]; then
              echo "::error::Found wrong image reference '$pattern':"
              echo "$MATCHES"
              ERRORS=$((ERRORS + $(echo "$MATCHES" | wc -l)))
            fi
          done
          if [ "$ERRORS" -gt 0 ]; then
            echo ""
            echo "::error::Found $ERRORS wrong image reference(s)."
            echo "All container image references must use: $CANONICAL"
            exit 1
          fi
          echo "OK: All image references use canonical path: $CANONICAL"

  check-pins:
    name: Verify action & container pins
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - run: bash .github/scripts/check-action-pins.sh
      - run: bash .github/scripts/check-container-pins.sh

  supply-chain-verify:
    name: Supply Chain & SBOM Verification
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
      - name: Install Syft (SBOM generator)
        # NOTE(review): this fetches install.sh from the upstream 'main' branch
        # and pipes it to sh — unpinned, unlike everything else in this repo.
        # Consider pinning a Syft release tag + checksum. TODO confirm policy.
        run: curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
      - name: Install cosign (signing & attestation)
        run: |
          COSIGN_VERSION="v2.4.3"
          curl -sSfL "https://github.com/sigstore/cosign/releases/download/${COSIGN_VERSION}/cosign-linux-amd64" \
            -o /usr/local/bin/cosign
          chmod +x /usr/local/bin/cosign
      - name: Verify SBOM generation (Go services)
        run: |
          echo "=== SBOM generation verification ==="
          for svc in airlock registry tool-firewall gpu-integrity-watch mcp-firewall \
            policy-engine runtime-attestor integrity-monitor incident-recorder; do
            echo "--- ${svc} ---"
            syft dir:services/${svc} -o cyclonedx-json=/dev/null
            echo "OK: ${svc} SBOM generated"
          done
      - name: Verify SBOM generation (Python services)
        run: |
          for svc in agent ui quarantine common diffusion-worker search-mediator; do
            if [ -d "services/${svc}" ]; then
              syft dir:services/${svc} -o cyclonedx-json=/dev/null
              echo "OK: ${svc} SBOM generated"
            fi
          done
      - name: Verify cosign is functional
        run: |
          cosign version
          echo "OK: cosign available for signing and attestation"
      - name: Verify release workflow has provenance steps
        run: |
          echo "=== Checking release.yml provenance pipeline ==="
          # Verify release workflow exists and contains required supply-chain steps
          test -f .github/workflows/release.yml || { echo "FAIL: release.yml missing"; exit 1; }
          for keyword in "sbom-action" "attest-build-provenance" "cosign" "cyclonedx" "SHA256SUMS"; do
            grep -q "${keyword}" .github/workflows/release.yml || \
              { echo "FAIL: release.yml missing '${keyword}'"; exit 1; }
            echo "OK: release.yml contains '${keyword}'"
          done
          # Verify build workflow has SBOM attestation
          test -f .github/workflows/build.yml || { echo "FAIL: build.yml missing"; exit 1; }
          for keyword in "sbom-action" "cosign attest" "cyclonedx"; do
            grep -q "${keyword}" .github/workflows/build.yml || \
              { echo "FAIL: build.yml missing '${keyword}'"; exit 1; }
            echo "OK: build.yml contains '${keyword}'"
          done
          echo "=== Supply chain verification passed ==="
      - name: Verify hermetic build readiness
        run: |
          echo "=== Checking build-services.sh hermetic controls ==="
          SCRIPT="files/scripts/build-services.sh"
          # Must have hermetic guard
          grep -q "HERMETIC_BUILD" "$SCRIPT" || { echo "FAIL: no HERMETIC_BUILD guard"; exit 1; }
          echo "OK: HERMETIC_BUILD guard present"
          # Must have LLAMA_CPP_SHA256 checksum
          grep -q "LLAMA_CPP_SHA256" "$SCRIPT" || { echo "FAIL: no LLAMA_CPP_SHA256"; exit 1; }
          echo "OK: LLAMA_CPP_SHA256 present"
          # Must have GOPROXY=off
          grep -q "GOPROXY=off" "$SCRIPT" || { echo "FAIL: no GOPROXY=off"; exit 1; }
          echo "OK: GOPROXY=off present"
          # Must not have --clone in locate_source
          if grep "locate_source.*--clone" "$SCRIPT"; then
            echo "FAIL: locate_source still uses --clone"
            exit 1
          fi
          echo "OK: no --clone in locate_source"
          # Must not have dnf install
          if grep -E "^\s*dnf\s+install" "$SCRIPT"; then
            echo "FAIL: dnf install found"
            exit 1
          fi
          echo "OK: no dnf install"
          echo "=== Hermetic build readiness: PASSED ==="
      - name: Verify upstreams manifest consistency
        # NOTE(review): relies on the runner image's system PyYAML (no
        # setup-python / pip install in this job) — TODO confirm availability.
        run: |
          echo "=== Checking .upstreams.lock.yaml ==="
          if [ ! -f .upstreams.lock.yaml ]; then
            echo "WARNING: .upstreams.lock.yaml not found"
            exit 0
          fi
          python3 -c "
          import yaml, sys, os
          with open('.upstreams.lock.yaml') as f:
              data = yaml.safe_load(f)
          upstreams = data.get('upstreams', {})
          errors = 0
          pending = 0
          for name, entry in upstreams.items():
              local_path = entry.get('local_path', '')
              commit = str(entry.get('pinned_commit', ''))
              # Check for PENDING entries
              if 'PENDING' in commit:
                  pending += 1
                  # PENDING is a warning until at least one upstream is pinned.
                  # Once any entry has a real commit, PENDING on protected branches
                  # becomes a failure (enforces incremental migration).
                  print(f'WARN: {name} has PENDING commit')
                  continue
              # Check local_path exists
              if local_path and not os.path.isdir(local_path):
                  print(f'WARN: {name} local_path does not exist: {local_path}')
              # Verify no subtrees under vendor/ (must be under upstreams/)
              if local_path.startswith('vendor/') and not local_path.startswith('vendor/wheels'):
                  print(f'FAIL: {name} subtree under vendor/ — must use upstreams/')
                  errors += 1
          print(f'Checked {len(upstreams)} upstream(s): {pending} pending, {errors} error(s)')
          if errors > 0:
              sys.exit(1)
          "
          echo "=== Upstreams manifest check: PASSED ==="

  security-regression:
    name: Security Regression Tests
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
      - name: Install Python dependencies
        run: pip install -r requirements-ci.txt
      - name: Run adversarial Python tests
        run: python -m pytest tests/test_adversarial.py -v --tb=short
      - name: Run MCP firewall adversarial tests
        working-directory: services/mcp-firewall
        run: go test -v -race -run TestAdversarial ./...
      - name: Run policy-engine adversarial tests
        working-directory: services/policy-engine
        run: go test -v -race -run TestAdversarial ./...
      - name: Run incident-recorder recovery tests
        working-directory: services/incident-recorder
        run: go test -v -race -run "TestRecovery|TestEscalation|TestForensic|TestLatched" ./...

  test-count-check:
    name: Test Count Drift Check
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - name: Install Python dependencies
        run: pip install -r requirements-ci.txt
      - name: Check test counts for drift
        run: bash .github/scripts/check-test-counts.sh

  dependency-audit:
    name: Dependency Vulnerability Audit
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - name: Install govulncheck
        # NOTE(review): @latest is unpinned, unlike the SHA-pinned actions
        # above — consider pinning a govulncheck version. TODO confirm policy.
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
      - name: Go vulnerability scan (enforced)
        run: |
          echo "=== Go Dependency Vulnerability Scan ==="
          VULN_ERRORS=0
          for svc in airlock registry tool-firewall gpu-integrity-watch mcp-firewall \
            policy-engine runtime-attestor integrity-monitor incident-recorder; do
            echo "--- ${svc} ---"
            cd "services/${svc}"
            if ! govulncheck ./... 2>&1; then
              VULN_ERRORS=$((VULN_ERRORS + 1))
              echo "::error::${svc}: govulncheck found vulnerabilities"
            fi
            cd ../..
          done
          # Report results — count-based check with active waivers
          # NOTE: This is a service-count check, not per-CVE matching.
          # The release-gate job uses govulncheck-strict.sh for proper CVE-ID-level waivers.
          if [ "$VULN_ERRORS" -gt 0 ]; then
            WAIVED=$(python3 -c "
          import json, datetime
          with open('.github/vuln-waivers.json') as f:
              data = json.load(f)
          today = datetime.date.today().isoformat()
          active = [w for w in data.get('go', []) if w.get('expires', '') >= today]
          print(len(active))
          ")
            if [ "$WAIVED" -gt 0 ]; then
              echo "::warning::$VULN_ERRORS service(s) have Go vulnerabilities; $WAIVED waiver(s) active"
              echo "Review findings and add per-CVE waivers to .github/vuln-waivers.json"
              echo "Release branches use strict per-CVE matching via govulncheck-strict.sh"
            else
              echo "FAIL: $VULN_ERRORS service(s) have Go vulnerabilities with no waivers"
              echo "To waive a reviewed finding, add it to .github/vuln-waivers.json"
              exit 1
            fi
          else
            echo "OK: Go vulnerability scan passed (0 findings)"
          fi
      - name: Python dependency audit (enforced)
        run: |
          pip install -r requirements-ci.txt
          echo "=== Python Dependency Audit ==="
          # Run pip-audit, capture output
          pip-audit --strict --desc -f json -o /tmp/pip-audit.json 2>/dev/null || true
          python3 -c "
          import json, sys, datetime
          # Load audit results
          try:
              with open('/tmp/pip-audit.json') as f:
                  data = json.load(f)
          except (FileNotFoundError, json.JSONDecodeError):
              print('OK: pip-audit produced no findings')
              sys.exit(0)
          vulns = data if isinstance(data, list) else data.get('dependencies', [])
          findings = [d for d in vulns if d.get('vulns')]
          if not findings:
              print('OK: no Python dependency vulnerabilities')
              sys.exit(0)
          # Load waivers
          with open('.github/vuln-waivers.json') as f:
              waivers = json.load(f)
          today = datetime.date.today().isoformat()
          waived_ids = {w['id'] for w in waivers.get('python', []) if w.get('expires', '') >= today}
          unwaived = 0
          for dep in findings:
              for v in dep.get('vulns', []):
                  vid = v.get('id', '')
                  if vid in waived_ids:
                      print(f'WAIVED: {dep[\"name\"]} {vid}')
                  else:
                      print(f'::error::{dep[\"name\"]}: {vid} — {v.get(\"description\", \"\")}')
                      unwaived += 1
          if unwaived > 0:
              print(f'FAIL: {unwaived} unwaived Python vulnerability finding(s)')
              print('To waive a reviewed finding, add it to .github/vuln-waivers.json')
              sys.exit(1)
          print(f'OK: all Python findings waived ({len(waived_ids)} waiver(s) active)')
          "

  docs-validation:
    name: Documentation Validation
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Check for broken internal links
        run: |
          echo "=== Checking internal doc links ==="
          ERRORS=0
          # Find all markdown links to local files
          for md in $(find docs/ README.md CONTRIBUTING.md SECURITY.md -name '*.md' 2>/dev/null); do
            # Resolve relative to file's directory
            dir=$(dirname "$md")
            # Extract relative links (not URLs, not anchors).
            # BUGFIX: feed the while loop via process substitution instead of a
            # pipe — a piped 'while' runs in a subshell, so the ERRORS
            # increments were discarded and this step could never fail.
            while read -r link; do
              # Strip anchor fragments
              target="${link%%#*}"
              [ -z "$target" ] && continue
              resolved="${dir}/${target}"
              if [ ! -f "$resolved" ] && [ ! -d "$resolved" ]; then
                echo "BROKEN: ${md} -> ${link} (resolved: ${resolved})"
                ERRORS=$((ERRORS + 1))
              fi
            done < <(grep -oP '\[([^\]]*)\]\((?!https?://|#)([^)]+)\)' "$md" 2>/dev/null \
              | grep -oP '\(([^)]+)\)' | tr -d '()')
          done
          if [ "$ERRORS" -gt 0 ]; then
            echo "FAIL: ${ERRORS} broken internal links found"
            exit 1
          fi
          echo "OK: All internal doc links valid"
      - name: Verify required docs exist
        run: |
          echo "=== Checking required documentation ==="
          REQUIRED_DOCS=(
            "docs/threat-model.md"
            "docs/architecture.md"
            "docs/api.md"
            "docs/security-status.md"
            "docs/production-operations.md"
            "docs/production-readiness-checklist.md"
            "docs/slos.md"
            "docs/release-policy.md"
            "docs/support-lifecycle.md"
            "docs/test-counts.json"
            "docs/install/bare-metal.md"
            "SECURITY.md"
            "CONTRIBUTING.md"
            "LICENSE"
          )
          ERRORS=0
          for doc in "${REQUIRED_DOCS[@]}"; do
            if [ -f "$doc" ]; then
              echo "OK: $doc"
            else
              echo "MISSING: $doc"
              ERRORS=$((ERRORS + 1))
            fi
          done
          if [ "$ERRORS" -gt 0 ]; then
            echo "FAIL: ${ERRORS} required document(s) missing"
            exit 1
          fi
          echo "All required documents present"
      - name: Validate test-counts.json format
        run: |
          python3 -c "
          import json, sys
          with open('docs/test-counts.json') as f:
              data = json.load(f)
          required = ['generated', 'go', 'go_total', 'python_total', 'grand_total']
          for key in required:
              if key not in data:
                  print(f'FAIL: test-counts.json missing key: {key}')
                  sys.exit(1)
          if not isinstance(data['go'], dict):
              print('FAIL: go field must be a dict of service -> count')
              sys.exit(1)
          print(f'OK: test-counts.json valid (total: {data[\"grand_total\"]} tests)')
          "
      - name: Cross-check milestone counts
        run: |
          python3 -c "
          import re, sys
          # Count milestone rows in security-status.md
          with open('docs/security-status.md') as f:
              status_content = f.read()
          status_rows = len(re.findall(r'^\|.*\| Implemented \|', status_content, re.MULTILINE))
          # Count milestone references in README.md
          with open('README.md') as f:
              readme_content = f.read()
          # Find 'All NN project milestones' text
          readme_match = re.search(r'All (\d+) project milestones', readme_content)
          if not readme_match:
              print('FAIL: README.md missing \"All N project milestones\" text')
              sys.exit(1)
          readme_count = int(readme_match.group(1))
          # Count actual checkbox items
          checkbox_count = len(re.findall(r'- \[x\] \*\*Milestone \d+\*\*', readme_content))
          # Check 'all NN milestones' prose reference
          status_ref = re.search(r'all (\d+) milestones', readme_content)
          status_ref_count = int(status_ref.group(1)) if status_ref else readme_count
          errors = 0
          if readme_count != status_rows:
              print(f'FAIL: README says {readme_count} milestones, security-status.md has {status_rows} rows')
              errors += 1
          if checkbox_count != readme_count:
              print(f'FAIL: README says {readme_count} milestones, but has {checkbox_count} checkboxes')
              errors += 1
          if status_ref_count != status_rows:
              print(f'FAIL: README reference ({status_ref_count}) != security-status rows ({status_rows})')
              errors += 1
          if errors:
              sys.exit(1)
          print(f'OK: Milestone counts consistent ({status_rows} milestones)')
          "
      - name: Verify m5-control-matrix test references
        run: |
          python3 -c "
          import re, sys, glob
          with open('docs/m5-control-matrix.md') as f:
              matrix = f.read()
          # Extract backtick-quoted test names (Test* for Go, test_* for Python)
          test_refs = set(re.findall(r'\x60((?:Test|test_)\w+)\x60', matrix))
          print(f'Found {len(test_refs)} test name references in m5-control-matrix.md')
          # Collect Go test function names
          go_tests = set()
          for gofile in glob.glob('services/**/*_test.go', recursive=True):
              with open(gofile) as f:
                  for m in re.finditer(r'func (Test\w+)\(', f.read()):
                      go_tests.add(m.group(1))
          # Collect Python test method and class names
          py_tests = set()
          for pyfile in glob.glob('tests/test_*.py'):
              with open(pyfile) as f:
                  content = f.read()
              for m in re.finditer(r'def (test_\w+)\(', content):
                  py_tests.add(m.group(1))
              for m in re.finditer(r'class (Test\w+):', content):
                  py_tests.add(m.group(1))
          all_tests = go_tests | py_tests
          missing = []
          for ref in sorted(test_refs):
              if ref not in all_tests:
                  print(f'MISSING: {ref} cited in m5-control-matrix.md but not found')
                  missing.append(ref)
          if missing:
              print(f'FAIL: {len(missing)} test reference(s) in m5-control-matrix.md not found')
              sys.exit(1)
          print(f'OK: All {len(test_refs)} test references verified')
          "
      - name: Check test-counts.json staleness
        run: |
          python3 -c "
          import json, glob, re
          with open('docs/test-counts.json') as f:
              data = json.load(f)
          documented = data.get('grand_total', 0)
          # Estimate actual test count from source files
          py_count = 0
          for f in glob.glob('tests/test_*.py'):
              with open(f) as fh:
                  py_count += len(re.findall(r'def test_\w+', fh.read()))
          go_count = 0
          for f in glob.glob('services/**/*_test.go', recursive=True):
              with open(f) as fh:
                  go_count += len(re.findall(r'func Test\w+', fh.read()))
          estimated = py_count + go_count
          drift = estimated - documented
          STALE_THRESHOLD = 20
          if drift > STALE_THRESHOLD:
              print(f'::warning::test-counts.json may be stale: ~{estimated} tests vs documented {documented} (drift: +{drift})')
              print('Consider updating docs/test-counts.json')
          elif drift > 0:
              print(f'NOTE: ~{drift} undocumented test(s) detected (within threshold)')
          else:
              print(f'OK: test-counts.json is current (documented: {documented})')
          "
      - name: Verify release-artifacts.json consistency
        run: |
          python3 -c "
          import json, re, sys
          with open('docs/release-artifacts.json') as f:
              artifacts = json.load(f)
          # Verify Go services in artifacts.json match release.yml matrix
          with open('.github/workflows/release.yml') as f:
              release = f.read()
          # Extract matrix services from release.yml
          matrix_match = re.search(r'service: \[([^\]]+)\]', release)
          if not matrix_match:
              print('FAIL: cannot find service matrix in release.yml')
              sys.exit(1)
          release_services = sorted(s.strip() for s in matrix_match.group(1).split(','))
          artifact_services = sorted(artifacts['go_services'])
          if release_services != artifact_services:
              print(f'FAIL: release.yml services {release_services} != release-artifacts.json {artifact_services}')
              sys.exit(1)
          # Verify schema version
          if artifacts.get('schema_version') != 1:
              print('FAIL: release-artifacts.json schema_version must be 1')
              sys.exit(1)
          # Verify canonical image ref
          canonical = artifacts.get('canonical_image_ref', '')
          if 'sec_ai' in canonical:
              print(f'FAIL: canonical_image_ref contains wrong namespace: {canonical}')
              sys.exit(1)
          print(f'OK: release-artifacts.json consistent ({len(release_services)} Go services, {len(artifacts[\"python_services\"])} Python services)')
          "

  release-gate:
    name: Release Branch Hardened Gate
    # Only runs on release/* and stable branches; depends on the core jobs.
    if: startsWith(github.ref, 'refs/heads/release/') || github.ref == 'refs/heads/stable'
    needs:
      - go-build-and-test
      - python-test
      - security-regression
      - dependency-audit
      - test-count-check
      - docs-validation
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
        with:
          go-version: "1.25"
      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"
      - name: Install dependencies
        run: |
          pip install -r requirements-ci.txt
          go install golang.org/x/vuln/cmd/govulncheck@latest
      - name: Bandit (release-strict)
        run: |
          # On release branches: fail on ANY high-severity finding regardless of confidence
          bandit -r services/ -ll --skip B101,B404,B603 -f json -o /tmp/bandit-release.json || true
          python3 -c "
          import json, sys
          with open('/tmp/bandit-release.json') as f:
              data = json.load(f)
          high = [r for r in data.get('results', [])
                  if r['issue_severity'] == 'HIGH']
          critical = [r for r in data.get('results', [])
                      if r.get('issue_severity') == 'CRITICAL']
          for r in high + critical:
              print(f'::error ::{r[\"filename\"]}:{r[\"line_number\"]}: [{r[\"issue_severity\"]}] {r[\"issue_text\"]}')
          if high or critical:
              print(f'RELEASE GATE FAIL: {len(high)} HIGH + {len(critical)} CRITICAL finding(s)')
              print('Release branches require zero HIGH/CRITICAL bandit findings at any confidence level.')
              sys.exit(1)
          print('OK: release-strict bandit passed')
          "
      - name: govulncheck (release-strict, CVE-ID waivers)
        run: bash .github/scripts/govulncheck-strict.sh
      - name: M5 acceptance suite (re-run)
        env:
          PYTHONPATH: services
        run: python -m pytest tests/test_m5_acceptance.py -v --tb=short
      - name: Release gate summary
        run: |
          echo "## Release Gate: PASSED" >> "$GITHUB_STEP_SUMMARY"
          echo "Branch: ${{ github.ref_name }}" >> "$GITHUB_STEP_SUMMARY"
          echo "All hardened checks passed for release branch." >> "$GITHUB_STEP_SUMMARY"