From 5198c9b6a74be44e69169c19c08abbeb283f8ec7 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 10 Apr 2026 18:48:12 +0000 Subject: [PATCH] Add integration tests, security/QAT workflows, and SDK framework README - tests/unit/: 55 unit tests for email validation, schedule helpers, CSV parsing - tests/integration/: 71 integration tests covering DAST request script, blacklist script, bash syntax validation, shellcheck, XML API scripts, and live API connectivity (credential-gated) - tests/fixtures/: allowlist, blacklist, glblacklist CSV fixtures for test runs - pytest.ini, requirements-test.txt: test runner configuration - .github/workflows/integration-tests.yml: unit + integration + shell + API tests, split into jobs with artifact uploads and optional live API job on main - .github/workflows/security-scan.yml: Bandit, ShellCheck, Gitleaks, pip-audit, Semgrep, and credentials-file checker; scheduled weekly - .github/workflows/qat.yml: flake8, ShellCheck lint, JSON/YAML validation, PSScriptAnalyzer on Windows, full test suite with result publishing - README.md: rewritten with badge table, SDK framework overview, API reference, quick-start, test docs, and secrets guide - .gitignore: excludes __pycache__, credentials, test artifacts, coverage files https://claude.ai/code/session_015pBhzcxzBhLcAujgXrwsaz --- .github/workflows/integration-tests.yml | 184 ++++++++++++ .github/workflows/qat.yml | 263 ++++++++++++++++++ .github/workflows/security-scan.yml | 207 ++++++++++++++ .gitignore | 40 +++ README.md | 202 +++++++++++++- pytest.ini | 11 + requirements-test.txt | 3 + tests/conftest.py | 49 ++++ tests/fixtures/allowlist.csv | 5 + tests/fixtures/blacklist.csv | 4 + tests/fixtures/glblacklist.csv | 3 + tests/integration/__init__.py | 0 tests/integration/test_api_connectivity.py | 143 ++++++++++ tests/integration/test_blacklist_script.py | 128 +++++++++ .../test_dast_web_request_script.py | 190 +++++++++++++ tests/integration/test_shell_scripts.py | 223 +++++++++++++++ 
tests/unit/__init__.py | 0 tests/unit/test_csv_parsing.py | 177 ++++++++++++ tests/unit/test_email_validation.py | 69 +++++ tests/unit/test_schedule_helpers.py | 137 +++++++++ 20 files changed, 2028 insertions(+), 10 deletions(-) create mode 100644 .github/workflows/integration-tests.yml create mode 100644 .github/workflows/qat.yml create mode 100644 .github/workflows/security-scan.yml create mode 100644 .gitignore create mode 100644 pytest.ini create mode 100644 requirements-test.txt create mode 100644 tests/conftest.py create mode 100644 tests/fixtures/allowlist.csv create mode 100644 tests/fixtures/blacklist.csv create mode 100644 tests/fixtures/glblacklist.csv create mode 100644 tests/integration/__init__.py create mode 100644 tests/integration/test_api_connectivity.py create mode 100644 tests/integration/test_blacklist_script.py create mode 100644 tests/integration/test_dast_web_request_script.py create mode 100644 tests/integration/test_shell_scripts.py create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/test_csv_parsing.py create mode 100644 tests/unit/test_email_validation.py create mode 100644 tests/unit/test_schedule_helpers.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000..d8b0552 --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,184 @@ +name: Integration Tests + +on: + push: + branches: + - main + - "claude/**" + pull_request: + branches: + - main + +jobs: + # ── Unit tests: pure logic, no external dependencies ─────────────────────── + unit-tests: + name: Unit Tests (Python) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + + - name: Install test dependencies + run: pip install -r requirements-test.txt + + - name: Run unit tests + run: pytest tests/unit/ -v --tb=short 
--junit-xml=unit-test-results.xml + + - name: Upload unit test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: unit-test-results + path: unit-test-results.xml + + # ── Integration tests: scripts invoked end-to-end (no API creds needed) ─── + python-integration-tests: + name: Python Script Integration Tests + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + + - name: Install test dependencies + run: pip install -r requirements-test.txt + + - name: Run Python integration tests + run: | + pytest tests/integration/ \ + --ignore=tests/integration/test_api_connectivity.py \ + --ignore=tests/integration/test_shell_scripts.py \ + -v --tb=short \ + --junit-xml=integration-test-results.xml + + - name: Upload integration test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: integration-test-results + path: integration-test-results.xml + + # ── Shell script integration tests (bash syntax + shellcheck) ───────────── + shell-integration-tests: + name: Shell Script Tests + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install shellcheck + run: sudo apt-get update -q && sudo apt-get install -y shellcheck + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install test dependencies + run: pip install -r requirements-test.txt + + - name: Run shell script tests + run: | + pytest tests/integration/test_shell_scripts.py \ + -v --tb=short \ + --junit-xml=shell-test-results.xml + + - name: Upload shell test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: shell-test-results + path: shell-test-results.xml + + # ── Combined coverage report ─────────────────────────────────────────────── + coverage: + name: Test Coverage + runs-on: ubuntu-latest + needs: 
[unit-tests, python-integration-tests] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: pip install -r requirements-test.txt + + - name: Run tests with coverage + run: | + pytest tests/ \ + --ignore=tests/integration/test_api_connectivity.py \ + --cov=Scripts \ + --cov-report=xml \ + --cov-report=term-missing \ + -q + continue-on-error: true + + - name: Upload coverage report + uses: actions/upload-artifact@v4 + with: + name: coverage-report + path: coverage.xml + + # ── Live API connectivity tests (main branch + secrets only) ─────────────── + api-connectivity-tests: + name: Veracode API Connectivity + runs-on: ubuntu-latest + # secrets context is unavailable in job-level if; creds gating is handled by pytest skip + if: > + github.event_name == 'push' && + github.ref == 'refs/heads/main' + environment: veracode-integration + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + pip install -r requirements-test.txt + pip install veracode-api-signing 2>/dev/null || true + + - name: Configure Veracode credentials + run: | + mkdir -p ~/.veracode + printf '[default]\nveracode_api_key_id = %s\nveracode_api_key_secret = %s\n' \ + "$VERACODE_API_ID" "$VERACODE_API_KEY" > ~/.veracode/credentials + env: + VERACODE_API_ID: ${{ secrets.VERACODE_API_ID }} + VERACODE_API_KEY: ${{ secrets.VERACODE_API_KEY }} + + - name: Run API connectivity tests + env: + VERACODE_API_ID: ${{ secrets.VERACODE_API_ID }} + VERACODE_API_KEY: ${{ secrets.VERACODE_API_KEY }} + run: | + pytest tests/integration/test_api_connectivity.py \ + -m api -v --tb=short \ + --junit-xml=api-test-results.xml + continue-on-error: true + + - name: Upload API test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: api-test-results + path:
api-test-results.xml diff --git a/.github/workflows/qat.yml b/.github/workflows/qat.yml new file mode 100644 index 0000000..f1c28e0 --- /dev/null +++ b/.github/workflows/qat.yml @@ -0,0 +1,263 @@ +name: QAT (Quality Assurance Testing) + +on: + push: + branches: + - main + - "claude/**" + pull_request: + branches: + - main + +jobs: + # ── Python linting (flake8) ──────────────────────────────────────────────── + python-lint: + name: Python Lint (flake8) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install flake8 + run: pip install flake8 + + - name: Run flake8 on Release scripts + run: | + flake8 Scripts/Release/ \ + --max-line-length=120 \ + --extend-ignore=E501,W503,E302,E303 \ + --statistics \ + --count \ + --format=default + continue-on-error: true + + - name: Run flake8 on Dev Python scripts + run: | + flake8 Scripts/Dev/ \ + --max-line-length=120 \ + --extend-ignore=E501,W503,E302,E303,F401,F811 \ + --exclude=Scripts/Dev/Reference,Scripts/Dev/archive \ + --statistics \ + --count + continue-on-error: true + + - name: Run flake8 on tests + run: | + flake8 tests/ \ + --max-line-length=120 \ + --extend-ignore=E501,W503 \ + --statistics \ + --count + + # ── Shell script linting (ShellCheck) ───────────────────────────────────── + shell-lint: + name: Shell Lint (ShellCheck) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install ShellCheck + run: sudo apt-get update -q && sudo apt-get install -y shellcheck + + - name: Lint Release shell scripts + run: | + echo "=== Linting Release scripts ===" + find Scripts/Release/ -name "*.sh" -print0 | \ + xargs -0 shellcheck --severity=warning --format=tty + continue-on-error: true + + - name: Lint Dev bash scripts + run: | + echo "=== Linting Dev bash scripts ===" + find Scripts/Dev/bash_scripts/ -name "*.sh" -print0 | 
\ + xargs -0 shellcheck --severity=warning --format=tty + continue-on-error: true + + - name: Lint XML API scripts + run: | + echo "=== Linting XML API scripts ===" + find xml_api_calls/ -name "*.sh" -print0 | \ + xargs -0 shellcheck --severity=warning --format=tty + continue-on-error: true + + # ── JSON validation ──────────────────────────────────────────────────────── + json-validation: + name: JSON File Validation + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Validate all JSON files + run: | + FAILED=0 + while IFS= read -r -d '' file; do + if python3 -m json.tool "$file" > /dev/null 2>&1; then + echo "PASS: $file" + else + echo "FAIL: $file" + python3 -m json.tool "$file" 2>&1 > /dev/null || true + FAILED=1 + fi + done < <(find . \ + -name "*.json" \ + -not -path "./.git/*" \ + -not -path "./Scripts/Dev/Test/results.json" \ + -not -path "./Scripts/Dev/Test/filtered_results.json" \ + -not -path "./Scripts/results.json" \ + -not -path "./Scripts/Dev/Reference/*" \ + -print0) + + if [ "$FAILED" -eq 1 ]; then + echo "One or more JSON files failed validation" + exit 1 + fi + echo "All JSON files passed validation" + + # ── YAML validation ──────────────────────────────────────────────────────── + yaml-validation: + name: YAML Validation + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install yamllint + run: pip install yamllint + + - name: Validate GitHub workflow YAML files + run: | + yamllint .github/workflows/ \ + -d "{extends: relaxed, rules: {line-length: {max: 120}}}" + + # ── Full test suite execution ────────────────────────────────────────────── + full-test-suite: + name: Full Test Suite + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install ShellCheck (for shell
tests) + run: sudo apt-get update -q && sudo apt-get install -y shellcheck + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + + - name: Install test dependencies + run: pip install -r requirements-test.txt + + - name: Run full test suite (excluding API tests) + run: | + pytest tests/ \ + --ignore=tests/integration/test_api_connectivity.py \ + -v \ + --tb=long \ + --junit-xml=qat-test-results.xml \ + -q + + - name: Upload QAT test results + uses: actions/upload-artifact@v4 + if: always() + with: + name: qat-test-results + path: qat-test-results.xml + + - name: Publish test results summary + uses: EnricoMi/publish-unit-test-result-action@v2 + if: always() + with: + files: qat-test-results.xml + check_name: "QAT Test Results" + continue-on-error: true + + # ── PowerShell script analysis ───────────────────────────────────────────── + powershell-analysis: + name: PowerShell Script Analysis + runs-on: windows-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install PSScriptAnalyzer + shell: pwsh + run: Install-Module -Name PSScriptAnalyzer -Force -Scope CurrentUser + + - name: Analyze PowerShell scripts + shell: pwsh + run: | + $scripts = Get-ChildItem -Path "Scripts" -Filter "*.ps1" -Recurse + $hasErrors = $false + foreach ($script in $scripts) { + Write-Host "Analyzing: $($script.FullName)" + $results = Invoke-ScriptAnalyzer -Path $script.FullName -Severity Warning,Error + if ($results) { + $results | Format-Table -AutoSize + $hasErrors = $true + } else { + Write-Host "PASS: $($script.Name)" + } + } + if ($hasErrors) { + Write-Warning "PSScriptAnalyzer found issues. Review output above." 
+ } + continue-on-error: true + + # ── Script structure and metadata checks ────────────────────────────────── + structure-checks: + name: Script Structure Checks + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Check Python scripts have shebangs or module docstrings + run: | + echo "Checking Python script headers..." + MISSING=0 + for f in Scripts/Release/*.py; do + if ! head -5 "$f" | grep -qE '^#!|^#|^"""'; then + echo "WARNING: $f may be missing a shebang or docstring header" + MISSING=$((MISSING + 1)) + fi + done + echo "Files checked. $MISSING potential header issues found." + continue-on-error: true + + - name: Check shell scripts have shebangs + run: | + echo "Checking shell script shebangs..." + MISSING=0 + for f in Scripts/Release/*.sh; do + if ! head -1 "$f" | grep -q '^#!'; then + echo "WARNING: $f is missing a shebang line" + MISSING=$((MISSING + 1)) + fi + done + echo "Files checked. $MISSING shebangs missing." + continue-on-error: true + + - name: Verify test fixtures exist and are non-empty + run: | + echo "Verifying test fixtures..." + for f in tests/fixtures/allowlist.csv tests/fixtures/blacklist.csv tests/fixtures/glblacklist.csv; do + if [ ! 
-s "$f" ]; then + echo "FAIL: $f is missing or empty" + exit 1 + fi + echo "PASS: $f" + done diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml new file mode 100644 index 0000000..3ee742e --- /dev/null +++ b/.github/workflows/security-scan.yml @@ -0,0 +1,207 @@ +name: Security Scan + +on: + push: + branches: + - main + - "claude/**" + pull_request: + branches: + - main + schedule: + # Run weekly on Monday at 08:00 UTC + - cron: "0 8 * * 1" + +permissions: + contents: read + security-events: write + actions: read + +jobs: + # ── Python static security analysis (Bandit) ────────────────────────────── + python-bandit: + name: Python Security Analysis (Bandit) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install Bandit + run: pip install bandit[toml] + + - name: Run Bandit (high + medium severity) + run: | + bandit -r Scripts/ \ + -f json -o bandit-report.json \ + -ll \ + --exclude Scripts/Dev/Reference,Scripts/src/bin \ + || true + + - name: Print Bandit summary + run: | + bandit -r Scripts/ \ + -f txt \ + -ll \ + --exclude Scripts/Dev/Reference,Scripts/src/bin \ + || true + + - name: Upload Bandit report + uses: actions/upload-artifact@v4 + if: always() + with: + name: bandit-security-report + path: bandit-report.json + + # ── Shell script security analysis (ShellCheck) ─────────────────────────── + shell-shellcheck: + name: Shell Security Analysis (ShellCheck) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Run ShellCheck on Release scripts + uses: ludeeus/action-shellcheck@master + with: + scandir: "./Scripts/Release" + severity: warning + format: tty + continue-on-error: true + + - name: Run ShellCheck on Dev bash scripts + uses: ludeeus/action-shellcheck@master + with: + scandir: "./Scripts/Dev/bash_scripts" + severity: 
warning + format: tty + continue-on-error: true + + - name: Run ShellCheck on XML API scripts + uses: ludeeus/action-shellcheck@master + with: + scandir: "./xml_api_calls" + severity: warning + format: tty + continue-on-error: true + + # ── Secret scanning (Gitleaks) ──────────────────────────────────────────── + secret-scanning: + name: Secret Scanning (Gitleaks) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Run Gitleaks + uses: gitleaks/gitleaks-action@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + continue-on-error: true + + # ── Python dependency audit (pip-audit) ─────────────────────────────────── + dependency-audit: + name: Dependency Vulnerability Audit (pip-audit) + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install pip-audit + run: pip install pip-audit + + - name: Audit test dependencies + run: | + pip install -r requirements-test.txt + pip-audit --desc on 2>/dev/null || true + + - name: Audit all Python files for unsafe imports + run: | + echo "Scanning Python scripts for known vulnerable patterns..." + grep -rn "import pickle\|eval(\|exec(\|subprocess.call.*shell=True" \ + Scripts/ --include="*.py" || echo "No flagged patterns found." 
+ + # ── Semgrep SAST ────────────────────────────────────────────────────────── + semgrep: + name: Semgrep SAST + runs-on: ubuntu-latest + if: github.actor != 'dependabot[bot]' + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python for Semgrep + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install Semgrep + run: pip install semgrep + + - name: Run Semgrep on Python scripts + run: | + semgrep --config p/python \ + --config p/security-audit \ + Scripts/ \ + --json -o semgrep-report.json \ + || true + semgrep --config p/python \ + --config p/security-audit \ + Scripts/ \ + --text \ + || true + + - name: Upload Semgrep report + uses: actions/upload-artifact@v4 + if: always() + with: + name: semgrep-report + path: semgrep-report.json + + # ── Credentials file check ───────────────────────────────────────────────── + credentials-check: + name: Credentials File Check + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Check for committed credential files + run: | + echo "Checking for credential files that should not be committed..." + FOUND=0 + + # Check for .veracode/credentials files (except in test fixtures) + if find . -path "./.git" -prune -o \ + -name "credentials" -path "*/.veracode/*" \ + -not -path "*/Dev/Test/*" -print | grep -q .; then + echo "WARNING: .veracode/credentials file found outside of test fixtures" + find . -path "./.git" -prune -o \ + -name "credentials" -path "*/.veracode/*" \ + -not -path "*/Dev/Test/*" -print + FOUND=1 + fi + + # Check for files with hardcoded API key patterns + if grep -rn "veracode_api_key_id\s*=\s*[a-z0-9]\{8\}" \ + --include="*.sh" --include="*.py" --include="*.ps1" \ + Scripts/ 2>/dev/null; then + echo "WARNING: Possible hardcoded API key ID found" + FOUND=1 + fi + + if [ "$FOUND" -eq 0 ]; then + echo "No credential issues detected." 
+ fi + continue-on-error: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7d54dac --- /dev/null +++ b/.gitignore @@ -0,0 +1,40 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +*.egg-info/ +dist/ +build/ +.eggs/ + +# Test outputs +.pytest_cache/ +.coverage +coverage.xml +htmlcov/ +*-test-results.xml +bandit-report.json +semgrep-report.json + +# Virtual environments +.venv/ +venv/ +env/ + +# IDE +.vscode/settings.json +.idea/ + +# OS +.DS_Store +Thumbs.db + +# Credentials (never commit) +.veracode/credentials + +# Generated scan outputs +input.json +pipeline-scan-LATEST.zip +pipeline-scan.jar diff --git a/README.md b/README.md index 4b9e8ba..5f1d441 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,205 @@ -# Veracode-scripts # +# Veracode Security Testing SDK Framework -## General ## -This script is to be used as a interactive installer and agent for making differn't types of calls to Veracode's APIs and an interactive installer for setting up and configuring your Veracode SAST Products and SCA products. 
+[![Integration Tests](https://github.com/bnreplah/veracode-scripts/actions/workflows/integration-tests.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/integration-tests.yml) +[![Security Scan](https://github.com/bnreplah/veracode-scripts/actions/workflows/security-scan.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/security-scan.yml) +[![QAT](https://github.com/bnreplah/veracode-scripts/actions/workflows/qat.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/qat.yml) +--- -## Includes ## -- Veracode REST and XML API calls -- Veracode REST conversion to XML API output -- SRM (Scriptable Request Modification) API Specification Configuration https://docs.veracode.com/r/Example_Script_for_Scriptable_Request_Modification_Authentication?tocId=GxBzVtHR5GnF~kPAmh0MNw +## Overview -## Use ## +A comprehensive SDK and tooling framework for Veracode application security testing. +It wraps the Veracode REST and XML APIs with helper scripts, automated analysis utilities, and an installation framework — targeting SAST, DAST, SCA, container security, and misconfiguration detection from a single, unified toolset. 
+### Goals +- **Correlate findings** across DAST, SAST, SCA, container findings, and security misconfigurations +- **Directed analysis** — surface overlapping data paths, common flaw sources in static analysis, and cross-scan vulnerability patterns +- **Automated recommendations** — link findings to security training modules and remediation guidance +- **Cross-platform installation** — thin installer in Bash (`.sh`), PowerShell (`.ps1`), and Go (`.exe`) to bootstrap the full toolset +--- -## Commands ## +## Workflows & Badges +| Workflow | Description | Badge | +|---|---|---| +| **Integration Tests** | Unit tests + Python script integration tests + shell script syntax/shellcheck | [![Integration Tests](https://github.com/bnreplah/veracode-scripts/actions/workflows/integration-tests.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/integration-tests.yml) | +| **Security Scan** | Bandit (Python), ShellCheck, Gitleaks secret scanning, pip-audit, Semgrep SAST | [![Security Scan](https://github.com/bnreplah/veracode-scripts/actions/workflows/security-scan.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/security-scan.yml) | +| **QAT** | flake8 linting, ShellCheck lint, JSON/YAML validation, PSScriptAnalyzer, full test suite | [![QAT](https://github.com/bnreplah/veracode-scripts/actions/workflows/qat.yml/badge.svg)](https://github.com/bnreplah/veracode-scripts/actions/workflows/qat.yml) | +--- +## What's Included +### Scripts/Release — Production-Ready +| Script | Type | Description | +|---|---|---| +| `DASTWebAppRequest-std.py` | Python | Format and submit Dynamic Web App scan requests from CLI or piped JSON | +| `BlackList-std.py` | Python | Build DAST blocklist/scan settings from CSV files | +| `DAST-ls-v2.sh` | Bash | List DAST scans and results with pagination and verbose reporting | +| `DAST-rescan.sh` | Bash | Trigger rescans for Dynamic Analysis | +| `SearchBuildByName.sh` | Bash | Search Veracode 
application builds by name across apps | +| `vdb-purl-lte.sh` | Bash | Veracode vulnerability DB PURL lookup (lite) | +| `veracode-installer.sh` | Bash | Install and configure Veracode CLI tooling | -A working repository of custom script integrations for veracode +### Scripts/Dev — In Development + +| Directory | Contents | +|---|---| +| `DASTFramework/` | Modular DAST configuration framework (request builder, status polling, hooks, API scan) | +| `DBlookup/` | CPE/PURL vulnerability database lookup utilities | +| `bash_scripts/` | SCA library search, pipeline scan, sandbox promotion, upload scripts | +| `ps_scripts/` | PowerShell scripts: Java API wrapper management, scan status monitoring | +| `Test/` | Test scripts, sample data, and debugging utilities | + +### xml_api_calls — Legacy XML API + +Sequential workflow scripts for the Veracode XML API: +`0_getapplist` → `1_getapplist` → `2_getbuildlist` → `3_getsandboxlist` → `4_detailedreport` + +--- + +## Veracode APIs Used + +| API | Purpose | +|---|---| +| **Dynamic Analysis REST API** | DAST scan creation, configuration, scheduling, status | +| **Upload/Results API (XML)** | SAST scan submission, build management, detailed reports | +| **SCA REST API** | Workspace/project scanning, library/dependency findings | +| **Identity API** | Team and user management (replacing deprecated XML Admin API) | +| **Pipeline Scan API** | CI/CD integrated scanning with pre-scan file size checks | +| **Veracode CLI** | Modern CLI wrapper for SAST, SCA, and SBOM generation | + +--- + +## Authentication + +Credentials can be supplied via: + +1. **Credentials file** — `~/.veracode/credentials`: + ```ini + [default] + veracode_api_key_id = YOUR_API_ID + veracode_api_key_secret = YOUR_API_KEY + ``` + +2. **Environment variables**: + ```bash + export VERACODE_API_ID="your-api-id" + export VERACODE_API_KEY="your-api-key" + ``` + +3. 
**SCA Agent token** (for `srcclr`): + ```bash + export SRCCLR_API_TOKEN="your-token" + ``` + +--- + +## Quick Start + +### Install Veracode Tooling +```bash +# Install Veracode CLI +bash Scripts/Release/veracode-installer.sh --force-install-vccli + +# Install SCA CLI agent +bash Scripts/Release/veracode-installer.sh --install-sca-cli + +# Install Java API Wrapper +bash Scripts/Release/veracode-installer.sh --install-java-api-wrapper + +# Install Pipeline Scanner +bash Scripts/Release/veracode-installer.sh --install-pipeline-scanner +``` + +### Create a DAST Analysis Request +```bash +# Interactive mode +python Scripts/Release/DASTWebAppRequest-std.py + +# Non-interactive / pipe mode (stdout JSON for use with http or curl) +python Scripts/Release/DASTWebAppRequest-std.py \ + "My-App-Scan" \ + "https://target.example.com/" \ + "owner@company.com" \ + "Security Team" \ + | http POST "https://api.veracode.com/was/configservice/v1/analyses" \ + --auth-type=veracode_hmac +``` + +### List DAST Scans +```bash +bash Scripts/Release/DAST-ls-v2.sh +``` + +### SCA Library Search +```bash +bash Scripts/Dev/bash_scripts/SCA-Library-ProjectSearch.sh "log4j" +``` + +--- + +## Running Tests + +```bash +# Install test dependencies +pip install -r requirements-test.txt + +# Run all unit tests (no credentials needed) +pytest tests/unit/ -v + +# Run all integration tests (no credentials needed) +pytest tests/integration/ --ignore=tests/integration/test_api_connectivity.py -v + +# Run full suite +pytest tests/ --ignore=tests/integration/test_api_connectivity.py -v + +# Run with coverage +pytest tests/ --ignore=tests/integration/test_api_connectivity.py \ + --cov=Scripts --cov-report=term-missing + +# Run live API connectivity tests (requires credentials) +pytest tests/integration/test_api_connectivity.py -m api -v +``` + +### Test Structure + +``` +tests/ +├── conftest.py # Shared fixtures (tmp_work_dir, paths) +├── fixtures/ +│ ├── allowlist.csv # DAST allowlist test fixture +│ ├── 
blacklist.csv # DAST blocklist test fixture +│ └── glblacklist.csv # Global blocklist test fixture +├── unit/ +│ ├── test_email_validation.py # Email regex validation logic +│ ├── test_schedule_helpers.py # Scan schedule helper functions +│ └── test_csv_parsing.py # CSV → JSON parsing logic +└── integration/ + ├── test_dast_web_request_script.py # DASTWebAppRequest-std.py end-to-end + ├── test_blacklist_script.py # BlackList-std.py end-to-end + ├── test_shell_scripts.py # Bash syntax + shellcheck for all .sh + └── test_api_connectivity.py # Live Veracode API calls (needs creds) +``` + +### GitHub Secrets Required (for API tests) + +| Secret | Description | +|---|---| +| `VERACODE_API_ID` | Veracode API Key ID | +| `VERACODE_API_KEY` | Veracode API Key Secret | + +--- + +## SRM (Scriptable Request Modification) + +Supports Veracode SRM API specification configuration for authenticated dynamic scans. +Reference: [Veracode SRM Documentation](https://docs.veracode.com/r/Example_Script_for_Scriptable_Request_Modification_Authentication?tocId=GxBzVtHR5GnF~kPAmh0MNw) + +--- + +## License + +See [LICENSE](LICENSE) for details. 
diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..97294a5 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,11 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = -v --tb=short +markers = + unit: Pure unit tests with no external dependencies + integration: Integration tests invoking scripts end-to-end + api: Tests requiring live Veracode API credentials + slow: Tests that may take significant time to run diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..a090932 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,3 @@ +# Test dependencies for the Veracode SDK framework +pytest>=7.4.0 +pytest-cov>=4.1.0 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..9b163c0 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,49 @@ +""" +Shared pytest fixtures and configuration for the Veracode SDK test suite. +""" + +import shutil +import pytest +from pathlib import Path + +REPO_ROOT = Path(__file__).parent.parent +SCRIPTS_DIR = REPO_ROOT / "Scripts" +RELEASE_DIR = SCRIPTS_DIR / "Release" +DEV_DIR = SCRIPTS_DIR / "Dev" +FIXTURES_DIR = Path(__file__).parent / "fixtures" + + +@pytest.fixture(scope="session") +def repo_root(): + """Return the repository root path.""" + return REPO_ROOT + + +@pytest.fixture(scope="session") +def release_dir(): + """Return the path to the Release scripts directory.""" + return RELEASE_DIR + + +@pytest.fixture(scope="session") +def dev_dir(): + """Return the path to the Dev scripts directory.""" + return DEV_DIR + + +@pytest.fixture(scope="session") +def fixtures_dir(): + """Return the path to the test fixtures directory.""" + return FIXTURES_DIR + + +@pytest.fixture +def tmp_work_dir(tmp_path): + """ + Create a temp working directory pre-populated with all test CSV fixtures. 
+ Use this for running scripts that need allowlist.csv, blacklist.csv, glblacklist.csv + in the current working directory. + """ + for csv_file in FIXTURES_DIR.glob("*.csv"): + shutil.copy(csv_file, tmp_path / csv_file.name) + return tmp_path diff --git a/tests/fixtures/allowlist.csv b/tests/fixtures/allowlist.csv new file mode 100644 index 0000000..ce42d36 --- /dev/null +++ b/tests/fixtures/allowlist.csv @@ -0,0 +1,5 @@ +directory_restriction_type,http_and_https,url +NONE,TRUE,https://example.com +DIRECTORY_AND_SUBDIRECTORY,TRUE,https://api.example.com +FOLDER_ONLY,TRUE,https://www.example.com +FILE,TRUE,https://docs.example.com diff --git a/tests/fixtures/blacklist.csv b/tests/fixtures/blacklist.csv new file mode 100644 index 0000000..65abf29 --- /dev/null +++ b/tests/fixtures/blacklist.csv @@ -0,0 +1,4 @@ +directory_restriction_type,http_and_https,url +NONE,TRUE,https://blocked.example.com +FILE,FALSE,https://private.example.com +DIRECTORY_AND_SUBDIRECTORY,TRUE,https://secret.example.com diff --git a/tests/fixtures/glblacklist.csv b/tests/fixtures/glblacklist.csv new file mode 100644 index 0000000..3ad9522 --- /dev/null +++ b/tests/fixtures/glblacklist.csv @@ -0,0 +1,3 @@ +directory_restriction_type,http_and_https,url +NONE,TRUE,https://global-blocked.example.com +DIRECTORY_AND_SUBDIRECTORY,FALSE,https://internal.example.com diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/test_api_connectivity.py b/tests/integration/test_api_connectivity.py new file mode 100644 index 0000000..88ab519 --- /dev/null +++ b/tests/integration/test_api_connectivity.py @@ -0,0 +1,143 @@ +""" +Veracode API connectivity integration tests. 
"""
Veracode API connectivity integration tests.

These tests make LIVE calls to the Veracode REST/XML APIs and require
valid credentials to be configured via one of:
 - Environment variables: VERACODE_API_ID and VERACODE_API_KEY
 - Credentials file: ~/.veracode/credentials

Mark: @pytest.mark.api
Skip: Automatically skipped if no credentials are found.

Run selectively with: pytest tests/integration/test_api_connectivity.py -m api -v
"""

import json
import os
import shutil
import subprocess
import sys
from pathlib import Path

import pytest

pytestmark = pytest.mark.api

RELEASE_DIR = Path(__file__).parent.parent.parent / "Scripts" / "Release"
FIXTURES_DIR = Path(__file__).parent.parent / "fixtures"

CREDS_FILE = Path.home() / ".veracode" / "credentials"


def credentials_available() -> bool:
    """Return True if Veracode API credentials can be found."""
    has_env = bool(
        os.environ.get("VERACODE_API_ID") and os.environ.get("VERACODE_API_KEY")
    )
    return has_env or CREDS_FILE.exists()


skip_no_creds = pytest.mark.skipif(
    not credentials_available(),
    reason=(
        "Veracode API credentials not found. "
        "Set VERACODE_API_ID + VERACODE_API_KEY env vars "
        "or configure ~/.veracode/credentials"
    ),
)


@skip_no_creds
class TestDASTAPIConnectivity:
    """Tests that verify connectivity to the Veracode Dynamic Analysis API."""

    def test_dast_ls_returns_without_auth_error(self):
        """DAST-ls-v2.sh should connect and not return a 401 auth error."""
        script = RELEASE_DIR / "DAST-ls-v2.sh"
        if not script.exists():
            pytest.skip("DAST-ls-v2.sh not found")

        result = subprocess.run(
            ["bash", str(script)],
            capture_output=True,
            text=True,
            timeout=60,
            # Pass the current environment through so VERACODE_* vars reach
            # the script.
            env={**os.environ},
        )
        combined = result.stdout + result.stderr
        assert "401" not in combined, f"Authentication error returned:\n{combined}"
        assert "Unauthorized" not in combined

    def test_dast_ls_returns_json_or_status(self):
        """DAST-ls-v2.sh should return JSON data or an API status message."""
        script = RELEASE_DIR / "DAST-ls-v2.sh"
        if not script.exists():
            pytest.skip("DAST-ls-v2.sh not found")

        result = subprocess.run(
            ["bash", str(script)],
            capture_output=True,
            text=True,
            timeout=60,
        )
        # Either valid JSON response or a recognized API response structure
        combined = result.stdout + result.stderr
        assert len(combined.strip()) > 0, "No output from DAST ls script"


@skip_no_creds
class TestSASTAPIConnectivity:
    """Tests that verify connectivity to the Veracode Static Analysis API."""

    def test_search_build_no_auth_error(self):
        """SearchBuildByName.sh should connect without auth errors."""
        script = RELEASE_DIR / "SearchBuildByName.sh"
        if not script.exists():
            pytest.skip("SearchBuildByName.sh not found")

        result = subprocess.run(
            ["bash", str(script), "connectivity-test-app"],
            capture_output=True,
            text=True,
            timeout=60,
            input="",
        )
        combined = result.stdout + result.stderr
        assert "401" not in combined, f"Auth error:\n{combined}"


@skip_no_creds
class TestDASTRequestSubmission:
    """
    Tests that attempt to format and validate a DAST analysis request
    against the Veracode API schema.

    NOTE: These tests format a request JSON but do NOT submit/create scans
    to avoid affecting production data.
    """

    def test_formatted_request_valid_json(self, tmp_path):
        """A formatted DAST request should produce valid JSON output."""
        shutil.copy(FIXTURES_DIR / "allowlist.csv", tmp_path / "allowlist.csv")
        shutil.copy(FIXTURES_DIR / "blacklist.csv", tmp_path / "blacklist.csv")

        script = RELEASE_DIR / "DASTWebAppRequest-std.py"
        # Fix: guard against a missing script like every other test in this
        # module, instead of failing with a confusing subprocess error.
        if not script.exists():
            pytest.skip("DASTWebAppRequest-std.py not found")

        result = subprocess.run(
            [
                sys.executable,
                str(script),
                "api-connectivity-test",
                "https://target.example.com/",
                os.environ.get("VERACODE_ORG_EMAIL", "test@example.com"),
                "API Test",
            ],
            capture_output=True,
            text=True,
            cwd=str(tmp_path),
            timeout=30,
        )
        non_empty = [l for l in result.stdout.strip().splitlines() if l.strip()]
        assert non_empty, f"No output. stderr: {result.stderr}"

        parsed = json.loads(non_empty[-1])
        assert "name" in parsed
        assert "scans" in parsed
+""" + +import csv +import sys +import shutil +import subprocess +import pytest +from pathlib import Path + +RELEASE_DIR = Path(__file__).parent.parent.parent / "Scripts" / "Release" +FIXTURES_DIR = Path(__file__).parent.parent / "fixtures" +SCRIPT = RELEASE_DIR / "BlackList-std.py" + + +@pytest.fixture +def work_dir(tmp_path): + """Temp dir with CSV fixtures so the script can find blacklist files.""" + for f in FIXTURES_DIR.glob("*.csv"): + shutil.copy(f, tmp_path / f.name) + return tmp_path + + +def run_script(work_dir): + return subprocess.run( + [sys.executable, str(SCRIPT)], + capture_output=True, + text=True, + cwd=str(work_dir), + ) + + +# --- Execution health --- + +class TestBlacklistScriptExecution: + def test_exits_zero(self, work_dir): + result = run_script(work_dir) + assert result.returncode == 0, f"Non-zero exit:\nstdout: {result.stdout}\nstderr: {result.stderr}" + + def test_produces_output(self, work_dir): + result = run_script(work_dir) + assert result.stdout.strip() != "" + + def test_writes_input_json(self, work_dir): + run_script(work_dir) + assert (work_dir / "input.json").exists() + + def test_input_json_is_nonempty(self, work_dir): + run_script(work_dir) + content = (work_dir / "input.json").read_text() + assert content.strip() != "" + + +# --- Output content --- + +class TestBlacklistScriptOutput: + def test_output_contains_analysis_name_prefix(self, work_dir): + """The test() function prefixes the name with 'veracode-api-test-'.""" + result = run_script(work_dir) + assert "veracode-api-test-" in result.stdout + + def test_output_contains_scan_config(self, work_dir): + result = run_script(work_dir) + assert "scan_config_request" in result.stdout + + def test_output_contains_target_url(self, work_dir): + result = run_script(work_dir) + assert "target_url" in result.stdout + + def test_output_contains_veracode_test_url(self, work_dir): + """The test() function uses http://veracode.com as the target URL.""" + result = run_script(work_dir) + 
assert "veracode.com" in result.stdout + + def test_output_contains_org_email(self, work_dir): + """The test() function uses example@example.com as the org email.""" + result = run_script(work_dir) + assert "example@example.com" in result.stdout + + def test_output_contains_blacklist_config(self, work_dir): + result = run_script(work_dir) + assert "blacklist_configuration" in result.stdout or "black_list" in result.stdout + + def test_blacklist_urls_appear_in_output(self, work_dir): + """URLs from blacklist.csv should be present in the output.""" + with open(FIXTURES_DIR / "blacklist.csv") as f: + reader = csv.DictReader(f) + urls = [row["url"].strip() for row in reader] + + result = run_script(work_dir) + assert any(url in result.stdout for url in urls), ( + f"None of the blacklist URLs {urls} found in output:\n{result.stdout[:500]}" + ) + + +# --- CSV not found handling --- + +class TestBlacklistMissingCSV: + def test_exits_zero_without_csv_files(self, tmp_path): + """Script should not crash when CSV files are missing (graceful error handling).""" + result = subprocess.run( + [sys.executable, str(SCRIPT)], + capture_output=True, + text=True, + cwd=str(tmp_path), + ) + assert result.returncode == 0 + + def test_reports_csv_load_error_without_csv(self, tmp_path): + """When CSV files are missing the script should report load failures.""" + result = subprocess.run( + [sys.executable, str(SCRIPT)], + capture_output=True, + text=True, + cwd=str(tmp_path), + ) + combined = result.stdout + result.stderr + assert "failed to load" in combined or "veracode-api-test-" in combined diff --git a/tests/integration/test_dast_web_request_script.py b/tests/integration/test_dast_web_request_script.py new file mode 100644 index 0000000..35c1428 --- /dev/null +++ b/tests/integration/test_dast_web_request_script.py @@ -0,0 +1,190 @@ +""" +Integration tests for Scripts/Release/DASTWebAppRequest-std.py. 
"""
Integration tests for Scripts/Release/DASTWebAppRequest-std.py.

Tests the script end-to-end via subprocess, verifying:
 - stdout mode with CLI args produces valid JSON
 - Request structure matches Veracode Dynamic Analysis API schema
 - Input file (input.json) is written correctly
 - Org info, scan config, and schedule are included

These tests do NOT require Veracode API credentials.
The script is invoked with pre-set CLI args so it runs non-interactively.
"""

import json
import shutil
import subprocess
import sys
from pathlib import Path

import pytest

RELEASE_DIR = Path(__file__).parent.parent.parent / "Scripts" / "Release"
FIXTURES_DIR = Path(__file__).parent.parent / "fixtures"
SCRIPT = RELEASE_DIR / "DASTWebAppRequest-std.py"

# Positional args: analysis name, target URL, org email, org owner name.
TEST_ARGS = [
    "integration-test-analysis",
    "https://target.example.com/",
    "owner@example.com",
    "Test Owner",
]

# Fix: bound the child process so an unexpected interactive prompt or hang
# cannot stall the CI job indefinitely.
SCRIPT_TIMEOUT = 60


@pytest.fixture
def work_dir(tmp_path):
    """Temp dir with CSV fixtures so the script can find allowlist/blacklist files."""
    for fixture in FIXTURES_DIR.glob("*.csv"):
        shutil.copy(fixture, tmp_path / fixture.name)
    return tmp_path


def run_script(work_dir, args=None):
    """Run DASTWebAppRequest-std.py from work_dir with the given args."""
    cmd = [sys.executable, str(SCRIPT)] + (args or TEST_ARGS)
    return subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        cwd=str(work_dir),
        timeout=SCRIPT_TIMEOUT,
    )


def extract_json(result) -> dict:
    """Extract the last JSON object from the script's stdout."""
    non_empty = [l for l in result.stdout.strip().splitlines() if l.strip()]
    assert non_empty, f"No output from script. stderr: {result.stderr}"
    return json.loads(non_empty[-1])


# --- Basic execution ---

class TestScriptExecution:
    def test_exits_zero(self, work_dir):
        result = run_script(work_dir)
        assert result.returncode == 0, f"Non-zero exit: {result.stderr}"

    def test_produces_stdout(self, work_dir):
        result = run_script(work_dir)
        assert result.stdout.strip() != ""

    def test_stdout_ends_with_valid_json(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert isinstance(parsed, dict)

    def test_writes_input_json_file(self, work_dir):
        run_script(work_dir)
        assert (work_dir / "input.json").exists()

    def test_input_json_matches_stdout_json(self, work_dir):
        result = run_script(work_dir)
        stdout_parsed = extract_json(result)
        with open(work_dir / "input.json") as f:
            file_parsed = json.load(f)
        assert stdout_parsed == file_parsed


# --- Request structure ---

class TestRequestStructure:
    def test_name_field_present(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "name" in parsed

    def test_name_matches_arg(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert parsed["name"] == TEST_ARGS[0]

    def test_scans_field_present(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "scans" in parsed

    def test_scans_is_nonempty_list(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert isinstance(parsed["scans"], list)
        assert len(parsed["scans"]) >= 1

    def test_schedule_field_present(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "schedule" in parsed

    def test_schedule_has_duration(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "duration" in parsed["schedule"]

    def test_org_info_present(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "org_info" in parsed

    def test_org_info_has_email(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert "email" in parsed["org_info"]

    def test_org_email_matches_arg(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        assert parsed["org_info"]["email"] == TEST_ARGS[2]


# --- Scan configuration ---

class TestScanConfiguration:
    def test_scan_has_scan_config_request(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        scan = parsed["scans"][0]
        assert "scan_config_request" in scan

    def test_scan_config_has_target_url(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        scan_config = parsed["scans"][0]["scan_config_request"]
        assert "target_url" in scan_config

    def test_target_url_matches_arg(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        target = parsed["scans"][0]["scan_config_request"]["target_url"]
        assert target["url"] == TEST_ARGS[1]

    def test_target_url_has_http_and_https(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        target = parsed["scans"][0]["scan_config_request"]["target_url"]
        assert "http_and_https" in target

    def test_http_and_https_is_true(self, work_dir):
        result = run_script(work_dir)
        parsed = extract_json(result)
        target = parsed["scans"][0]["scan_config_request"]["target_url"]
        # The script sets this to "true" (string) or boolean true
        assert str(target["http_and_https"]).lower() == "true"


# --- Different analysis names ---

class TestAnalysisNameVariants:
    @pytest.mark.parametrize("name", [
        "my-app-dast-scan",
        "prod-api-scan-v2",
        "veracode-weekly-analysis",
    ])
    def test_custom_analysis_name(self, work_dir, name):
        args = [name, "https://example.com/", "scan@company.com", "Owner"]
        result = run_script(work_dir, args)
        parsed = extract_json(result)
        assert parsed["name"] == name

    @pytest.mark.parametrize("url", [
        "https://app.example.com/",
        "https://api.example.com/v1/",
        "https://staging.example.com/",
    ])
    def test_various_target_urls(self, work_dir, url):
        args = ["test-scan", url, "test@example.com", "Owner"]
        result = run_script(work_dir, args)
        parsed = extract_json(result)
        assert parsed["scans"][0]["scan_config_request"]["target_url"]["url"] == url
@pytest.mark.parametrize("url", [ + "https://app.example.com/", + "https://api.example.com/v1/", + "https://staging.example.com/", + ]) + def test_various_target_urls(self, work_dir, url): + args = ["test-scan", url, "test@example.com", "Owner"] + result = run_script(work_dir, args) + parsed = extract_json(result) + assert parsed["scans"][0]["scan_config_request"]["target_url"]["url"] == url diff --git a/tests/integration/test_shell_scripts.py b/tests/integration/test_shell_scripts.py new file mode 100644 index 0000000..a035287 --- /dev/null +++ b/tests/integration/test_shell_scripts.py @@ -0,0 +1,223 @@ +""" +Integration tests for shell scripts under Scripts/Release/ and Scripts/Dev/bash_scripts/. + +Tests: + - bash -n syntax validation for all .sh files + - shellcheck linting (skipped if shellcheck not installed) + - Functional smoke tests: help output, basic invocation + +These tests do NOT require Veracode API credentials. +""" + +import shutil +import subprocess +import pytest +from pathlib import Path + +RELEASE_DIR = Path(__file__).parent.parent.parent / "Scripts" / "Release" +DEV_BASH_DIR = Path(__file__).parent.parent.parent / "Scripts" / "Dev" / "bash_scripts" +XML_API_DIR = Path(__file__).parent.parent.parent / "xml_api_calls" + +_RELEASE_SCRIPTS_CANDIDATES = [ + RELEASE_DIR / "veracode-installer.sh", + RELEASE_DIR / "DAST-ls-v2.sh", + RELEASE_DIR / "DAST-ls.sh", + RELEASE_DIR / "DAST-rescan.sh", + RELEASE_DIR / "SearchBuildByName.sh", + RELEASE_DIR / "vdb-purl-lte.sh", +] + +_DEV_BASH_SCRIPTS_CANDIDATES = [ + DEV_BASH_DIR / "veracode-installer.sh", + DEV_BASH_DIR / "UploadExtended.sh", + DEV_BASH_DIR / "SAST-promoteSandbox.sh", + DEV_BASH_DIR / "SCA-Library-ProjectSearch.sh", + DEV_BASH_DIR / "pipelinescan-sandboxscan-filesizecheck.sh", +] + +# Only include scripts that actually exist on disk so ids always match values +RELEASE_SCRIPTS = [s for s in _RELEASE_SCRIPTS_CANDIDATES if s.exists()] +DEV_BASH_SCRIPTS = [s for s in _DEV_BASH_SCRIPTS_CANDIDATES 
if s.exists()] +ALL_SCRIPTS = RELEASE_SCRIPTS + DEV_BASH_SCRIPTS + + +# --- Bash syntax validation --- + +class TestBashSyntax: + @pytest.mark.parametrize( + "script", + [s for s in RELEASE_SCRIPTS if s.exists()], + ids=[s.name for s in RELEASE_SCRIPTS], + ) + def test_release_script_syntax(self, script): + """All Release scripts should pass bash -n syntax check.""" + result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, ( + f"Syntax error in {script.name}:\n{result.stderr}" + ) + + @pytest.mark.parametrize( + "script", + [s for s in DEV_BASH_SCRIPTS if s.exists()], + ids=[s.name for s in DEV_BASH_SCRIPTS], + ) + def test_dev_bash_script_syntax(self, script): + """All Dev bash scripts should pass bash -n syntax check.""" + result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, ( + f"Syntax error in {script.name}:\n{result.stderr}" + ) + + +# --- ShellCheck linting --- + +@pytest.mark.skipif( + not shutil.which("shellcheck"), + reason="shellcheck not installed — install with: sudo apt-get install shellcheck", +) +class TestShellCheck: + @pytest.mark.parametrize( + "script", + [s for s in RELEASE_SCRIPTS if s.exists()], + ids=[s.name for s in RELEASE_SCRIPTS], + ) + def test_shellcheck_release_script(self, script): + """Release scripts should pass shellcheck at warning severity.""" + result = subprocess.run( + ["shellcheck", "--severity=warning", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, ( + f"shellcheck issues in {script.name}:\n{result.stdout}" + ) + + @pytest.mark.parametrize( + "script", + [s for s in DEV_BASH_SCRIPTS if s.exists()], + ids=[s.name for s in DEV_BASH_SCRIPTS], + ) + def test_shellcheck_dev_script(self, script): + """Dev bash scripts should pass shellcheck at warning severity.""" + result = subprocess.run( + ["shellcheck", "--severity=warning", str(script)], 
+ capture_output=True, + text=True, + ) + assert result.returncode == 0, ( + f"shellcheck issues in {script.name}:\n{result.stdout}" + ) + + +# --- Functional smoke tests --- + +class TestInstallerScript: + def test_installer_help_prints_usage(self): + """Installer script should print help text when invoked with unknown args.""" + script = RELEASE_DIR / "veracode-installer.sh" + if not script.exists(): + pytest.skip("veracode-installer.sh not found in Release") + result = subprocess.run( + ["bash", str(script), "--help"], + capture_output=True, + text=True, + ) + # --help hits the *) case which calls help() and exits 1 + combined = result.stdout + result.stderr + assert "Veracode" in combined or "install" in combined.lower(), ( + f"Expected help output, got:\n{combined}" + ) + + def test_installer_help_lists_options(self): + """Help output should list known installer flags.""" + script = RELEASE_DIR / "veracode-installer.sh" + if not script.exists(): + pytest.skip("veracode-installer.sh not found in Release") + result = subprocess.run( + ["bash", str(script), "--help"], + capture_output=True, + text=True, + ) + combined = result.stdout + result.stderr + assert "--install-sca-ci" in combined or "--install-sca-cli" in combined + + +class TestSearchBuildByName: + def test_script_has_valid_syntax(self): + script = RELEASE_DIR / "SearchBuildByName.sh" + if not script.exists(): + pytest.skip("SearchBuildByName.sh not found") + result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"Syntax error:\n{result.stderr}" + + def test_script_exists_and_is_readable(self): + script = RELEASE_DIR / "SearchBuildByName.sh" + if not script.exists(): + pytest.skip("SearchBuildByName.sh not found") + assert script.stat().st_size > 0 + + +class TestDASTRescan: + def test_script_has_valid_syntax(self): + script = RELEASE_DIR / "DAST-rescan.sh" + if not script.exists(): + pytest.skip("DAST-rescan.sh not found") + 
result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"Syntax error:\n{result.stderr}" + + +class TestDASTLsV2: + def test_script_has_valid_syntax(self): + script = RELEASE_DIR / "DAST-ls-v2.sh" + if not script.exists(): + pytest.skip("DAST-ls-v2.sh not found") + result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, f"Syntax error:\n{result.stderr}" + + def test_script_is_nonempty(self): + script = RELEASE_DIR / "DAST-ls-v2.sh" + if not script.exists(): + pytest.skip("DAST-ls-v2.sh not found") + assert script.stat().st_size > 100 + + +# --- XML API scripts --- + +class TestXMLAPIScripts: + XML_SCRIPTS = list(XML_API_DIR.glob("*.sh")) if XML_API_DIR.exists() else [] + + @pytest.mark.parametrize( + "script", + XML_SCRIPTS, + ids=[s.name for s in XML_SCRIPTS], + ) + def test_xml_api_script_syntax(self, script): + """XML API helper scripts should have valid bash syntax.""" + result = subprocess.run( + ["bash", "-n", str(script)], + capture_output=True, + text=True, + ) + assert result.returncode == 0, ( + f"Syntax error in {script.name}:\n{result.stderr}" + ) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/test_csv_parsing.py b/tests/unit/test_csv_parsing.py new file mode 100644 index 0000000..1434a25 --- /dev/null +++ b/tests/unit/test_csv_parsing.py @@ -0,0 +1,177 @@ +""" +Unit tests for CSV-to-JSON parsing logic. + +Tests the allowlist and blacklist CSV parsing behavior that feeds into +DASTWebAppRequest-std.py and BlackList-std.py, verified against fixture files +and controlled temporary CSVs. 
+""" + +import csv +import json +import pytest +from pathlib import Path + +FIXTURES_DIR = Path(__file__).parent.parent / "fixtures" + +VALID_RESTRICTION_TYPES = { + "NONE", + "FILE", + "FOLDER_ONLY", + "DIRECTORY_AND_SUBDIRECTORY", +} + + +# --- Helpers mirroring script CSV parsing logic --- + +def parse_allowlist_csv(csv_path: str) -> dict: + """ + Mirror of allowlistConfigCSVtoJSON() from DASTWebAppRequest-std.py. + Returns {"allowed_hosts": [...]} or raises on error. + """ + allowed_hosts = [] + with open(csv_path, "r") as f: + reader = csv.DictReader(f) + for row in reader: + allowed_hosts.append({ + "directory_restriction_type": row["directory_restriction_type"], + "http_and_https": str(row["http_and_https"]).strip().lower(), + "url": row["url"].strip(), + }) + return {"allowed_hosts": allowed_hosts} + + +def parse_blacklist_csv(csv_path: str) -> list: + """ + Mirror of blacklistConfigCSVtoJSON() from BlackList-std.py. + Returns a list of blacklist entry dicts. + """ + entries = [] + with open(csv_path, "r") as f: + reader = csv.DictReader(f) + for row in reader: + entries.append({ + "directory_restriction_type": row["directory_restriction_type"], + "http_and_https": str(row["http_and_https"]).strip().lower(), + "url": row["url"].strip(), + }) + return entries + + +# --- Allowlist fixture tests --- + +class TestAllowlistCSVFixture: + def test_fixture_loads_without_error(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + assert result is not None + + def test_result_has_allowed_hosts_key(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + assert "allowed_hosts" in result + + def test_fixture_has_multiple_entries(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + assert len(result["allowed_hosts"]) >= 2 + + def test_each_entry_has_required_fields(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + for host in result["allowed_hosts"]: + assert "directory_restriction_type" in host + 
assert "http_and_https" in host + assert "url" in host + + def test_urls_are_nonempty(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + for host in result["allowed_hosts"]: + assert host["url"] != "" + + def test_http_and_https_is_boolean_string(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + for host in result["allowed_hosts"]: + assert host["http_and_https"].lower() in ("true", "false") + + def test_restriction_types_are_valid(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + for host in result["allowed_hosts"]: + assert host["directory_restriction_type"] in VALID_RESTRICTION_TYPES + + +# --- Blacklist fixture tests --- + +class TestBlacklistCSVFixture: + def test_fixture_loads_without_error(self): + result = parse_blacklist_csv(FIXTURES_DIR / "blacklist.csv") + assert result is not None + + def test_fixture_has_entries(self): + result = parse_blacklist_csv(FIXTURES_DIR / "blacklist.csv") + assert len(result) >= 1 + + def test_each_entry_has_required_fields(self): + result = parse_blacklist_csv(FIXTURES_DIR / "blacklist.csv") + for entry in result: + assert "directory_restriction_type" in entry + assert "http_and_https" in entry + assert "url" in entry + + def test_restriction_types_are_valid(self): + result = parse_blacklist_csv(FIXTURES_DIR / "blacklist.csv") + for entry in result: + assert entry["directory_restriction_type"] in VALID_RESTRICTION_TYPES + + def test_http_and_https_is_boolean_string(self): + result = parse_blacklist_csv(FIXTURES_DIR / "blacklist.csv") + for entry in result: + assert entry["http_and_https"].lower() in ("true", "false") + + +# --- Edge case tests with tmp files --- + +class TestCSVEdgeCases: + def test_empty_allowlist_returns_empty_list(self, tmp_path): + csv_file = tmp_path / "allowlist.csv" + csv_file.write_text("directory_restriction_type,http_and_https,url\n") + result = parse_allowlist_csv(csv_file) + assert result["allowed_hosts"] == [] + + def 
test_empty_blacklist_returns_empty_list(self, tmp_path): + csv_file = tmp_path / "blacklist.csv" + csv_file.write_text("directory_restriction_type,http_and_https,url\n") + result = parse_blacklist_csv(csv_file) + assert result == [] + + def test_single_allowlist_entry(self, tmp_path): + csv_file = tmp_path / "allowlist.csv" + csv_file.write_text( + "directory_restriction_type,http_and_https,url\n" + "NONE,TRUE,https://single.example.com\n" + ) + result = parse_allowlist_csv(csv_file) + assert len(result["allowed_hosts"]) == 1 + assert result["allowed_hosts"][0]["url"] == "https://single.example.com" + + def test_multiple_blacklist_entries(self, tmp_path): + csv_file = tmp_path / "blacklist.csv" + csv_file.write_text( + "directory_restriction_type,http_and_https,url\n" + "NONE,TRUE,https://blocked1.example.com\n" + "FILE,FALSE,https://blocked2.example.com\n" + "DIRECTORY_AND_SUBDIRECTORY,TRUE,https://blocked3.example.com\n" + ) + result = parse_blacklist_csv(csv_file) + assert len(result) == 3 + + def test_missing_file_raises_error(self, tmp_path): + with pytest.raises((FileNotFoundError, OSError)): + parse_allowlist_csv(tmp_path / "nonexistent.csv") + + def test_result_serializes_to_json(self): + result = parse_allowlist_csv(FIXTURES_DIR / "allowlist.csv") + serialized = json.dumps(result) + assert isinstance(serialized, str) + reparsed = json.loads(serialized) + assert reparsed["allowed_hosts"] == result["allowed_hosts"] + + def test_glblacklist_fixture_loads(self): + result = parse_blacklist_csv(FIXTURES_DIR / "glblacklist.csv") + assert isinstance(result, list) + assert len(result) >= 1 diff --git a/tests/unit/test_email_validation.py b/tests/unit/test_email_validation.py new file mode 100644 index 0000000..75114a2 --- /dev/null +++ b/tests/unit/test_email_validation.py @@ -0,0 +1,69 @@ +""" +Unit tests for email validation logic used in DASTWebAppRequest-std.py. + +The is_valid_email() function uses the pattern r'^[\\w\\.-]+@[\\w\\.-]+\\.\\w+$'. 
"""
Unit tests for email validation logic used in DASTWebAppRequest-std.py.

The is_valid_email() function uses the pattern r'^[\\w\\.-]+@[\\w\\.-]+\\.\\w+$'.

These tests verify that behavior without importing the script (which has
module-level side effects including stdin prompts and file I/O).
"""

import re

# Pattern mirrored from Scripts/Release/DASTWebAppRequest-std.py.
# Note: the character class is [\w.-], so '+' (plus-addressing) is rejected.
EMAIL_PATTERN = r'^[\w\.-]+@[\w\.-]+\.\w+$'


def is_valid_email(email: str) -> bool:
    """Mirror of is_valid_email() from DASTWebAppRequest-std.py.

    Returns True when *email* matches EMAIL_PATTERN in full.
    """
    return bool(re.match(EMAIL_PATTERN, email))


class TestValidEmails:
    def test_simple_email(self):
        assert is_valid_email("user@example.com")

    def test_subdomain_email(self):
        assert is_valid_email("user@mail.example.com")

    def test_dot_in_local_part(self):
        assert is_valid_email("first.last@example.com")

    def test_hyphen_in_domain(self):
        assert is_valid_email("user@my-company.com")

    def test_numeric_local_part(self):
        assert is_valid_email("user123@example.com")

    def test_underscore_in_local(self):
        assert is_valid_email("user_name@example.com")

    def test_multi_level_tld(self):
        assert is_valid_email("user@example.co.uk")


class TestInvalidEmails:
    def test_no_at_sign(self):
        assert not is_valid_email("userexample.com")

    def test_no_domain_after_at(self):
        assert not is_valid_email("user@")

    def test_no_tld(self):
        assert not is_valid_email("user@example")

    def test_empty_string(self):
        assert not is_valid_email("")

    def test_only_at_sign(self):
        assert not is_valid_email("@")

    def test_plus_sign_not_supported(self):
        # The script's pattern uses [\w\.-] which does NOT include +
        # This documents that the pattern rejects plus-addressed emails
        assert not is_valid_email("user+tag@example.com")

    def test_spaces_rejected(self):
        assert not is_valid_email("user @example.com")

    def test_double_at(self):
        assert not is_valid_email("user@@example.com")
b/tests/unit/test_schedule_helpers.py @@ -0,0 +1,137 @@ +""" +Unit tests for scan scheduling helper functions. + +These mirror the schedule logic from Scripts/Release/DASTWebAppRequest-std.py, +tested in isolation to avoid module-level side effects. +""" + +import pytest + + +# --- Mirrors of schedule helpers from DASTWebAppRequest-std.py --- + +def _is_true(value, var_true: bool = False): + if str(value).casefold() == "true": + return True if var_true else "true" + return False if var_true else "false" + + +def schedule_now(now_b: str = _is_true(False), days: int = 1) -> dict: + """Mirror of scheduleNow() from DASTWebAppRequest-std.py.""" + schedule = {"schedule": { + "now": now_b, + "duration": { + "length": str(days), + "unit": "DAY" + } + }} + if now_b == _is_true(True): + schedule["schedule"]["scheduled"] = True + return schedule + + +def schedule_scan(start_now: bool = False, length: int = 1, unit: str = "DAY", + recurring: bool = False, recurrence_type: str = "WEEKLY", + schedule_end_after: int = 2, recurrence_interval: int = 1, + day_of_week: str = "FRIDAY") -> dict: + """Mirror of scheduleScan() from DASTWebAppRequest-std.py.""" + schedule = {"schedule": { + "duration": {"length": length, "unit": unit} + }} + if recurring: + schedule["schedule"]["scan_recurrence_schedule"] = { + "recurrence_type": recurrence_type, + "schedule_end_after": schedule_end_after, + "recurrence_interval": recurrence_interval, + "day_of_week": day_of_week + } + if start_now: + schedule["schedule"].update({"scheduled": True, "now": True}) + return schedule + + +# --- Tests --- + +class TestIsTrue: + def test_true_string_returns_true_string(self): + assert _is_true("true") == "true" + + def test_false_string_returns_false_string(self): + assert _is_true("false") == "false" + + def test_true_bool_with_var_true_returns_python_true(self): + assert _is_true(True, var_true=True) is True + + def test_false_bool_with_var_true_returns_python_false(self): + assert _is_true(False, 
var_true=True) is False + + def test_case_insensitive_TRUE(self): + assert _is_true("TRUE") == "true" + + def test_case_insensitive_True(self): + assert _is_true("True") == "true" + + +class TestScheduleNow: + def test_returns_dict(self): + result = schedule_now() + assert isinstance(result, dict) + + def test_has_schedule_key(self): + result = schedule_now() + assert "schedule" in result + + def test_default_now_is_false_string(self): + result = schedule_now() + assert result["schedule"]["now"] == "false" + + def test_now_true_sets_scheduled_flag(self): + result = schedule_now(now_b="true") + assert result["schedule"].get("scheduled") is True + + def test_duration_unit_is_day(self): + result = schedule_now() + assert result["schedule"]["duration"]["unit"] == "DAY" + + def test_custom_days_reflected_as_string(self): + result = schedule_now(days=5) + assert result["schedule"]["duration"]["length"] == "5" + + def test_one_day_default(self): + result = schedule_now() + assert result["schedule"]["duration"]["length"] == "1" + + +class TestScheduleScan: + def test_returns_dict(self): + result = schedule_scan() + assert isinstance(result, dict) + + def test_has_schedule_key(self): + result = schedule_scan() + assert "schedule" in result + + def test_default_unit_is_day(self): + result = schedule_scan() + assert result["schedule"]["duration"]["unit"] == "DAY" + + def test_start_now_sets_flags(self): + result = schedule_scan(start_now=True) + assert result["schedule"]["now"] is True + assert result["schedule"]["scheduled"] is True + + def test_recurring_adds_recurrence_block(self): + result = schedule_scan(recurring=True) + assert "scan_recurrence_schedule" in result["schedule"] + + def test_recurring_day_of_week(self): + result = schedule_scan(recurring=True, day_of_week="MONDAY") + assert result["schedule"]["scan_recurrence_schedule"]["day_of_week"] == "MONDAY" + + def test_non_recurring_has_no_recurrence_block(self): + result = schedule_scan(recurring=False) + assert 
"scan_recurrence_schedule" not in result["schedule"] + + def test_custom_length(self): + result = schedule_scan(length=7) + assert result["schedule"]["duration"]["length"] == 7