Skip to content

refactor: add otel tracing filter to logging #464

refactor: add otel tracing filter to logging

refactor: add otel tracing filter to logging #464

Workflow file for this run

# Workflow display name shown in the GitHub Actions UI.
name: Docs

Check failure on line 1 in .github/workflows/docs-publish.yml

View workflow run for this annotation

GitHub Actions / .github/workflows/docs-publish.yml

Invalid workflow file

(Line: 411, Col: 22): The expression is not closed. An unescaped ${{ sequence was found, but the closing }} sequence was not found.
# Builds, validates, and deploys documentation to orphan deployment branches.
# Mintlify reads from these branches — main stays clean of generated artifacts.
#
# See docs/PUBLISHING.md for the full architecture and strategy.

# Triggers: docs-affecting pushes to main, published releases, docs-affecting
# PR updates, and manual dispatch (with overrides for testing).
on:
  push:
    branches: [main]
    paths:
      - "docs/**"
      - "mellea/**"
      - "cli/**"
      - "tooling/docs-autogen/**"
      - ".github/workflows/docs-publish.yml"
  release:
    types: [published]
  pull_request:
    # `labeled` is included so that adding the docs-preview label (checked by
    # the deploy job's `if:`) re-runs the workflow without a new push.
    types: [opened, synchronize, reopened, labeled]
    paths:
      - "docs/**"
      - "mellea/**"
      - "cli/**"
      - "tooling/docs-autogen/**"
      - ".github/workflows/docs-publish.yml"
  workflow_dispatch:
    inputs:
      force_publish:
        description: "Deploy even from a non-main context (for testing)"
        type: boolean
        default: false
      target_branch:
        description: "Override deploy target branch (default: docs/preview)"
        type: string
        default: "docs/preview"
      strict_validation:
        description: "Fail the build if validation checks fail"
        type: boolean
        default: false

# No default token permissions; each job grants only what it needs.
permissions: {}

# One active run per ref; a newer run cancels an in-flight one.
concurrency:
  group: docs-publish-${{ github.ref }}
  cancel-in-progress: true

env:
  UV_FROZEN: "1"
  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
  # ---------------------------------------------------------------------------
  # Build & Validate
  # ---------------------------------------------------------------------------
  build-and-validate:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          # Full history fetch — presumably the autogen build derives
          # source-link metadata from git; TODO confirm it is still needed.
          fetch-depth: 0
          persist-credentials: false
      - name: Set up uv
        uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
        with:
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Install dependencies
        run: uv sync --frozen --all-extras --group dev
      # -- Generate API documentation ------------------------------------------
      - name: Generate API documentation
        run: uv run python tooling/docs-autogen/build.py
      # -- Run docs-autogen unit tests ------------------------------------------
      - name: Run CLI reference tests
        run: uv run pytest tooling/docs-autogen/test_cli_reference.py -v --tb=short
      # -- Validate static docs ------------------------------------------------
      # The three checks below tee their output to /tmp logs that the
      # "Write job summary" step parses. They are advisory unless the
      # dispatch input strict_validation is true (on non-dispatch events
      # `inputs.strict_validation` is empty, so `!= true` keeps them advisory).
      - name: Lint static docs (markdownlint)
        id: markdownlint
        run: |
          set -o pipefail
          npx --yes markdownlint-cli "docs/docs/**/*.md" --config docs/docs/.markdownlint.json 2>&1 \
            | tee /tmp/markdownlint.log
        continue-on-error: ${{ inputs.strict_validation != true }}
      # -- Validate generated API docs -----------------------------------------
      - name: Validate MDX syntax and links
        id: validate_mdx
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/validate.py docs/docs/api --skip-coverage --docs-root docs 2>&1 \
            | tee /tmp/validate_mdx.log
        continue-on-error: ${{ inputs.strict_validation != true }}
      - name: Audit API coverage
        id: audit_coverage
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/audit_coverage.py --docs-dir docs/docs/api --threshold 80 2>&1 \
            | tee /tmp/audit_coverage.log
        continue-on-error: ${{ inputs.strict_validation != true }}
      # Hard gate: no continue-on-error here, so a quality failure fails the
      # job (the summary step below still runs via `if: always()`).
      - name: Docstring quality gate
        id: quality_gate
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/audit_coverage.py \
            --docs-dir docs/docs/api \
            --quality --fail-on-quality --threshold 100 \
            --orphans \
            --output /tmp/quality_report.json 2>&1 \
            | tee /tmp/quality_gate.log
      # -- Upload artifact for deploy job --------------------------------------
      - name: Upload quality report
        if: always()
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7
        with:
          name: docstring-quality-report
          path: /tmp/quality_report.json
          retention-days: 30
      # Uploaded even when advisory checks failed, so the deploy job can
      # still publish in non-strict mode.
      - name: Upload docs artifact
        if: success() || (inputs.strict_validation != true)
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7
        with:
          name: docs-site
          path: docs/docs/
          retention-days: 7
      # -- Write job summary ---------------------------------------------------
      # Parses the /tmp logs captured above and appends a markdown table plus
      # collapsible raw logs to the GitHub job summary.
      - name: Write job summary
        if: always()
        run: |
          python3 - <<'PYEOF'
          import os, re

          def icon(outcome):
              # Map a step outcome to an emoji; anything that is neither
              # success nor failure (skipped/cancelled/empty) renders as ⏭️.
              return "✅" if outcome == "success" else ("❌" if outcome == "failure" else "⏭️")

          def read_log(path):
              # Read a captured step log; a missing file yields "".
              try:
                  raw = open(path).read().strip()
                  # Strip ANSI escape codes (colour output from uv/pytest etc.)
                  return re.sub(r'\x1b\[[0-9;]*[mK]', '', raw)
              except FileNotFoundError:
                  return ""

          # Step outcomes are injected via the step-level env block below.
          markdownlint_outcome = os.environ.get("STEPS_MARKDOWNLINT_OUTCOME", "")
          validate_outcome = os.environ.get("STEPS_VALIDATE_MDX_OUTCOME", "")
          coverage_outcome = os.environ.get("STEPS_AUDIT_COVERAGE_OUTCOME", "")
          quality_gate_outcome = os.environ.get("STEPS_QUALITY_GATE_OUTCOME", "")
          lint_log = read_log("/tmp/markdownlint.log")
          validate_log = read_log("/tmp/validate_mdx.log")
          coverage_log = read_log("/tmp/audit_coverage.log")
          quality_gate_log = read_log("/tmp/quality_gate.log")
          # Count markdownlint issues (lines matching file:line:col format)
          lint_issues = len([l for l in lint_log.splitlines() if re.match(r'.+:\d+:\d+ ', l)])
          lint_detail = f"{lint_issues} issue(s)" if lint_issues else "no issues"
          # Extract coverage stats from audit_coverage output
          cov_pct = re.search(r"Coverage:\s+(\S+%)", coverage_log)
          cov_sym = re.search(r"Documented:\s+(\d+)", coverage_log)
          cov_tot = re.search(r"Total classes \+ functions:\s+(\d+)", coverage_log)
          cov_detail = (
              f"{cov_pct.group(1)} ({cov_sym.group(1)}/{cov_tot.group(1)} symbols)"
              if cov_pct and cov_sym and cov_tot else ""
          )

          # Parse per-check error counts from validate output.
          # Each check prints "N errors found" on the next line when it fails.
          def parse_validate_detail(log):
              counts = {}
              for label, key in [
                  ("Source links", "source"), ("MDX syntax", "syntax"),
                  ("Internal links", "links"), ("Anchor collisions", "anchors"),
                  ("Stale files", "stale"),
              ]:
                  m = re.search(rf"{label}: (?:PASS|FAIL)(?:\s+(\d+) errors found)?", log, re.DOTALL)
                  if m:
                      counts[key] = int(m.group(1)) if m.group(1) else 0
              total = sum(counts.values())
              if not total:
                  return "no issues"
              parts = []
              if counts.get("syntax"): parts.append(f"{counts['syntax']} syntax error(s)")
              if counts.get("links"): parts.append(f"{counts['links']} broken link(s)")
              if counts.get("anchors"): parts.append(f"{counts['anchors']} anchor collision(s)")
              if counts.get("source"): parts.append(f"{counts['source']} source link error(s)")
              if counts.get("stale"): parts.append(f"{counts['stale']} stale file(s)")
              return ", ".join(parts)

          mdx_detail = parse_validate_detail(validate_log)
          # Parse per-kind counts from the quality gate log.
          # _print_quality_report emits section headers like:
          #   "  Missing docstrings (12)"
          #   "  Missing Args section (5)"
          # Capture label -> count from those lines, then build a compact
          # per-kind breakdown for the summary table cell.
          kind_short = {
              "Missing docstrings": "missing",
              "Short docstrings": "short",
              "Missing Args section": "no_args",
              "Missing Returns section": "no_returns",
              "Missing Yields section (generator)": "no_yields",
              "Missing Raises section": "no_raises",
              "Missing class Args section": "no_class_args",
              "Duplicate Args: in class + __init__ (Option C violation)": "dup_init_args",
              "Param name mismatches (documented but not in signature)": "param_mismatch",
              "TypedDict phantom fields (documented but not declared)": "td_phantom",
              "TypedDict undocumented fields (declared but missing from Attributes:)": "td_undoc",
              "Missing parameter type annotations (type absent from API docs)": "missing_param_type",
              "Missing return type annotations (type absent from API docs)": "missing_return_type",
              "Param type mismatch (docstring vs annotation)": "param_type_mismatch",
              "Return type mismatch (docstring vs annotation)": "return_type_mismatch",
          }
          section_re = re.compile(r"^\s{2}(.+?)\s+\((\d+)\)\s*$", re.MULTILINE)
          kind_counts = {}
          for m in section_re.finditer(quality_gate_log):
              label, count = m.group(1), int(m.group(2))
              short = kind_short.get(label)
              if short:
                  kind_counts[short] = count
          if kind_counts:
              parts = [f"{v} {k}" for k, v in kind_counts.items()]
              quality_gate_detail = ", ".join(parts)
          else:
              # Fall back to the summary annotation message
              qm = re.search(r"::(notice|warning|error) title=Docstring quality::(.+)", quality_gate_log)
              quality_gate_detail = re.sub(r"\s*—\s*see job summary.*$", "", qm.group(2)) if qm else ""
          CONTRIB_URL = (
              "https://github.com/generative-computing/mellea/blob/main"
              "/docs/docs/guide/CONTRIBUTING.md"
          )
          REPO = os.environ.get("GITHUB_REPOSITORY", "")
          RUN_ID = os.environ.get("GITHUB_RUN_ID", "")
          ARTIFACT_URL = f"https://github.com/{REPO}/actions/runs/{RUN_ID}#artifacts"
          lines = [
              "## Docs Build — Validation Summary\n",
              "| Check | Result | Details |",
              "|-------|--------|---------|",
              f"| Markdownlint | {icon(markdownlint_outcome)} {markdownlint_outcome} | {lint_detail} |",
              f"| MDX Validation | {icon(validate_outcome)} {validate_outcome} | {mdx_detail} |",
              f"| API Coverage | {icon(coverage_outcome)} {coverage_outcome} | {cov_detail} |",
              f"| Docstring Quality | {icon(quality_gate_outcome)} {quality_gate_outcome} | {quality_gate_detail} |",
          ]
          lines.append("")
          # When the quality gate fails, surface a direct link to the fix reference.
          # Per-kind Ref: URLs in the log output are inside a ```text``` block and
          # don't render as links there.
          if quality_gate_outcome == "failure":
              lines += [
                  "> ❌ **Docstring quality gate failed.** "
                  f"See the [CI docstring checks reference]({CONTRIB_URL}#ci-docstring-checks-reference) "
                  "for per-kind fix instructions, or expand **Docstring quality details** below for the full list. \n"
                  f"> The full machine-readable report is available as the [`docstring-quality-report` artifact]({ARTIFACT_URL}).",
                  "",
              ]
          # Collapsible raw logs, each truncated to its per-log limit.
          for title, log, limit in [
              ("Markdownlint output", lint_log, 5_000),
              ("MDX validation output", validate_log, 5_000),
              ("API coverage output", coverage_log, 5_000),
              ("Docstring quality details", quality_gate_log, 1_000_000),
          ]:
              if log:
                  lines += [
                      f"<details><summary>{title}</summary>\n",
                      "```text",
                      log[:limit] + (" [truncated]" if len(log) > limit else ""),
                      "```",
                      "</details>\n",
                  ]
          with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
              f.write("\n".join(lines))
          PYEOF
        env:
          # Outcomes are passed via env so the quoted 'PYEOF' heredoc needs no
          # ${{ }} interpolation inside the script body.
          STEPS_MARKDOWNLINT_OUTCOME: ${{ steps.markdownlint.outcome }}
          STEPS_VALIDATE_MDX_OUTCOME: ${{ steps.validate_mdx.outcome }}
          STEPS_AUDIT_COVERAGE_OUTCOME: ${{ steps.audit_coverage.outcome }}
          STEPS_QUALITY_GATE_OUTCOME: ${{ steps.quality_gate.outcome }}
# ---------------------------------------------------------------------------
# Deploy to orphan branch
# ---------------------------------------------------------------------------
deploy:
needs: build-and-validate
runs-on: ubuntu-latest
permissions:
contents: write
timeout-minutes: 10
# Deploy on: push to main, release, force_publish via dispatch,
# or PRs labelled "docs-preview" (→ docs/preview branch).
if: >-
github.event_name == 'push' ||
github.event_name == 'release' ||
(github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'docs-preview')) ||
(github.event_name == 'workflow_dispatch' && inputs.force_publish)
steps:
- name: Download docs artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8
with:
name: docs-site
path: docs-site/
- name: Determine target branch
id: target
run: |
if [ "$EVENT_NAME" = "release" ]; then
echo "branch=docs/production" >> "$GITHUB_OUTPUT"
elif [ "$EVENT_NAME" = "pull_request" ]; then
echo "branch=docs/preview" >> "$GITHUB_OUTPUT"
elif [ "$EVENT_NAME" = "workflow_dispatch" ] && [ -n "${INPUTS_TARGET_BRANCH}" ]; then
echo "branch=${INPUTS_TARGET_BRANCH}" >> "$GITHUB_OUTPUT"
else
echo "branch=docs/staging" >> "$GITHUB_OUTPUT"
fi
env:
EVENT_NAME: ${{ github.event_name }}
INPUTS_TARGET_BRANCH: ${{ inputs.target_branch }}
- name: Add DO NOT EDIT warning
run: |
cat > docs-site/_DO_NOT_EDIT.md << 'EOF'
# DO NOT EDIT THIS BRANCH
This branch is **fully automated**. Every file here is generated by
the `docs-publish` GitHub Actions workflow and force-pushed on each run.
**Any manual edits will be overwritten without warning.**
To change documentation:
- Static guides: edit files under `docs/docs/` on `main`
- API reference: improve docstrings in Python source (`mellea/`, `cli/`)
- Pipeline config: see `tooling/docs-autogen/` on `main`
For details, see `docs/PUBLISHING.md` on `main`.
EOF
- name: Deploy to ${{ steps.target.outputs.branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_branch: ${{ steps.target.outputs.branch }}
publish_dir: docs-site/
force_orphan: true
user_name: "github-actions[bot]"
user_email: "github-actions[bot]@users.noreply.github.com"
commit_message: |
docs: publish from ${{ github.sha }}
Branch: ${{ github.ref_name }}
Trigger: ${{ github.event_name }}${{ github.event.pull_request.number && format(' (PR #{0})', github.event.pull_request.number) || '' }}
Run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
- name: Write deploy summary
if: always()
run: |
TARGET="${STEPS_TARGET_OUTPUTS_BRANCH}"
REPO="${GITHUB_REPOSITORY}"
SHA="${GITHUB_SHA}"
if [ "${JOB_STATUS}" = "success" ]; then
STATUS="✅ Deployed"
DETAIL="Published to [\`${TARGET}\`](https://github.com/${REPO}/tree/${TARGET})"
else
STATUS="❌ Failed"
DETAIL="Attempted deploy to \`${TARGET}\`"
fi
cat >> "$GITHUB_STEP_SUMMARY" << EOF
## Docs Deploy — ${STATUS}
| | |
|-|-|
| Branch | \`${TARGET}\` |
| Source | \`${SHA:0:7}\` |
| Trigger | ${EVENT_NAME}${PR_SUFFIX} |
${DETAIL}
EOF
env:
STEPS_TARGET_OUTPUTS_BRANCH: ${{ steps.target.outputs.branch }}
JOB_STATUS: ${{ job.status }}
EVENT_NAME: ${{ github.event_name }}
PR_SUFFIX: ${{ github.event.pull_request.number && format(' (PR #{0})', github.event.pull_request.number) || '' }}