From a04a8c935bc76a8166e55de3491279ef79eb2564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20J=2E=20Arg=C3=BCello?= <12516370+cjarguello@users.noreply.github.com> Date: Fri, 10 Apr 2026 15:35:21 -0600 Subject: [PATCH 1/5] chore: sync generated policy packet routing for truthfulness canon --- .agents/policy/taylored-policy.md | 18 ++++++++++++++++-- AGENTS.md | 3 ++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/.agents/policy/taylored-policy.md b/.agents/policy/taylored-policy.md index 8c8e2f1..cfad6cc 100644 --- a/.agents/policy/taylored-policy.md +++ b/.agents/policy/taylored-policy.md @@ -40,6 +40,7 @@ CANONICAL POLICY SURFACES: - `bitpod-docs/policies/taylored-policy.md` = canonical global work-policy contract - `bitpod-docs/policies/taylored-policy-rules.md` = canonical prohibition list - `bitpod-docs/policies/policy-registry.toml` = canonical authority-status registry +- `bitpod-docs/policies/truthfulness-policy.md` = canonical 3-tier truthfulness policy entrypoint ACTIVE ROOT SURFACES: @@ -80,8 +81,9 @@ For local umbrella-root sessions: 1. root `AGENTS.md` 2. `bitpod-docs/policies/taylored-policy.md` 3. `bitpod-docs/policies/taylored-policy-rules.md` -4. `bitpod-docs/policies/truthfulness-and-verification-policy.md` -5. `bitpod-docs/policies/file-creation-and-artifact-placement-policy.md` +4. `bitpod-docs/policies/truthfulness-policy.md` +5. `bitpod-docs/policies/truthfulness-and-verification-policy.md` +6. 
`bitpod-docs/policies/file-creation-and-artifact-placement-policy.md` AUTHORITY MAP: @@ -196,9 +198,21 @@ ROOT REFERENCES: - `$WORKSPACE/bitpod-docs/policies/taylored-policy-rules.md` - `$WORKSPACE/bitpod-docs/policies/policy-registry.toml` - `$WORKSPACE/bitpod-docs/process/read-first-protocol.md` +- `$WORKSPACE/bitpod-docs/policies/truthfulness-policy.md` - `$WORKSPACE/bitpod-docs/policies/truthfulness-and-verification-policy.md` - `$WORKSPACE/bitpod-docs/process/codex-global-policy-packet-contract.md` +TRUTHFULNESS POLICY: + +- the canonical truthfulness family entrypoint is `$WORKSPACE/bitpod-docs/policies/truthfulness-policy.md` +- truthfulness is governed as a 3-tier lifecycle: + 1. `Verification Protocol` = always-on claim discipline to reduce unjustified certainty + 2. `Truth Recovery Protocol` = incident-mode direct truth surfacing once a prior claim is materially undermined + 3. `Trust Recovery Protocol` = post-incident corrective action, operator-inclusive learning, and protocol hardening +- agents must follow the active tier and required handoff rules +- do not compress verification, truth recovery, and trust recovery into one undifferentiated behavior surface +- this add-on truthfulness-policy lane does not replace or rewrite the earlier Taylor01 runtime/mind architecture lane + TRUTHFULNESS DISCLOSURE RULE: - if data, context, memory, or other information was lost, the loss must be stated directly when relevant or when asked diff --git a/AGENTS.md b/AGENTS.md index 19f6012..590ab47 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -3,7 +3,8 @@ Read these first for global policy: 1. `.agents/policy/taylored-policy.md` 2. `.agents/policy/taylored-policy-rules.md` -3. repo-specific instructions below this generated block +3. truthfulness family canon: `bitpod-docs/policies/truthfulness-policy.md` +4. repo-specific instructions below this generated block Canonical source lives in `bitpod-docs`. Do not edit this generated block by hand. 
From 985c79a2ff2e13d921509b6f203123224296ba10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20J=2E=20Arg=C3=BCello?= <12516370+cjarguello@users.noreply.github.com> Date: Sat, 11 Apr 2026 19:23:02 -0600 Subject: [PATCH 2/5] fix(gpt-bridge): add /health endpoint and use it for status checks --- gpt_bridge/bridge_ctl.sh | 14 ++++++++------ gpt_bridge/gpt_bridge.py | 13 +++++++++++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/gpt_bridge/bridge_ctl.sh b/gpt_bridge/bridge_ctl.sh index a9f0b9d..115c8c0 100755 --- a/gpt_bridge/bridge_ctl.sh +++ b/gpt_bridge/bridge_ctl.sh @@ -52,12 +52,14 @@ while [[ $# -gt 0 ]]; do done check_bridge() { - local code - code="$(curl -s -m 2 -o /dev/null -w "%{http_code}" \ - -X POST "$BRIDGE_URL" \ - -H "Content-Type: application/json" \ - -d '{"task_type":"general","message":"ping","context":[],"constraints":{"json_only":true,"max_tokens":10},"meta":{}}' 2>/dev/null || true)" - [[ "$code" != "000" ]] + local health_url code + if [[ "$BRIDGE_URL" == */ask ]]; then + health_url="${BRIDGE_URL%/ask}/health" + else + health_url="${BRIDGE_URL%/}/health" + fi + code="$(curl -s -m 2 -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || true)" + [[ "$code" == "200" ]] } bridge_host() { diff --git a/gpt_bridge/gpt_bridge.py b/gpt_bridge/gpt_bridge.py index ab1a9d1..6940796 100644 --- a/gpt_bridge/gpt_bridge.py +++ b/gpt_bridge/gpt_bridge.py @@ -261,6 +261,19 @@ def do_POST(self) -> None: ) self._write_json(HTTPStatus.OK, response_payload) + def do_GET(self) -> None: + if self.path == "/health": + self._write_json( + HTTPStatus.OK, + { + "status": "ok", + "service": "gpt-bridge", + "timestamp": datetime.now(timezone.utc).isoformat(), + }, + ) + return + self._write_json(HTTPStatus.NOT_FOUND, {"error": "Not found"}) + def resolve_model(gpt_request: GPTRequest, config: BridgeConfig) -> str: override_model = gpt_request.constraints.get("model") From fc1219afa2ea582f7562824d352d058c8b353542 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Carlos=20J=2E=20Arg=C3=BCello?= <12516370+cjarguello@users.noreply.github.com> Date: Thu, 16 Apr 2026 17:11:27 -0600 Subject: [PATCH 3/5] feat(cleanup): unify ad hoc + weekly trash-to-purge output contract --- audit_ctl.sh | 281 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 265 insertions(+), 16 deletions(-) diff --git a/audit_ctl.sh b/audit_ctl.sh index 9ee544f..b967fb4 100755 --- a/audit_ctl.sh +++ b/audit_ctl.sh @@ -105,6 +105,14 @@ has_phrase() { [[ "$haystack" == *"$needle"* ]] } +has_tier_language() { + local q="$1" + has_phrase "$q" "t1" || has_phrase "$q" "t2" || has_phrase "$q" "t3" || \ + has_phrase "$q" "tier 1" || has_phrase "$q" "tier 2" || has_phrase "$q" "tier 3" || \ + has_phrase "$q" "v1" || has_phrase "$q" "v2" || has_phrase "$q" "v3" || \ + has_phrase "$q" "quick" || has_phrase "$q" "medium" || has_phrase "$q" "full" +} + print_header() { echo "Audit Control | $1" echo "Timestamp: $NOW" @@ -115,6 +123,15 @@ print_section() { echo "$1" } +cleanup_trash_title() { + local mode="${1:-adhoc}" + if [[ "$mode" == "weekly" ]]; then + echo "Weekly Local Trash-to-Purge Cleanup" + else + echo "Local Trash-to-Purge Cleanup" + fi +} + render_bool() { if [[ "$1" -eq 1 ]]; then echo "YES" @@ -1254,7 +1271,7 @@ emit_workspace_hygiene_section() { while IFS='|' read -r rel_path file_count dir_count stale_files has_git action; do [[ -z "$rel_path" ]] && continue [[ "$action" == "review_for_local_purge" ]] || continue - echo "- trash_bucket=$rel_path action=$action files=$file_count dirs=$dir_count stale_files=$stale_files has_git=$has_git" + echo "- trash_bucket=$rel_path action=$action files=$file_count directories=$dir_count stale_files=$stale_files has_git=$has_git" done < "$TRASH_BUCKET_ROWS_FILE" } @@ -1296,7 +1313,7 @@ emit_cleanup_plan_section() { echo "- os_trash_permission_enabled=$( [[ "$LOCAL_PURGE_OS_TRASH_ALLOWED" == "1" ]] && echo YES || echo NO )" while IFS='|' read -r finding_id scope action path destination reason 
policy_rule file_count dir_count execution_allowed requires_tier requires_permission; do [[ -z "$finding_id" ]] && continue - echo "- finding_id=$finding_id scope=$scope action=$action path=$path destination=${destination:-none} reason=\"$reason\" policy_rule=$policy_rule files=$file_count dirs=$dir_count execution_allowed=$execution_allowed requires_tier=$requires_tier requires_permission=$requires_permission" + echo "- finding_id=$finding_id scope=$scope action=$action path=$path destination=${destination:-none} reason=\"$reason\" policy_rule=$policy_rule files=$file_count directories=$dir_count execution_allowed=$execution_allowed requires_tier=$requires_tier requires_permission=$requires_permission" done < "$CLEANUP_PLAN_ROWS_FILE" } @@ -1581,28 +1598,21 @@ run_cleanup_auto() { local include_local_workspace="$1" local force_full="$2" - local t1_status=0 - if run_cleanup_tier "T1" "$include_local_workspace" 0 >/dev/null 2>&1; then - t1_status=0 - else - t1_status=$? - fi - if [[ "$t1_status" -eq 0 && "$force_full" -eq 0 ]]; then + # Run T1 first and only escalate when quick gate says local-workspace is not clean enough. + run_cleanup_tier "T1" "$include_local_workspace" 0 >/dev/null 2>&1 || true + if [[ "$force_full" -eq 0 && "$(quick_gate_status)" == "STOP_OK" ]]; then run_cleanup_tier "T1" "$include_local_workspace" 0 return 0 fi - local t2_status=0 - if run_cleanup_tier "T2" "$include_local_workspace" 0 >/dev/null 2>&1; then - t2_status=0 - else - t2_status=$? - fi - if [[ "$t2_status" -eq 0 && "$force_full" -eq 0 ]]; then + # Escalate to T2 only when T1 quick gate failed, then stop if medium gate is good. + run_cleanup_tier "T2" "$include_local_workspace" 0 >/dev/null 2>&1 || true + if [[ "$force_full" -eq 0 && "$(medium_gate_status)" == "STOP_OK" ]]; then run_cleanup_tier "T2" "$include_local_workspace" 0 return 0 fi + # Escalate to T3 only when lower tiers still signal unresolved findings, or force_full is requested. 
run_cleanup_tier "T3" "$include_local_workspace" 0 } @@ -1619,6 +1629,228 @@ run_parity_pulse() { emit_pulse_report "$require_fresh" "$event_name" } +run_cleanup_trash_execute() { + local mode="${1:-adhoc}" + local lane_title + lane_title="$(cleanup_trash_title "$mode")" + local src_root="$ROOT/local-workspace/local-trash-delete/local-working-files" + local dst_root="$ROOT/local-workspace/local-trash-delete/local-purge/local-working-files" + local cutoff_epoch + cutoff_epoch="$(python3 - <<'PY' +from datetime import datetime, timezone, timedelta +print(int((datetime.now(timezone.utc) - timedelta(days=30)).timestamp())) +PY +)" + + print_header "$lane_title" + + if [[ ! -d "$src_root" ]]; then + print_section "Result" + echo "- scope=local-trash-delete/local-working-files -> local-trash-delete/local-purge/local-working-files" + echo "- status=NO-OP" + echo "- detail=source path missing: $src_root" + return 0 + fi + + mkdir -p "$dst_root" + + local tmp_files="$TMP_DIR/cleanup_trash_eligible_files.txt" + local tmp_dirs="$TMP_DIR/cleanup_trash_eligible_directories.txt" + local tmp_effective_dirs="$TMP_DIR/cleanup_trash_effective_directories.txt" + local tmp_effective_files="$TMP_DIR/cleanup_trash_effective_files.txt" + local tmp_skipped="$TMP_DIR/cleanup_trash_skipped.txt" + local tmp_moved="$TMP_DIR/cleanup_trash_moved.txt" + : > "$tmp_files" + : > "$tmp_dirs" + : > "$tmp_effective_dirs" + : > "$tmp_effective_files" + : > "$tmp_skipped" + : > "$tmp_moved" + + # Phase 1: collect eligible files/directories older than 30 days. 
+ while IFS= read -r -d '' f; do + [[ -z "$f" ]] && continue + local mtime + mtime="$(stat -f %m "$f" 2>/dev/null || true)" + [[ -z "$mtime" ]] && continue + if [[ "$mtime" -lt "$cutoff_epoch" ]]; then + printf '%s\n' "$f" >> "$tmp_files" + fi + done < <(find "$src_root" -type f -print0 2>/dev/null) + + while IFS= read -r -d '' d; do + [[ -z "$d" ]] && continue + [[ "$d" == "$src_root" ]] && continue + local mtime + mtime="$(stat -f %m "$d" 2>/dev/null || true)" + [[ -z "$mtime" ]] && continue + if [[ "$mtime" -lt "$cutoff_epoch" ]]; then + printf '%s\n' "$d" >> "$tmp_dirs" + fi + done < <(find "$src_root" -mindepth 1 -type d -print0 2>/dev/null) + + # Phase 2: build effective directory set (no nested double-move roots). + sort -r "$tmp_dirs" | while IFS= read -r d; do + [[ -z "$d" ]] && continue + local skip=0 + while IFS= read -r already; do + [[ -z "$already" ]] && continue + if [[ "$d" == "$already"*"/"* ]]; then + skip=1 + break + fi + done < "$tmp_effective_dirs" + [[ "$skip" -eq 1 ]] && continue + printf '%s\n' "$d" >> "$tmp_effective_dirs" + done + + # Phase 3: effective files = eligible files not under an effective moved directory. + while IFS= read -r f; do + [[ -z "$f" ]] && continue + local covered=0 + while IFS= read -r d; do + [[ -z "$d" ]] && continue + if [[ "$f" == "$d/"* ]]; then + covered=1 + break + fi + done < "$tmp_effective_dirs" + [[ "$covered" -eq 1 ]] && continue + printf '%s\n' "$f" >> "$tmp_effective_files" + done < "$tmp_files" + + local before_files before_directories before_items + before_files="$(wc -l < "$tmp_effective_files" | tr -d ' ')" + before_directories="$(wc -l < "$tmp_effective_dirs" | tr -d ' ')" + before_items=$((before_files + before_directories)) + + # Execute directories first. 
+ while IFS= read -r d; do + [[ -z "$d" ]] && continue + local rel="${d#"$src_root"/}" + local dest="$dst_root/$rel" + if [[ -e "$dest" ]]; then + printf 'path=%s reason=%s\n' "$d" "destination_conflict_rename_prohibited" >> "$tmp_skipped" + continue + fi + mkdir -p "$(dirname "$dest")" + if mv "$d" "$dest" 2>/dev/null; then + printf 'from=%s to=%s type=directory\n' "$d" "$dest" >> "$tmp_moved" + else + printf 'path=%s reason=%s\n' "$d" "move_error" >> "$tmp_skipped" + fi + done < "$tmp_effective_dirs" + + # Execute files. + while IFS= read -r f; do + [[ -z "$f" ]] && continue + local rel="${f#"$src_root"/}" + local dest="$dst_root/$rel" + if [[ -e "$dest" ]]; then + printf 'path=%s reason=%s\n' "$f" "destination_conflict_rename_prohibited" >> "$tmp_skipped" + continue + fi + mkdir -p "$(dirname "$dest")" + if mv "$f" "$dest" 2>/dev/null; then + printf 'from=%s to=%s type=file\n' "$f" "$dest" >> "$tmp_moved" + else + printf 'path=%s reason=%s\n' "$f" "move_error" >> "$tmp_skipped" + fi + done < "$tmp_effective_files" + + # Recompute effective post-execution eligible counts. 
+ : > "$tmp_files" + : > "$tmp_dirs" + : > "$tmp_effective_dirs" + : > "$tmp_effective_files" + + while IFS= read -r -d '' f; do + [[ -z "$f" ]] && continue + local mtime + mtime="$(stat -f %m "$f" 2>/dev/null || true)" + [[ -z "$mtime" ]] && continue + if [[ "$mtime" -lt "$cutoff_epoch" ]]; then + printf '%s\n' "$f" >> "$tmp_files" + fi + done < <(find "$src_root" -type f -print0 2>/dev/null) + + while IFS= read -r -d '' d; do + [[ -z "$d" ]] && continue + [[ "$d" == "$src_root" ]] && continue + local mtime + mtime="$(stat -f %m "$d" 2>/dev/null || true)" + [[ -z "$mtime" ]] && continue + if [[ "$mtime" -lt "$cutoff_epoch" ]]; then + printf '%s\n' "$d" >> "$tmp_dirs" + fi + done < <(find "$src_root" -mindepth 1 -type d -print0 2>/dev/null) + + sort -r "$tmp_dirs" | while IFS= read -r d; do + [[ -z "$d" ]] && continue + local skip=0 + while IFS= read -r already; do + [[ -z "$already" ]] && continue + if [[ "$d" == "$already"*"/"* ]]; then + skip=1 + break + fi + done < "$tmp_effective_dirs" + [[ "$skip" -eq 1 ]] && continue + printf '%s\n' "$d" >> "$tmp_effective_dirs" + done + + while IFS= read -r f; do + [[ -z "$f" ]] && continue + local covered=0 + while IFS= read -r d; do + [[ -z "$d" ]] && continue + if [[ "$f" == "$d/"* ]]; then + covered=1 + break + fi + done < "$tmp_effective_dirs" + [[ "$covered" -eq 1 ]] && continue + printf '%s\n' "$f" >> "$tmp_effective_files" + done < "$tmp_files" + + local after_files after_directories after_items moved_files moved_directories moved_items skipped_count rename_count final_status + after_files="$(wc -l < "$tmp_effective_files" | tr -d ' ')" + after_directories="$(wc -l < "$tmp_effective_dirs" | tr -d ' ')" + after_items=$((after_files + after_directories)) + moved_files="$(grep -c 'type=file' "$tmp_moved" || true)" + moved_directories="$(grep -c 'type=directory' "$tmp_moved" || true)" + moved_items=$((moved_files + moved_directories)) + skipped_count="$(wc -l < "$tmp_skipped" | tr -d ' ')" + rename_count=0 + 
final_status="no-op" + if [[ "$moved_items" -gt 0 ]]; then + final_status="mutated" + fi + + print_section "Before $lane_title" + echo "- eligible_files_count=$before_files" + echo "- eligible_directories_count=$before_directories" + + print_section "After Execution" + echo "- eligible_files_count=$after_files" + echo "- eligible_directories_count=$after_directories" + echo "- final_status=$final_status" + echo "- moved_files_count=$moved_files" + echo "- moved_directories_count=$moved_directories" + echo "- skipped_count=$skipped_count" + if [[ "$skipped_count" -gt 0 ]]; then + local skipped_reasons + skipped_reasons="$(awk -F'reason=' '{print $2}' "$tmp_skipped" | sort | uniq | paste -sd ';' -)" + echo "- skipped_reasons=${skipped_reasons:-unknown}" + else + echo "- skipped_reasons=none" + fi + echo "- rename_count=$rename_count" + if [[ "$rename_count" -gt 0 ]]; then + echo "- BUG=rename_count_should_be_zero" + fi +} + main() { local raw="${*:-run audit}" local q @@ -1637,6 +1869,23 @@ main() { exit 0 fi + # Explicit ad hoc trash cleanup lane: + # if command includes both cleanup + trash and no tier-language tokens, + # run the narrow trash->purge execution path (always execute, never report-only). + if has_phrase "$q" "__cleanup_trash_weekly__"; then + run_cleanup_trash_execute "weekly" + exit 0 + fi + + if has_phrase "$q" "cleanup" && has_phrase "$q" "trash" && ! 
has_tier_language "$q"; then + if has_phrase "$q" "weekly"; then + run_cleanup_trash_execute "weekly" + else + run_cleanup_trash_execute "adhoc" + fi + exit 0 + fi + local include_local_workspace=1 if has_phrase "$q" "no local workspace" || has_phrase "$q" "only repos" || has_phrase "$q" "repo only" || has_phrase "$q" "repos only"; then include_local_workspace=0 From 2076b0b24a5db9ddbc929a5cf0d870168ea31cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20J=2E=20Arg=C3=BCello?= <12516370+cjarguello@users.noreply.github.com> Date: Thu, 16 Apr 2026 17:22:36 -0600 Subject: [PATCH 4/5] feat(cleanup): allow collision-safe versioned rename in trash sweep --- audit_ctl.sh | 60 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/audit_ctl.sh b/audit_ctl.sh index b967fb4..99f3187 100755 --- a/audit_ctl.sh +++ b/audit_ctl.sh @@ -132,6 +132,39 @@ cleanup_trash_title() { fi } +versioned_destination_path() { + local desired="$1" + local item_type="${2:-file}" + local parent base stem ext candidate + local n=1 + + if [[ ! -e "$desired" ]]; then + echo "$desired" + return 0 + fi + + parent="$(dirname "$desired")" + base="$(basename "$desired")" + ext="" + stem="$base" + + if [[ "$item_type" == "file" && "$base" == *.* && "$base" != .* ]]; then + stem="${base%.*}" + ext=".${base##*.}" + fi + + while [[ "$n" -le 5000 ]]; do + candidate="$parent/${stem} (${n})${ext}" + if [[ ! 
-e "$candidate" ]]; then + echo "$candidate" + return 0 + fi + n=$((n + 1)) + done + + return 1 +} + render_bool() { if [[ "$1" -eq 1 ]]; then echo "YES" @@ -1730,8 +1763,13 @@ PY local rel="${d#"$src_root"/}" local dest="$dst_root/$rel" if [[ -e "$dest" ]]; then - printf 'path=%s reason=%s\n' "$d" "destination_conflict_rename_prohibited" >> "$tmp_skipped" - continue + local versioned_dest + if versioned_dest="$(versioned_destination_path "$dest" "directory")"; then + dest="$versioned_dest" + else + printf 'path=%s reason=%s\n' "$d" "destination_conflict_rename_resolution_failed" >> "$tmp_skipped" + continue + fi fi mkdir -p "$(dirname "$dest")" if mv "$d" "$dest" 2>/dev/null; then @@ -1747,8 +1785,13 @@ PY local rel="${f#"$src_root"/}" local dest="$dst_root/$rel" if [[ -e "$dest" ]]; then - printf 'path=%s reason=%s\n' "$f" "destination_conflict_rename_prohibited" >> "$tmp_skipped" - continue + local versioned_dest + if versioned_dest="$(versioned_destination_path "$dest" "file")"; then + dest="$versioned_dest" + else + printf 'path=%s reason=%s\n' "$f" "destination_conflict_rename_resolution_failed" >> "$tmp_skipped" + continue + fi fi mkdir -p "$(dirname "$dest")" if mv "$f" "$dest" 2>/dev/null; then @@ -1813,7 +1856,7 @@ PY printf '%s\n' "$f" >> "$tmp_effective_files" done < "$tmp_files" - local after_files after_directories after_items moved_files moved_directories moved_items skipped_count rename_count final_status + local after_files after_directories after_items moved_files moved_directories moved_items skipped_count collision_rename_count final_status after_files="$(wc -l < "$tmp_effective_files" | tr -d ' ')" after_directories="$(wc -l < "$tmp_effective_dirs" | tr -d ' ')" after_items=$((after_files + after_directories)) @@ -1821,7 +1864,7 @@ PY moved_directories="$(grep -c 'type=directory' "$tmp_moved" || true)" moved_items=$((moved_files + moved_directories)) skipped_count="$(wc -l < "$tmp_skipped" | tr -d ' ')" - rename_count=0 + 
collision_rename_count="$(awk -F'to=' '/^from=/{print $2}' "$tmp_moved" | awk -F'/' '{print $NF}' | grep -Ec ' \([0-9]+\)(\.[^./]+)?$' || true)" final_status="no-op" if [[ "$moved_items" -gt 0 ]]; then final_status="mutated" @@ -1845,10 +1888,7 @@ PY else echo "- skipped_reasons=none" fi - echo "- rename_count=$rename_count" - if [[ "$rename_count" -gt 0 ]]; then - echo "- BUG=rename_count_should_be_zero" - fi + echo "- collision_rename_count=$collision_rename_count" } main() { From 607253ed39c92d67c4f0cd874ab3a50effdfc0f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20J=2E=20Arg=C3=BCello?= <12516370+cjarguello@users.noreply.github.com> Date: Sat, 18 Apr 2026 00:30:31 -0600 Subject: [PATCH 5/5] Publish Vera and Linear cleanup updates --- gpt_bridge/README.md | 19 + gpt_bridge/bridge_chat.py | 28 +- gpt_bridge/tests/test_bridge_chat.py | 25 + gpt_bridge/tests/test_vera_qa.py | 144 +++++ gpt_bridge/vera_qa.py | 18 + linear/README.md | 17 +- ...amily_linear_hygiene_cleanup_2026-04-16.md | 79 +++ ..._blocker_cleanup_proposal_v1_2026-04-15.md | 191 +++++++ ...near_automation_alignment_v1_2026-04-16.md | 79 +++ ..._project_status_inventory_v1_2026-04-16.md | 55 ++ ...rkspace_project_templates_v1_2026-04-16.md | 58 +++ .../bit35_clean_thread_handoff_2026-04-16.md | 67 +++ ...l_tower_validation_packet_v1_2026-04-15.md | 183 +++++++ ...live_admin_go_no_go_decision_2026-04-16.md | 75 +++ ...admin_execution_checklist_v1_2026-04-15.md | 334 ++++++++++++ ...live_admin_execution_handoff_2026-04-16.md | 117 +++++ .../process/linear_admin_change_control_v1.md | 3 + .../linear_change_proposal_template_v1.md | 6 + .../linear_operating_guide_changelog.md | 8 + .../docs/process/linear_operating_guide_v3.md | 39 +- .../docs/process/linear_operating_model_v1.md | 104 +++- ...ear_process_cj_intent_packet_2026-04-15.md | 221 ++++++++ ...ontrol_tower_change_proposal_2026-04-15.md | 96 ++++ .../vera_linear_pr_review_prompt_v1.md | 12 +- linear/src/engine.py | 157 ++++-- 
linear/src/runtime.py | 1 + linear/src/simulate_e2e.py | 3 +- linear/tests/test_e2e_flow.py | 1 + linear/tests/test_engine.py | 63 ++- scripts/enforce_local_cleanup_retention.sh | 196 +++++++ scripts/run_scheduled_cleanup_audit.sh | 56 ++ tools/taylor01/README.md | 7 +- tools/taylor01/adapters/README.md | 5 +- .../adapters/openai/vera/bridge_runtime.py | 490 ++++++++++++++++++ .../adapters/openai/vera/pr_review.py | 244 +++++++++ .../adapters/openclaw/vera/ADAPTER.md | 26 + .../taylor01/adapters/openclaw/vera/README.md | 22 + tools/taylor01/core/README.md | 2 + tools/taylor01/core/agents/vera/AGENTS.md | 33 ++ tools/taylor01/core/agents/vera/IDENTITY.md | 39 ++ .../core/agents/vera/OUTPUT_CONTRACT.md | 45 ++ tools/taylor01/core/agents/vera/SECRETS.md | 17 + tools/taylor01/core/agents/vera/SOUL.md | 23 + tools/taylor01/core/agents/vera/contract.py | 158 ++++++ vera_agent/openai_vera_pr_review.py | 17 + 45 files changed, 3504 insertions(+), 79 deletions(-) create mode 100644 gpt_bridge/tests/test_vera_qa.py create mode 100644 gpt_bridge/vera_qa.py create mode 100644 linear/docs/process/bit270_rollout_family_linear_hygiene_cleanup_2026-04-16.md create mode 100644 linear/docs/process/bit319_product_development_blocker_cleanup_proposal_v1_2026-04-15.md create mode 100644 linear/docs/process/bit320_github_triggered_linear_automation_alignment_v1_2026-04-16.md create mode 100644 linear/docs/process/bit322_workspace_project_status_inventory_v1_2026-04-16.md create mode 100644 linear/docs/process/bit323_workspace_project_templates_v1_2026-04-16.md create mode 100644 linear/docs/process/bit35_clean_thread_handoff_2026-04-16.md create mode 100644 linear/docs/process/bit35_control_tower_validation_packet_v1_2026-04-15.md create mode 100644 linear/docs/process/bit35_live_admin_go_no_go_decision_2026-04-16.md create mode 100644 linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md create mode 100644 
linear/docs/process/bit35_scoped_live_admin_execution_handoff_2026-04-16.md create mode 100644 linear/docs/process/linear_process_cj_intent_packet_2026-04-15.md create mode 100644 linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md create mode 100755 scripts/enforce_local_cleanup_retention.sh create mode 100644 tools/taylor01/adapters/openai/vera/bridge_runtime.py create mode 100644 tools/taylor01/adapters/openai/vera/pr_review.py create mode 100644 tools/taylor01/adapters/openclaw/vera/ADAPTER.md create mode 100644 tools/taylor01/adapters/openclaw/vera/README.md create mode 100644 tools/taylor01/core/agents/vera/AGENTS.md create mode 100644 tools/taylor01/core/agents/vera/IDENTITY.md create mode 100644 tools/taylor01/core/agents/vera/OUTPUT_CONTRACT.md create mode 100644 tools/taylor01/core/agents/vera/SECRETS.md create mode 100644 tools/taylor01/core/agents/vera/SOUL.md create mode 100644 tools/taylor01/core/agents/vera/contract.py create mode 100644 vera_agent/openai_vera_pr_review.py diff --git a/gpt_bridge/README.md b/gpt_bridge/README.md index 3b11fc8..d3de726 100644 --- a/gpt_bridge/README.md +++ b/gpt_bridge/README.md @@ -11,6 +11,7 @@ HTTP bridge so Codex workflows can call GPT directly (local or remote-managed en - `gpt_bridge_mcp.py`: MCP stdio server exposing a tool that forwards to `/ask` - `bridge_chat.py`: shared chat-log relay (`send`, `post`, `tail`) - `bridge_chat.sh`: shell wrapper for `bridge_chat.py` +- `vera_qa.py`: thin Vera QA runtime that writes `verification_report.md` + `manifest.json` - `bridge_ctl.sh`: bridge lifecycle controls (`start`, `status`, `stop`) - `config.example.env`: env var reference - `logs/`: JSONL request/response logs (`logs/bridge.jsonl`) @@ -57,6 +58,7 @@ CHAT (Codex chat): - `~session ` - `~gpt ` +- `~vera ` - `~sync` (manual pull; most new GPT messages are now auto-pulled on your next chat command) - `~end` @@ -145,6 +147,23 @@ Per-request override: ./ask_gpt.sh --model gpt-5.2-codex 
"review this patch" ``` +Vera QA defaults to `gpt-5.2` unless overridden: + +```bash +python3 vera_qa.py /path/to/handoff.json --output-dir /tmp/vera-run +``` + +The handoff JSON should include at minimum: + +- `target` or `system_under_test` +- `critical_acceptance_criteria` (array) +- optional `issue_url`, `pr_url`, `commands_or_surfaces`, `known_risks`, `changed_files`, `evidence_paths` + +Outputs: + +- `verification_report.md` +- `manifest.json` + ## Run service ```bash diff --git a/gpt_bridge/bridge_chat.py b/gpt_bridge/bridge_chat.py index b037b2c..c8347a7 100755 --- a/gpt_bridge/bridge_chat.py +++ b/gpt_bridge/bridge_chat.py @@ -31,6 +31,14 @@ "Be direct, concrete, and honest. Keep replies short but useful. Ask at most one clarifying " "question only when it prevents an error." ) +VERA_SYSTEM_PROMPT = ( + "You are Vera, BitPod's QA Specialist. Identity: exceptional QA software engineer, concise, " + "skeptical, evidence-first, blunt-but-fair, minimal small talk, not timid. Primary role: decide " + "QA truthfully and explain verification reasoning clearly. Default posture: regression-first, " + "tries to break features, severity-realistic, proposes only the smallest verifiable next step " + "when blocked. Hard boundaries: no scope changes, no priority decisions, no implementation " + "ownership, no fake pass. If evidence is insufficient, say so directly instead of implying a pass." 
+) LOCAL_CODEX_SKILLS_ROOT = WORKSPACE_ROOT / "local-workspace" / "local-codex" / "skills" TAYLOR_SKILL_REFERENCES_ROOT = LOCAL_CODEX_SKILLS_ROOT / "taylor" / "references" DEFAULT_TAYLOR_REFERENCE_FILES = ( @@ -39,6 +47,10 @@ TAYLOR_SKILL_REFERENCES_ROOT / "app-mission-vision.md", TAYLOR_SKILL_REFERENCES_ROOT / "key-memories-and-examples.md", ) +DEFAULT_VERA_REFERENCE_FILES = ( + WORKSPACE_ROOT / "bitpod-tools" / "linear" / "docs" / "process" / "vera_qa_lane_contract_v1.md", + WORKSPACE_ROOT / "bitpod-tools" / "linear" / "docs" / "process" / "vera_runtime_minimum_v1.md", +) MAX_REFERENCE_CHARS = 6000 @@ -145,7 +157,7 @@ def _strip_actor_mentions(text: str, actor: str) -> str: def _relay_actor_for_mentions(mentions: list[str]) -> str | None: for mention in mentions: - if mention in {"taylor", "gpt"}: + if mention in {"taylor", "gpt", "vera"}: return mention return None @@ -1084,6 +1096,8 @@ def _send_to_gpt( reference_context = "" if reply_actor == "taylor": reference_context = _load_reference_context(DEFAULT_TAYLOR_REFERENCE_FILES, MAX_REFERENCE_CHARS) + elif reply_actor == "vera": + reference_context = _load_reference_context(DEFAULT_VERA_REFERENCE_FILES, MAX_REFERENCE_CHARS) context_text = "\n\n".join( chunk for chunk in [memory_context, reference_context] if chunk ) or None @@ -1315,11 +1329,15 @@ def run_team(args: argparse.Namespace) -> int: preface_status = ( "Bridge GPT | @taylor mention detected, routing to Taylor..." if relay_actor == "taylor" + else "Bridge GPT | @vera mention detected, routing to Vera..." + if relay_actor == "vera" else "Bridge GPT | @gpt mention detected, relaying..." 
) meta_overrides = {"route_actor": relay_actor} if relay_actor == "taylor": meta_overrides["system_prompt"] = TAYLOR_SYSTEM_PROMPT + elif relay_actor == "vera": + meta_overrides["system_prompt"] = VERA_SYSTEM_PROMPT return _send_to_gpt( log_file=log_file, @@ -1396,6 +1414,7 @@ def run_options(args: argparse.Namespace) -> int: print("Bridge GPT | ~codex (@direct message to Codex, quick short replies)") print("Bridge GPT | ~cj (@mentions CJ in team chat, may reply eventually, extremely elaborately)") print("Bridge GPT | ~taylor (@direct message to Taylor in team chat, longer replies)") + print("Bridge GPT | ~vera (@direct message to Vera in team chat, QA voice / evidence-first)") print(f"Bridge GPT | active session: {session}") return 0 @@ -1551,7 +1570,7 @@ def run_chat(args: argparse.Namespace) -> int: show_raw=False, ) ) - if tcmd in {"gpt", "codex", "cj", "taylor", "decide"}: + if tcmd in {"gpt", "codex", "cj", "taylor", "vera", "decide"}: if not trest: print(f"Bridge GPT | Usage: ~{tcmd} ") return 2 @@ -1603,6 +1622,11 @@ def run_chat(args: argparse.Namespace) -> int: if cmd == "/decide": return run_team(_chat_team_args(args, f"@gpt [DECIDE] {rest}")) return run_team(_chat_team_args(args, f"@gpt {rest}")) + if cmd == "/vera": + if not rest: + print("Bridge GPT | Usage: /vera ") + return 2 + return run_team(_chat_team_args(args, f"@vera {rest}")) if cmd == "/codex": if not rest: print("Bridge GPT | Usage: /codex ") diff --git a/gpt_bridge/tests/test_bridge_chat.py b/gpt_bridge/tests/test_bridge_chat.py index e5e18c8..8462b40 100644 --- a/gpt_bridge/tests/test_bridge_chat.py +++ b/gpt_bridge/tests/test_bridge_chat.py @@ -83,5 +83,30 @@ def _fake_run_ask_once(**kwargs: object) -> dict[str, object]: self.assertNotIn("system_prompt", captured["meta"]) self.assertIn("gpt: Generic GPT reply.", stdout.getvalue()) + def test_vera_mention_routes_with_vera_persona(self): + captured: dict[str, object] = {} + vera_ref = Path(self.tmpdir.name) / "vera-reference.md" + 
vera_ref.write_text("Vera QA contract context", encoding="utf-8") + + def _fake_run_ask_once(**kwargs: object) -> dict[str, object]: + captured.update(kwargs) + return {"answer": {"json": {"reply": "QA says not ready yet."}}} + + stdout = io.StringIO() + with ( + patch.object(bridge_chat, "DEFAULT_VERA_REFERENCE_FILES", (vera_ref,)), + patch.object(bridge_chat, "_run_ask_once", side_effect=_fake_run_ask_once), + patch.object(bridge_chat, "_set_active_session", return_value=None), + contextlib.redirect_stdout(stdout), + ): + rc = bridge_chat.run_team(self._args("@vera check this handoff")) + + self.assertEqual(rc, 0) + self.assertEqual(captured["message"], "check this handoff") + self.assertEqual(captured["meta"]["route_actor"], "vera") + self.assertIn("You are Vera, BitPod's QA Specialist", captured["meta"]["system_prompt"]) + self.assertIn("[Reference: vera-reference.md]", captured["context_text"]) + self.assertIn("vera: QA says not ready yet.", stdout.getvalue()) + if __name__ == "__main__": unittest.main() diff --git a/gpt_bridge/tests/test_vera_qa.py b/gpt_bridge/tests/test_vera_qa.py new file mode 100644 index 0000000..1027343 --- /dev/null +++ b/gpt_bridge/tests/test_vera_qa.py @@ -0,0 +1,144 @@ +from __future__ import annotations + +import json +import sys +import tempfile +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + +import gpt_bridge.vera_qa as vera_qa # noqa: E402 +from tools.taylor01.core.agents.vera.contract import ( # noqa: E402 + PortableReviewResult, + PortableReviewTarget, + build_manifest, + render_verification_report, +) + + +class VeraPortableContractTests(unittest.TestCase): + def setUp(self) -> None: + self.tmpdir = tempfile.TemporaryDirectory() + self.root = Path(self.tmpdir.name) + + def tearDown(self) -> None: + self.tmpdir.cleanup() + + def _write_handoff(self, payload: dict[str, object]) -> Path: + path = self.root / 'handoff.json' + path.write_text(json.dumps(payload), 
encoding='utf-8') + return path + + def test_load_handoff_resolves_relative_evidence_paths(self) -> None: + evidence = self.root / 'notes.txt' + evidence.write_text('sample evidence', encoding='utf-8') + handoff_path = self._write_handoff( + { + 'system_under_test': 'vera-runner', + 'critical_acceptance_criteria': ['AC text'], + 'evidence_paths': ['notes.txt'], + } + ) + + handoff = vera_qa.load_handoff(handoff_path, []) + + self.assertEqual(handoff.system_under_test, 'vera-runner') + self.assertEqual(handoff.evidence_paths, (evidence.resolve(),)) + self.assertEqual(handoff.critical_acceptance_criteria[0].criterion_id, 'AC-1') + + def test_detect_review_risk_flags_high_risk_files(self) -> None: + risk = vera_qa.detect_review_risk(('src/auth/session.py', 'payments/stripe_checkout.ts', 'README.md')) + self.assertTrue(risk['high_risk']) + self.assertIn('auth_permissions', risk['patterns_matched']) + self.assertIn('billing_payments', risk['patterns_matched']) + self.assertIn('src/auth/session.py', risk['files_matched']) + + def test_normalize_model_result_downgrades_missing_criteria_to_no_verdict(self) -> None: + handoff_path = self._write_handoff( + { + 'system_under_test': 'vera-runner', + 'critical_acceptance_criteria': [ + {'id': 'AC-1', 'text': 'First'}, + {'id': 'AC-2', 'text': 'Second'}, + ], + } + ) + handoff = vera_qa.load_handoff(handoff_path, []) + + result = vera_qa.normalize_model_result( + { + 'overall_verdict': 'PASSED', + 'summary': 'All good', + 'criteria_results': [{'id': 'AC-1', 'result': 'PASS', 'steps': ['one'], 'observed': 'ok'}], + }, + handoff, + ) + + self.assertEqual(result['overall_verdict'], 'NO_VERDICT') + self.assertEqual(result['criteria_results'][1]['result'], 'NO_VERDICT') + + def test_portable_artifacts_preserve_pass_case(self) -> None: + result = PortableReviewResult( + verdict='PASSED', + scope=['PR: https://github.com/example/repo/pull/1'], + evidence=['AC-1 PASS: observed expected behavior'], + checks_run=['PR metadata 
inspection'], + findings=['No blockers found.'], + open_questions=[], + recommendation='Proceed with the normal review flow.', + notes=['pass case proof'], + ) + report = render_verification_report(result) + manifest = build_manifest( + PortableReviewTarget(target_type='pr', target_ref='https://github.com/example/repo/pull/1', repository='example/repo', branch='feature/pass'), + result, + ) + self.assertIn('## Verdict', report) + self.assertIn('- PASSED', report) + self.assertEqual(manifest['review']['verdict'], 'PASSED') + self.assertEqual(manifest['artifacts']['verificationReport'], 'verification_report.md') + + def test_portable_artifacts_preserve_fail_case(self) -> None: + result = PortableReviewResult( + verdict='FAILED', + scope=['PR: https://github.com/example/repo/pull/2'], + evidence=['AC-1 FAIL: expected safe auth; actual bypass'], + checks_run=['Diff inspection'], + findings=['Authentication bypass is a blocker.'], + open_questions=[], + recommendation='Fix the auth regression and rerun Vera.', + notes=['fail case proof'], + ) + report = render_verification_report(result) + manifest = build_manifest( + PortableReviewTarget(target_type='pr', target_ref='https://github.com/example/repo/pull/2', repository='example/repo', branch='feature/fail'), + result, + ) + self.assertIn('- FAILED', report) + self.assertEqual(manifest['review']['verdict'], 'FAILED') + self.assertIn('Authentication bypass is a blocker.', report) + + def test_portable_artifacts_preserve_no_verdict_case(self) -> None: + result = PortableReviewResult( + verdict='NO_VERDICT', + scope=['PR: https://github.com/example/repo/pull/3'], + evidence=['AC-1 NO_VERDICT: logs were missing'], + checks_run=['PR metadata inspection'], + findings=['Insufficient evidence for a truthful pass/fail verdict.'], + open_questions=['Provide the missing test logs.'], + recommendation='Provide stronger evidence and rerun Vera.', + notes=['no-verdict case proof'], + ) + report = render_verification_report(result) + 
manifest = build_manifest( + PortableReviewTarget(target_type='pr', target_ref='https://github.com/example/repo/pull/3', repository='example/repo', branch='feature/no-verdict'), + result, + ) + self.assertIn('- NO_VERDICT', report) + self.assertEqual(manifest['review']['verdict'], 'NO_VERDICT') + self.assertEqual(manifest['openQuestions'], ['Provide the missing test logs.']) + + +if __name__ == '__main__': + unittest.main() diff --git a/gpt_bridge/vera_qa.py b/gpt_bridge/vera_qa.py new file mode 100644 index 0000000..dc15495 --- /dev/null +++ b/gpt_bridge/vera_qa.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +"""Compatibility wrapper for the canonical Vera GPT Bridge adapter.""" + +from __future__ import annotations + +import sys +from pathlib import Path + +REPO_ROOT = Path(__file__).resolve().parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from tools.taylor01.adapters.openai.vera.bridge_runtime import * # noqa: F401,F403 +from tools.taylor01.adapters.openai.vera.bridge_runtime import main + + +if __name__ == '__main__': + raise SystemExit(main()) diff --git a/linear/README.md b/linear/README.md index 0ce5c5f..76511bb 100644 --- a/linear/README.md +++ b/linear/README.md @@ -104,10 +104,11 @@ Implemented in engine/service: - daily aging scan payload handler - Gating behavior: - execution gate (`Issue Type` + exact-one type + estimate + required headings) - - status-first review flow (`In Review` without pending review labels) - - QA gates drive `In Review` -> `Delivered` or `Done` - - PM review labels drive `Delivered` -> `Accepted` or `Done` - - merged PRs fail closed when gates are incomplete + - status-first review flow (`In Review` remains the live review gate name) + - QA gates drive `In Review` -> `Delivered` + - PM review labels drive `Delivered` -> `Accepted` + - merged PRs fail closed when merge-readiness truth is incomplete + - backlog aging drives `Backlog` -> `Icebox 🧊` -> `Stale` - Dry-run default and simulation 
runner ## Preferred workflow note @@ -117,10 +118,10 @@ The canonical operating model is: - engineering moves work into `In Review` - pending QA is expressed by the status itself - `qa-passed`, `qa-failed`, and `qa-skipped` are result labels only -- acceptance-required work moves from `In Review` to `Delivered` +- review-cleared work moves from `In Review` to `Delivered` - `pm-accepted`, `pm-rejected`, and `pm-skipped` are result labels only -- non-acceptance work can move directly from `In Review` to `Done` -- acceptance-required work moves from `Delivered` to `Accepted`, then to `Done` +- work moves from `Delivered` to `Accepted`, then to `Done` +- merge to `main` only closes work when `Accepted` and the rest of merge-readiness truth is already satisfied ## Status model note (important) @@ -198,7 +199,7 @@ python3 linear/scripts/create_linear_issues_from_seed.py - PR ready for review -> `In Review` - QA comment token parse (`QA_RESULT=PASSED`) -> `Delivered` - PM review signal (`pm-accepted`) -> `Accepted` -- PR merged -> final closure to `Done` plus merge record comment for acceptance-required work +- PR merged -> final closure to `Done` plus merge record comment when merge-readiness truth is satisfied ## Discord operator preflight diff --git a/linear/docs/process/bit270_rollout_family_linear_hygiene_cleanup_2026-04-16.md b/linear/docs/process/bit270_rollout_family_linear_hygiene_cleanup_2026-04-16.md new file mode 100644 index 0000000..38c4424 --- /dev/null +++ b/linear/docs/process/bit270_rollout_family_linear_hygiene_cleanup_2026-04-16.md @@ -0,0 +1,79 @@ +# BIT-270 rollout family Linear hygiene cleanup — 2026-04-16 + +Status: completed retroactive metadata cleanup +Owner: Codex under CJ-directed expedited cleanup pass +Primary parent: [BIT-270 — Coordinate Tailgate/Tailscale-first Mission Control rollout for Taylor01](https://linear.app/bitpod-app/issue/BIT-270/coordinate-tailgatetailscale-first-mission-control-rollout-for) + +## Why this cleanup was needed 
+ +Several rollout-family issues had reached truthful completion in code/runtime reality without equivalent Linear hygiene: + +- missing canonical issue-type labels +- missing estimates on completed subtasks +- stale blocker residue on a completed parent +- missing explicit QA/PM result labels on finalized issues +- utility niceties implemented in code but not attached cleanly to the rollout family + +CJ explicitly allowed expedited retroactive normalization rather than reopening a full QA/PM lane for already-shipped work. + +## Issues normalized + +- BIT-212 +- BIT-270 +- BIT-282 +- BIT-283 +- BIT-303 +- BIT-304 + +## Applied normalization rules + +- preserve truthful completed state when implementation and operator proof already existed +- add exactly one canonical Issue Type label where missing +- add valid estimate where missing +- use `qa-skipped` when no separate QA artifact existed for the cleanup pass +- use `pm-accepted` when CJ retroactively affirmed acceptance +- remove stale blocker residue from completed issues +- attach stray utility niceties to the parent rollout family when the relationship was obvious + +## Concrete outcomes + +### BIT-212 +- kept `Done` +- labels normalized to `⚙️ Chore`, `pm-accepted`, `qa-skipped` + +### BIT-270 +- kept `Done` +- removed stale `needs-other` +- labels normalized to `⭐️ Feature`, `pm-accepted`, `qa-skipped` +- description expanded to include scope-change clarification and subtask review + +### BIT-282 +- kept `Done` +- added `⚙️ Chore` +- added estimate `2` +- added `pm-accepted`, `qa-skipped` + +### BIT-283 +- kept `Done` +- retained estimate `2` +- labels normalized to `⭐️ Feature`, `pm-accepted`, `qa-skipped` + +### BIT-303 +- kept `Done` +- added parent `BIT-270` +- added `⭐️ Feature` +- added estimate `1` +- added `pm-accepted`, `qa-skipped` + +### BIT-304 +- kept `Done` +- added parent `BIT-270` +- added `⭐️ Feature` +- added estimate `1` +- added `pm-accepted`, `qa-skipped` + +## Enforcement follow-up + 
+The local Linear engine had drifted from the live workspace because its canonical type matcher only recognized plain labels like `Feature` / `Chore`, while the actual workspace uses emoji-prefixed labels like `⭐️ Feature` / `⚙️ Chore`. + +That mismatch was corrected in code during the same cleanup pass so future readiness/merge enforcement matches the real workspace labels. diff --git a/linear/docs/process/bit319_product_development_blocker_cleanup_proposal_v1_2026-04-15.md b/linear/docs/process/bit319_product_development_blocker_cleanup_proposal_v1_2026-04-15.md new file mode 100644 index 0000000..396340c --- /dev/null +++ b/linear/docs/process/bit319_product_development_blocker_cleanup_proposal_v1_2026-04-15.md @@ -0,0 +1,191 @@ +# BIT-319 Product Development Blocker Cleanup Proposal v1 + +Date: 2026-04-15 +Owner: Product Development +Primary issue: [BIT-319 — Product Development blocker cleanup toward native dependencies + minimal blocker taxonomy](https://linear.app/bitpod-app/issue/BIT-319/product-development-blocker-cleanup-toward-native-dependencies-minimal) + +## Objective + +Reduce blocker ambiguity in the Product Development team without causing silent breakage. + +Near-term goal: + +- native Linear dependencies become the default blocker surface for issue-to-issue blocking +- non-ticket blockers converge toward a much smaller taxonomy +- merge-readiness truth stops depending on a sprawling blocker-label surface + +## Current live snapshot + +Observed blocker label group: + +- `Blocked By` + +Observed live blocker labels: + +- `needs-estimate` +- `needs-CTO` +- `needs-other` +- `needs-type` +- `needs-decision` +- `needs-specs` +- `needs-pm` +- `needs-discussion` + +## Problem + +The current blocker surface mixes together three different things: + +1. readiness failures +- `needs-type` +- `needs-estimate` +- `needs-specs` + +2. operator/decision dependencies +- `needs-pm` +- `needs-decision` +- `needs-discussion` +- `needs-CTO` + +3. 
generic unresolved blockers +- `needs-other` + +This creates four problems: + +- issue-to-issue blockers can be hidden inside labels instead of native dependencies +- merge-readiness logic has to interpret too many blocker variants +- some labels are really intake/readiness corrections, not durable blockers +- the taxonomy invites more labels later + +## Proposed target model + +### A. Native dependencies first + +Use native Linear `blocked by` relations whenever: + +- issue A depends on issue B +- a design issue blocks an implementation issue +- a release issue depends on specific child issues +- any other blocker is best represented by another issue + +### B. Keep temporary readiness labels only where they serve the Ready gate + +These still have practical value at the gate: + +- `needs-type` +- `needs-estimate` +- `needs-specs` + +Reason: + +- the current engine already uses them for fail-closed readiness rollback +- removing them immediately would create avoidable churn + +### C. Collapse the rest toward one generic non-ticket blocker signal + +Long-term target: + +- one generic `blocked` label + +Meaning: + +- issue is blocked by a non-ticket condition +- required reason must be recorded in a comment + +This generic label should replace: + +- `needs-CTO` +- `needs-pm` +- `needs-decision` +- `needs-discussion` +- `needs-other` + +## Migration shape + +### Phase 1: doctrine and engine awareness + +Already aligned or partially aligned: + +- repo doctrine now says native dependencies should be preferred +- engine merge-readiness logic now treats blocker signals more strictly + +### Phase 2: live workspace prep + +Before deleting or renaming blocker labels: + +1. inventory active issues currently using each blocker label +2. determine which labels are still used by bot logic vs only by humans +3. confirm no hidden automation is recreating labels +4. 
confirm native dependency cleanup is trustworthy enough given the current MCP/UI truth boundary + +### Phase 3: live cleanup + +Recommended order: + +1. keep `needs-type`, `needs-estimate`, `needs-specs` +2. create one generic `blocked` label +3. move active non-ticket blockers onto `blocked` with required reason comments +4. remove or archive: + - `needs-CTO` + - `needs-pm` + - `needs-decision` + - `needs-discussion` + - `needs-other` + +### Phase 4: enforcement tightening + +After the live cleanup lands: + +- update engine/config to stop depending on the retired blocker labels +- make merge-readiness check: + - native dependencies + - `blocked` + - readiness-failure labels only where still appropriate + +## Risks + +### Risk 1: hidden automation recreates old labels + +Mitigation: + +- inspect creation sources before deleting the labels + +### Risk 2: active issues lose blocker meaning during migration + +Mitigation: + +- migrate active issues before label deletion +- require reason comments on new `blocked` usage + +### Risk 3: readiness-failure labels are mixed up with durable blockers + +Mitigation: + +- explicitly keep `needs-type`, `needs-estimate`, `needs-specs` as readiness correction labels in the near term +- do not force them into the generic `blocked` bucket yet + +### Risk 4: MCP/UI mismatch on blocker relations + +Mitigation: + +- use the Linear UI as canonical for relation cleanup +- document any mismatch explicitly in the lane evidence + +## Recommendation + +Do not jump straight from the current `Blocked By` group to deletion. + +Recommended truthful path: + +1. keep the current readiness labels +2. create one generic `blocked` label for non-ticket blockers +3. move human/operator blocker reasons onto `blocked` +4. prefer native dependencies for issue-to-issue blocking +5. 
retire the extra human blocker labels only after usage and recreation sources are proven safe + +## Expected validation evidence + +- screenshot of current blocker label group before changes +- inventory of active issues using each blocker label +- screenshot of new `blocked` label if created +- proof that at least one issue-to-issue blocker is represented natively +- proof that merge-readiness logic still fails closed when blocker truth is present diff --git a/linear/docs/process/bit320_github_triggered_linear_automation_alignment_v1_2026-04-16.md b/linear/docs/process/bit320_github_triggered_linear_automation_alignment_v1_2026-04-16.md new file mode 100644 index 0000000..79d93ab --- /dev/null +++ b/linear/docs/process/bit320_github_triggered_linear_automation_alignment_v1_2026-04-16.md @@ -0,0 +1,79 @@ +# BIT-320 GitHub-Triggered Linear Automation Alignment v1 + +Date: 2026-04-16 +Owner: Product Development +Primary issue: [BIT-320 -- Align GitHub-triggered Linear automations with fail-closed merge readiness](https://linear.app/bitpod-app/issue/BIT-320/align-github-triggered-linear-automations-with-fail-closed-merge) + +## Objective + +Make GitHub-triggered Linear status changes truthful and fail-closed. + +Target behavior: + +- PR opened or draft PR opened may move work to `In Progress` +- PR review request or review activity may move work to `In Review` +- PR ready for merge may move work to `Delivered` +- PR or commit merge may move work from `Accepted` to `Done` only when merge-readiness truth is already satisfied + +## What this lane is solving + +GitHub events are objective, but they do not by themselves prove that: + +- QA passed +- PM accepted +- blockers are clear +- release work is safe to close + +This lane exists so the automation can stay truthful instead of silently over-closing work. 
+ +## Target truth rules + +### Allowed truth-forward moves + +- PR open / draft -> `In Progress` +- PR review request / review activity -> `In Review` +- PR ready for merge -> `Delivered` + +### Merge-close rule + +Merge to `main` may move `Accepted` -> `Done` only when all of the following are already true: + +- QA truth is present +- PM truth is present +- blocker truth is clear +- release truth does not block closure + +If those are not true, the automation must fail closed and leave a correction comment instead of forcing closure. + +## Why native Linear alone is not enough + +Native Linear GitHub workflow automation can move statuses, but it does not by itself guarantee: + +- merge-readiness validation +- blocker truth validation +- release-train exception handling +- comment-based correction when truth is missing + +Therefore this lane should be treated as: + +- native Linear configuration where possible +- custom enforcement where Linear cannot express the truth safely + +## Expected artifacts + +- live mapping proof for the Product Development workflow +- repo-side note documenting the final truth contract +- evidence that merges do not auto-close issues unless the guard conditions are satisfied + +## Validation checklist + +- confirm the default GitHub workflow mappings in Linear +- confirm merge-driven closure does not bypass QA / PM / blocker truth +- confirm any custom enforcement leaves a correction comment when it blocks closure + +## Escalate-back conditions + +- branch protection or reviewer-routing semantics conflict with the intended truth model +- live GitHub/Linear automation cannot express the fail-closed behavior safely +- proof of merge closure is not trustworthy enough for this lane + diff --git a/linear/docs/process/bit322_workspace_project_status_inventory_v1_2026-04-16.md b/linear/docs/process/bit322_workspace_project_status_inventory_v1_2026-04-16.md new file mode 100644 index 0000000..5ec8942 --- /dev/null +++ 
b/linear/docs/process/bit322_workspace_project_status_inventory_v1_2026-04-16.md @@ -0,0 +1,55 @@ +# BIT-322 Workspace Project Status Inventory v1 + +Date: 2026-04-16 +Owner: Product Development +Primary issue: [BIT-322 -- Inventory workspace project statuses and define the coarse canonical model](https://linear.app/bitpod-app/issue/BIT-322/inventory-workspace-project-statuses-and-define-the-coarse-canonical) + +## Objective + +Inventory the current workspace project-status surface and define the coarse canonical model that should remain without mirroring the full team issue workflow. + +## Observed current direction + +Project statuses should stay coarse and workspace-level. + +Recommended canonical model: + +- `Backlog` +- `Planned` +- `In Progress` +- `Completed` +- `Canceled` + +## Why this stays coarse + +Project statuses are portfolio / coordination truth, not execution truth. +If project statuses become as detailed as team issue workflow statuses, the system gets duplicate lifecycle truth and confusion about which surface is authoritative. + +## Inventory dimensions to capture + +- current workspace project statuses +- which projects actually use each status +- whether any project automation depends on a status name +- whether any project view or report assumes detailed project states + +## Decision rule + +Keep detailed execution semantics in issue workflows. +Use project statuses only for broad project phase truth. 
+ +## Expected artifact + +- one inventory/proposal note under `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/` + +## Validation checklist + +- capture the live project-status list +- identify any special-case usage +- confirm the canonical coarse model does not break existing project reporting + +## Escalate-back conditions + +- live projects depend on detailed statuses that would be lost +- project-status cleanup would silently change reporting or automation +- workspace/project status truth conflicts between UI and MCP + diff --git a/linear/docs/process/bit323_workspace_project_templates_v1_2026-04-16.md b/linear/docs/process/bit323_workspace_project_templates_v1_2026-04-16.md new file mode 100644 index 0000000..b45f4d8 --- /dev/null +++ b/linear/docs/process/bit323_workspace_project_templates_v1_2026-04-16.md @@ -0,0 +1,58 @@ +# BIT-323 Workspace Project Templates v1 + +Date: 2026-04-16 +Owner: Product Development +Primary issue: [BIT-323 -- Set up workspace project templates for Product Development standard work and release trains](https://linear.app/bitpod-app/issue/BIT-323/set-up-workspace-project-templates-for-product-development-standard) + +## Objective + +Define the workspace project templates that are useful for the near-term Product Development project model without trying to encode the full team issue workflow into project templates. + +## Recommended near-term templates + +- `PD - Standard Project` +- `PD - Release Train` + +Optional later, if usage justifies it: + +- `PD - Design / Brand Campaign` + +## What project templates should do + +Project templates should help with project scaffolding: + +- title structure +- default project description fields +- default project owners / members if available +- project-level labels or links if appropriate + +They should not try to act like issue workflows. + +## What project templates should not do + +Project templates should not become a second copy of the team issue-status system. 
+They should not encode QA / PM gate truth. +They should not try to replace team-specific issue workflows. + +## Decision rule + +Create only the templates that reflect real near-term use. +Do not create a broad template set for a future team shape that is not active yet. + +## Expected artifact + +- one template-setup note under `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/` +- if created live, screenshots or equivalent proof of the template surface + +## Validation checklist + +- confirm whether workspace project templates are available and editable +- confirm template names are clear and non-overlapping +- confirm templates do not imply issue workflow behavior + +## Escalate-back conditions + +- templates are not available at the workspace level +- template sprawl would create more confusion than value +- the template surface cannot express the useful project scaffolding cleanly + diff --git a/linear/docs/process/bit35_clean_thread_handoff_2026-04-16.md b/linear/docs/process/bit35_clean_thread_handoff_2026-04-16.md new file mode 100644 index 0000000..9c6ad4b --- /dev/null +++ b/linear/docs/process/bit35_clean_thread_handoff_2026-04-16.md @@ -0,0 +1,67 @@ +# BIT-35 clean thread handoff — 2026-04-16 + +Use this as the only starting context for a fresh thread. + +## Goal +Apply only the already-approved **narrow BIT-35 live Linear admin changes** for the Product Development team, with screenshot proof and rollback note. 
+ +## Canonical links +- [BIT-35 — Reconfigure Linear Issues, Issue Status,, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs) +- [BIT-319 — Product Development blocker cleanup toward native dependencies + minimal blocker taxonomy](https://linear.app/bitpod-app/issue/BIT-319/product-development-blocker-cleanup-toward-native-dependencies-minimal) +- [BIT-320 — Align GitHub-triggered Linear automations with fail-closed merge readiness](https://linear.app/bitpod-app/issue/BIT-320/align-github-triggered-linear-automations-with-fail-closed-merge) +- [BIT-322 — Inventory workspace project statuses and define the coarse canonical model](https://linear.app/bitpod-app/issue/BIT-322/inventory-workspace-project-statuses-and-define-the-coarse-canonical) +- [BIT-323 — Set up workspace project templates for Product Development standard work and release trains](https://linear.app/bitpod-app/issue/BIT-323/set-up-workspace-project-templates-for-product-development-standard) + +## GO now +Apply only these live Product Development settings: +1. default issue status = `Backlog` +2. duplicate mapping = `Duplicate` +3. status descriptions: + - `Icebox 🧊`: `Parked for later - unlikely soon. (30d inactive -> Stale)` + - `Backlog`: `Default for planned work, not ready to start. (30d inactive -> 🧊)` + - `Ready`: `All set, ready to start` + - `In Progress`: `Being worked on now` + - `In Review`: `Current review gate for Product Development` + - `Delivered`: `Waiting on PM acceptance` + - `Accepted`: `PM accepted; work approved` + - `Done`: `Fully complete and closed` + - `Canceled`: `Stopped / aborted` + - `Duplicate`: `Covered by another issue` + - `Won't Do`: `Decided not worth doing` + - `Obsolete`: `No longer relevant` + - `Stale`: `Inactive too long; closed but can reopen` +4. auto-archive closed issues after `1 month` +5. 
keep Triage = `Off` + +## NOT in scope +Do **not** change any of these in the same pass: +- `Backlog -> Icebox 🧊 -> Stale` as native Linear automation +- blocker taxonomy / `BIT-319` +- GitHub-triggered workflow config / `BIT-320` +- workspace project statuses / `BIT-322` +- workspace project templates / `BIT-323` + +## Required proof +Before changes: +- screenshot Product Development workflow/settings page +- screenshot duplicate mapping +- screenshot auto-archive / triage settings +- short rollback note + +After changes: +- screenshot Product Development workflow/settings page +- screenshot showing `Backlog` as default +- screenshot showing `Duplicate` mapping +- screenshot showing auto-archive `1 month` +- screenshot or note that Triage is still `Off` + +## Required BIT-35 comment after execution +Post a validation comment in BIT-35 saying: +- what changed +- what was intentionally left out of scope +- where the screenshots live +- that `Backlog -> Icebox 🧊 -> Stale` remains custom enforcement, not native Linear truth + +## Source docs if needed +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/bit35_live_admin_go_no_go_decision_2026-04-16.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/bit35_scoped_live_admin_execution_handoff_2026-04-16.md` diff --git a/linear/docs/process/bit35_control_tower_validation_packet_v1_2026-04-15.md b/linear/docs/process/bit35_control_tower_validation_packet_v1_2026-04-15.md new file mode 100644 index 0000000..bf7d7eb --- /dev/null +++ b/linear/docs/process/bit35_control_tower_validation_packet_v1_2026-04-15.md @@ -0,0 +1,183 @@ +# BIT-35 Control Tower Validation Packet v1 + +Date: 2026-04-15 +Status: Active validation packet +Owner: Control Tower / Product Development +Primary issue: [BIT-35 — Reconfigure Linear Issues, Issue Status,, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs) + +## 
Purpose + +Provide the Control Tower validation artifact for the BIT-35 live Linear admin/config lane. + +This packet exists because: + +- repo-side doctrine and enforcement are now aligned +- live Product Development admin/config mutation is still not validated end to end +- Control Tower policy requires completion to be validated, not inferred + +## Lane ledger + +Lane name: +- `BIT-35 live Product Development workflow/admin execution` + +Owner: +- `Control Tower` for validation +- `Product Development` for the admin mutation artifact + +Objective: +- align the live Product Development Linear issue workflow/settings with the current approved doctrine as far as the native Linear surface can truthfully express + +Permission level: +- guarded mutation + +Expected artifact: +- pre-change snapshot +- exact checklist execution record +- rollback note +- post-change validation screenshots +- explicit note of what remained custom enforcement vs native Linear config + +Current state: +- `DONE_UNVALIDATED` for repo-side alignment +- `NOT_STARTED` for the live admin mutation pass unless and until screenshots/proof are attached + +Auto-chain: +- only after `DONE_VALIDATED` + +Linear impact: +- team config +- issue statuses +- status descriptions +- duplicate mapping +- auto-close / auto-archive settings +- GitHub automation notes + +Truth surfaces touched: +- team config +- project config references +- agent / awareness surfaces +- `Update Linear` doctrine + +Escalate-back conditions: +- live UI and repo doctrine diverge in a material way +- native Linear cannot express a requested rule and the lane is about to pretend otherwise +- blocker/dependency truth is unclear +- GitHub / Linear automation truth conflicts with the repo enforcement layer +- screenshots or rollback notes are missing + +Next step if done: +- auto-chain into only the next already-approved guarded lane that depends on the validated live state + +Last validated outcome: +- repo-side implementation validated 
+- live Product Development admin/config mutation not yet validated in this packet + +## What Control Tower must validate + +Control Tower should not accept BIT-35 as complete until all of these are true: + +1. Artifact exists +- the live admin execution artifact exists: + - `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md` + +2. Scope was respected +- no hidden renames beyond the agreed current-name-preserving plan +- no speculative project-status redesign slipped into the team workflow lane +- no blocker cleanup beyond the approved BIT-35 scope + +3. Permission level was respected +- high-blast or cross-surface changes were not made without the documented guarded artifact package + +4. Blockers and uncertainty were labeled truthfully +- native-vs-custom automation limits were named explicitly +- if a requested rule could not be implemented natively, the record says so +- no fake “done” claim was made because a similar but weaker setting exists + +5. 
Any mutation was explicitly authorized and evidenced +- pre-change screenshots exist +- post-change screenshots exist +- rollback note exists +- validation note exists + +## Validation checklist + +### Required live proof + +- [ ] Product Development issue-status page screenshot before changes +- [ ] Product Development issue-status page screenshot after changes +- [ ] screenshot showing default status is `Backlog` +- [ ] screenshot showing duplicate mapping is `Duplicate` +- [ ] screenshot showing auto-archive setting +- [ ] if any native auto-close is enabled, screenshot and explanation of what it actually does +- [ ] validation note posted in `BIT-35` + +### Required truth checks + +- [ ] workflow names remain intact: `Backlog`, `Ready`, `In Progress`, `In Review`, `Delivered`, `Accepted`, `Done` +- [ ] `Stale` remains the intended inactivity-close status in doctrine +- [ ] `Obsolete` is not falsely presented as the primary inactivity sink +- [ ] no claim that native Linear alone now expresses `Backlog -> Icebox 🧊 -> Stale` +- [ ] no claim that native Linear alone now enforces fail-closed merge readiness + +### Required operator-surface checks + +- [ ] BIT-35 references the current doctrine artifacts +- [ ] BIT-35 references the CJ intent artifact +- [ ] BIT-35 notes which remaining changes still belong to guarded child lanes + +## Continuous validation loop + +This loop should continue only while the lane is unvalidated. + +### Loop state machine + +Allowed states for this packet: + +- `NOT_STARTED` +- `RUNNING` +- `DONE_UNVALIDATED` +- `DONE_VALIDATED` +- `BLOCKED` +- `PAUSED_BY_CJ` +- `CLOSED` + +### Loop rule + +Control Tower repeats the following until `DONE_VALIDATED`, `BLOCKED`, `PAUSED_BY_CJ`, or `CLOSED`: + +1. check whether the required artifact bundle exists +2. check whether the live screenshots/proof are present +3. check whether the mutation matched scope +4. check whether any native-vs-custom truth was overstated +5. 
either:
+   - mark `DONE_VALIDATED`, or
+   - return `BLOCKED` with explicit missing artifact / mismatch, or
+   - continue if a currently owned follow-up step is still executing
+
+### No-longer-than-needed rule
+
+This validation loop must stop as soon as one of these is true:
+
+- `DONE_VALIDATED`
+- `BLOCKED`
+- `PAUSED_BY_CJ`
+- `CLOSED`
+
+It must not remain as a vague perpetual supervision thread once the lane state is clear.
+
+## Current truthful status
+
+As of this packet:
+
+- repo doctrine: aligned
+- repo enforcement/tests: aligned
+- live Product Development admin truth: not yet validated here
+- Control Tower completion loop: now defined, but not yet satisfied because the live admin evidence package is still incomplete
+
+## Recommended next action
+
+Use this packet together with:
+
+- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md`
+
+Then execute or validate the live Product Development admin/config pass and close this packet only when the required screenshots and notes exist.
diff --git a/linear/docs/process/bit35_live_admin_go_no_go_decision_2026-04-16.md b/linear/docs/process/bit35_live_admin_go_no_go_decision_2026-04-16.md
new file mode 100644
index 0000000..f855996
--- /dev/null
+++ b/linear/docs/process/bit35_live_admin_go_no_go_decision_2026-04-16.md
@@ -0,0 +1,75 @@
+# BIT-35 live Linear admin go / no-go decision — 2026-04-16
+
+Status: active Control Tower decision note
+Owner: Control Tower / Product Development
+Primary issue: [BIT-35 — Reconfigure Linear Issues, Issue Status,, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs)
+
+## Decision summary
+
+Control Tower decision: **partial GO**. 
+ +It is okay to execute the narrow native Product Development settings already fully specified in the live admin checklist, as long as the guarded-lane evidence package is captured during execution: + +- pre-change screenshots / snapshot +- exact execution record +- rollback note +- post-change validation screenshots +- validation note in BIT-35 + +It is **not** okay to treat the broader deferred config family as implicitly approved just because the doctrine exists. + +## GO now — scoped BIT-35 native settings only + +These are approved for guarded execution now because they are already explicitly specified, preserve current live workflow names, and are reversible: + +1. Default issue status = `Backlog` +2. Duplicate mapping = `Duplicate` +3. Product Development status descriptions: + - `Icebox 🧊`: `Parked for later - unlikely soon. (30d inactive -> Stale)` + - `Backlog`: `Default for planned work, not ready to start. (30d inactive -> 🧊)` + - `Ready`: `All set, ready to start` + - `In Progress`: `Being worked on now` + - `In Review`: `Current review gate for Product Development` + - `Delivered`: `Waiting on PM acceptance` + - `Accepted`: `PM accepted; work approved` + - `Done`: `Fully complete and closed` + - `Canceled`: `Stopped / aborted` + - `Duplicate`: `Covered by another issue` + - `Won't Do`: `Decided not worth doing` + - `Obsolete`: `No longer relevant` + - `Stale`: `Inactive too long; closed but can reopen` +4. Auto-archive closed issues after `1 month` +5. Keep Triage = `Off` + +## NOT GO yet — still separate guarded lanes + +These remain deferred and should not be silently bundled into the BIT-35 live pass: + +1. `Backlog -> Icebox 🧊` automation as a native Linear rule + - stays custom enforcement, not native config +2. `Icebox 🧊 -> Stale` or `Obsolete` inactivity-close policy as if native Linear fully expresses it + - stays custom enforcement / doctrine until separately proven +3. Blocker taxonomy cleanup / dependency cleanup + - belongs to BIT-319 +4. 
GitHub-triggered Linear automation alignment + - belongs to BIT-320 +5. Workspace project-status normalization + - belongs to BIT-322 +6. Workspace project-template creation + - belongs to BIT-323 + +## Why project statuses and templates are still no-go + +Even though they simplify the model, the current artifacts for BIT-322 and BIT-323 are still proposal/inventory notes, not validated live mutation packets. They still need: + +- live availability check +- pre-change snapshot +- rollback note +- post-change validation +- Control Tower closeout on the evidence package + +## Execution rule + +If a fresh execution thread is opened now, it should be scoped only to the GO-now BIT-35 settings above. + +It should not opportunistically mutate project statuses, project templates, blocker taxonomy, or GitHub-triggered workflow settings in the same pass. diff --git a/linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md b/linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md new file mode 100644 index 0000000..bafd56c --- /dev/null +++ b/linear/docs/process/bit35_product_development_live_admin_execution_checklist_v1_2026-04-15.md @@ -0,0 +1,334 @@ +# BIT-35 Product Development Live Admin Execution Checklist v1 + +Date: 2026-04-15 +Owner: Product Development +Primary issue: [BIT-35 — Reconfigure Linear Issues, Issue Status,, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs) +Supporting doctrine: + +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_operating_model_v1.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_operating_guide_v3.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md` +- 
`/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_process_cj_intent_packet_2026-04-15.md` + +## Purpose + +Turn BIT-35 into a concrete live admin/config lane with: + +- exact target settings +- exact description text +- explicit native-vs-custom automation split +- screenshot requirements +- rollback notes + +This file is for the live Linear UI/admin execution pass. It does not replace the repo-side doctrine. + +## Source-of-truth product constraints + +Official Linear docs used for this checklist: + +- Issue status: [linear.app/docs/configuring-workflows](https://linear.app/docs/configuring-workflows) +- Delete and archive issues: [linear.app/docs/delete-archive-issues](https://linear.app/docs/delete-archive-issues) +- GitHub integration: [linear.app/docs/github-integration](https://linear.app/docs/github-integration) +- Project templates: [linear.app/docs/project-templates](https://linear.app/docs/project-templates) +- Issue templates: [linear.app/docs/issue-templates](https://linear.app/docs/issue-templates) + +Important implementation truth from those docs: + +- team issue workflows are team-specific +- default issue status is native and should be `Backlog` +- duplicate mapping is native and should map to `Duplicate` +- auto-close only closes into a closed status and does not implement arbitrary open-status aging transitions +- auto-archive is native +- project statuses are workspace-level +- project templates can be workspace-level or team-level +- issue templates can be workspace-level or team-level, but workspace templates cannot preset team-specific labels/statuses + +## Current live Product Development snapshot + +Observed live statuses: + +- `Backlog` +- `Icebox 🧊` +- `Ready` +- `In Progress` +- `In Review` +- `Delivered` +- `Accepted` +- `Done` +- `Canceled` +- `Duplicate` +- `Won't Do` +- `Obsolete` +- `Stale` + +Observed live label groups: + +- `Issue Type` +- `Delivered - PM Gate` +- `In Review - QA Gate` +- `Blocked By` + 
+Observed live type labels: + +- `🏁 Release` +- `📄 Plan` +- `⚙️ Chore` +- `🐞 Bug` +- `🎨 Design` +- `⭐️ Feature` + +## Execution rule + +Do not claim BIT-35 complete unless both are true: + +1. the live Product Development UI/admin state matches the agreed target closely enough +2. the change is evidenced with screenshots, rollback note, and post-change validation + +## Native Linear settings to change now + +These are the changes that belong in the Linear UI itself. + +### 1. Team workflow names + +Keep the current live names: + +- `Backlog` +- `Icebox 🧊` +- `Ready` +- `In Progress` +- `In Review` +- `Delivered` +- `Accepted` +- `Done` +- `Canceled` +- `Duplicate` +- `Won't Do` +- `Obsolete` +- `Stale` + +Do not rename `In Review`, `Delivered`, `Accepted`, or the current PM/QA result labels in this pass. + +### 2. Default status + +Set default issue status to: + +- `Backlog` + +Validation: + +- confirm the status row shows `Default` +- create or simulate a new issue and verify it lands in exact status `Backlog` + +### 3. Duplicate mapping + +Set duplicate issue status mapping to: + +- `Duplicate` + +Validation: + +- mark a test issue duplicate of another issue +- verify the issue lands in `Duplicate`, not generic `Canceled` + +### 4. Status descriptions + +Apply these short descriptions in Product Development issue statuses: + +- `Icebox 🧊`: `Parked for later - unlikely soon. (30d inactive -> Stale)` +- `Backlog`: `Default for planned work, not ready to start. (30d inactive -> 🧊)` +- `Ready`: `All set, ready to start` +- `In Progress`: `Being worked on now` +- `In Review`: `Current review gate for Product Development` +- `Delivered`: `Waiting on PM acceptance` +- `Accepted`: `PM accepted; work approved` +- `Done`: `Fully complete and closed` +- `Canceled`: `Stopped / aborted` +- `Duplicate`: `Covered by another issue` +- `Won't Do`: `Decided not worth doing` +- `Obsolete`: `No longer relevant` +- `Stale`: `Inactive too long; closed but can reopen` + +### 5. 
Auto-archive + +Set: + +- auto-archive closed issues after `1 month` + +Reason: + +- this is natively supported +- it matches the current doctrine well enough + +Validation: + +- screenshot the team setting +- note that changes can take up to 24 hours to apply + +### 6. Triage + +Keep Triage: + +- `Off` + +Reason: + +- current Product Development process is not using Triage as an intake surface + +### 7. Closed-status interpretation + +Document these meanings in BIT-35 validation notes: + +- `Duplicate` = duplicate flow only +- `Won't Do` = explicit non-pursuit decision +- `Stale` = inactivity-close state +- `Canceled` = stopped / aborted without stronger claim +- `Obsolete` = legacy edge-case; do not use as the default inactivity sink + +## Native settings to leave alone in this pass + +Do not mutate these yet as part of BIT-35 unless a separate guarded child lane is being executed with matching proof: + +- workspace project statuses +- workspace project templates +- team/workspace issue-template redesign +- blocker label-group redesign beyond description-level clarification +- broad label-group renames + +These belong to: + +- `BIT-319` +- `BIT-322` +- `BIT-323` + +## Behavior that is not natively expressible and must stay in custom enforcement + +The following should not be misrepresented as native Linear issue-status automation: + +### 1. `Backlog -> Icebox 🧊` + +Linear native auto-close closes issues into a closed status. + +It does not implement: + +- open-status aging from `Backlog` to `Icebox 🧊` + +Therefore this behavior stays in: + +- custom bot/enforcement layer +- or a future external automation surface + +### 2. 
`Icebox 🧊 -> Stale` + +If native auto-close can be configured to target `Stale`, that still does not solve the full intended path by itself, because: + +- native auto-close is a closed-status automation +- it is not the same thing as the full backlog-aging policy +- it will also be affected by the cycle/project completion rules documented by Linear + +Therefore the canonical truth remains: + +- use custom enforcement for the intended two-step aging path +- do not claim native Linear alone now expresses the full rule + +### 3. Fail-closed merge readiness + +The exact rule: + +- merge to `main` only closes when status is `Accepted`, QA + PM outcome labels exist, blocker truth is clear, and issue type is not `Release` + +belongs in: + +- repo engine +- `Update Linear` +- custom GitHub/Linear enforcement + +not pure native Linear settings. + +## GitHub integration target for BIT-35 notes + +Native GitHub integration can stay enabled, but the strict truth rules belong in the enforcement layer. + +Near-term intent: + +- branch/PR open can move to `In Progress` +- review-ready PR state can move to `In Review` +- merge to `main` should not independently bypass the custom fail-closed merge-readiness rule + +Operational note: + +- if native GitHub automation and custom enforcement conflict, custom enforcement is authoritative +- exact live alignment work belongs in `BIT-320` + +## Label-group handling in this pass + +### Keep as-is now + +Keep the current live groups for this pass: + +- `Issue Type` +- `Delivered - PM Gate` +- `In Review - QA Gate` +- `Blocked By` + +Reason: + +- current code/tests/docs still couple to the current PM/QA label names +- blocker cleanup is a separate guarded lane + +### Long-term intended direction + +Still active, but not part of this immediate live mutation pass: + +- native dependencies first +- one generic `blocked` label for non-ticket blockers + +This direction was deferred, not rejected. 
+ +It belongs to: + +- `BIT-319` + +## Screenshots / evidence required + +BIT-35 should not be marked complete without these: + +1. Product Development issue-status page showing: +- all active statuses +- default `Backlog` +- status descriptions + +2. Product Development issue-status automation section showing: +- duplicate mapping +- auto-archive setting +- any auto-close setting that remains enabled + +3. If any native GitHub-linked workflow automation settings are changed: +- screenshot those settings too + +4. One validation artifact showing: +- new issue lands in `Backlog` +- duplicate mapping lands in `Duplicate` +- no accidental rename broke the current workflow names + +## Rollback note + +If this live pass creates confusion or drift: + +1. restore previous status descriptions from screenshot evidence +2. restore previous duplicate/auto-archive settings from screenshot evidence +3. leave workflow names unchanged +4. post rollback note in `BIT-35` +5. keep repo doctrine unchanged until the new live plan is re-validated + +## Completion checklist for BIT-35 + +- [ ] pre-change screenshots captured +- [ ] default status verified as `Backlog` +- [ ] duplicate mapping verified as `Duplicate` +- [ ] status descriptions updated +- [ ] auto-archive set to 1 month +- [ ] Triage remains off +- [ ] any native auto-close setting is documented truthfully, including what it does not cover +- [ ] post-change screenshots captured +- [ ] validation note posted in `BIT-35` +- [ ] Control Tower validation recorded diff --git a/linear/docs/process/bit35_scoped_live_admin_execution_handoff_2026-04-16.md b/linear/docs/process/bit35_scoped_live_admin_execution_handoff_2026-04-16.md new file mode 100644 index 0000000..f3b349c --- /dev/null +++ b/linear/docs/process/bit35_scoped_live_admin_execution_handoff_2026-04-16.md @@ -0,0 +1,117 @@ +# BIT-35 scoped live admin execution handoff — 2026-04-16 + +Status: ready for fresh execution thread +Owner: Product Development / Control 
Tower scoped handoff
+Primary issue: [BIT-35 — Reconfigure Linear Issues, Issue Status,, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs)
+
+## Scope of this handoff
+
+This handoff is intentionally narrow.
+
+It is authorized to execute only the native Product Development settings that already have a Control Tower partial GO:
+
+1. default issue status = `Backlog`
+2. duplicate mapping = `Duplicate`
+3. status descriptions exactly as listed in the live admin checklist
+4. auto-archive closed issues after `1 month`
+5. keep Triage = `Off`
+
+It must **not** mutate:
+
+- `Backlog -> Icebox 🧊 -> Stale` as if native Linear expresses that end to end
+- blocker taxonomy cleanup
+- GitHub-triggered workflow settings beyond notes/validation
+- workspace project statuses
+- workspace project templates
+
+## Required artifacts before mutation
+
+Capture and save or attach:
+
+- pre-change screenshot of Product Development issue-workflow/settings page
+- pre-change screenshot of duplicate mapping
+- pre-change screenshot of auto-archive / triage settings if separate
+- short rollback note describing how to restore prior descriptions/settings
+
+## Exact settings to apply
+
+### Workflow names
+Leave unchanged:
+
+- `Backlog`
+- `Icebox 🧊`
+- `Ready`
+- `In Progress`
+- `In Review`
+- `Delivered`
+- `Accepted`
+- `Done`
+- `Canceled`
+- `Duplicate`
+- `Won't Do`
+- `Obsolete`
+- `Stale`
+
+### Default status
+Set default issue status to:
+
+- `Backlog`
+
+### Duplicate mapping
+Set duplicate issue status mapping to:
+
+- `Duplicate`
+
+### Status descriptions
+Apply exactly:
+
+- `Icebox 🧊`: `Parked for later - unlikely soon. (30d inactive -> Stale)`
+- `Backlog`: `Default for planned work, not ready to start. 
(30d inactive -> 🧊)` +- `Ready`: `All set, ready to start` +- `In Progress`: `Being worked on now` +- `In Review`: `Current review gate for Product Development` +- `Delivered`: `Waiting on PM acceptance` +- `Accepted`: `PM accepted; work approved` +- `Done`: `Fully complete and closed` +- `Canceled`: `Stopped / aborted` +- `Duplicate`: `Covered by another issue` +- `Won't Do`: `Decided not worth doing` +- `Obsolete`: `No longer relevant` +- `Stale`: `Inactive too long; closed but can reopen` + +### Auto-archive +Set closed issue auto-archive to: + +- `1 month` + +### Triage +Keep: + +- `Off` + +## Required post-change validation + +Capture and attach: + +- post-change screenshot of Product Development issue-workflow/settings page +- screenshot showing `Backlog` as default +- screenshot showing `Duplicate` mapping +- screenshot showing auto-archive setting +- screenshot or note confirming Triage remains `Off` + +Then post a validation comment in BIT-35 that states: + +- what changed +- what was intentionally left out of scope +- where the screenshots live +- that `Backlog -> Icebox 🧊 -> Stale` remains custom enforcement, not native Linear truth + +## Close conditions for this handoff + +This handoff is complete only when: + +- the exact scoped settings above are applied +- the pre/post evidence package exists +- the rollback note exists +- a validation note is posted in BIT-35 +- Control Tower can truthfully mark this scoped pass validated diff --git a/linear/docs/process/linear_admin_change_control_v1.md b/linear/docs/process/linear_admin_change_control_v1.md index ade4f9c..bff4ffd 100644 --- a/linear/docs/process/linear_admin_change_control_v1.md +++ b/linear/docs/process/linear_admin_change_control_v1.md @@ -25,6 +25,8 @@ Use this document for Linear changes that affect: If a Linear change has meaningful blast radius, it must be reversible, evidenced, and explicitly approved at the right level before execution. 
+If the change touches a live truth surface that Control Tower also governs, the change is not complete until Control Tower validates the artifact/evidence package. + ## Default mode - mode: `high_autonomy_with_audit` @@ -94,6 +96,7 @@ Blocked unless CJ explicitly approves: 3. Blocked flows remain resolvable. 4. PM, QA, and engineering ownership fields still work. 5. No orphaned status transitions were introduced. +6. Control Tower truth surfaces remain synchronized or the divergence is explicitly logged. ## Lockdown trigger diff --git a/linear/docs/process/linear_change_proposal_template_v1.md b/linear/docs/process/linear_change_proposal_template_v1.md index 5fe8653..e56ab8a 100644 --- a/linear/docs/process/linear_change_proposal_template_v1.md +++ b/linear/docs/process/linear_change_proposal_template_v1.md @@ -28,6 +28,12 @@ Use this template when changing Linear workflows, statuses, labels, templates, a - affected teams, projects, statuses, labels, templates, automations: - estimated number of affected entities: +- truth surfaces touched: + - team config + - project config + - agent / awareness surfaces +- escalate-back conditions: +- next step if done: ## Pre-change snapshot diff --git a/linear/docs/process/linear_operating_guide_changelog.md b/linear/docs/process/linear_operating_guide_changelog.md index b4a843f..94342c6 100644 --- a/linear/docs/process/linear_operating_guide_changelog.md +++ b/linear/docs/process/linear_operating_guide_changelog.md @@ -23,10 +23,18 @@ Maintenance update — 2026-03-22: - make field truth maintenance explicit for status/state, links, ownership, project membership, and explicit dependency links - keep the BIT-35 workflow reconfiguration spec as target behavior while clarifying native-vs-custom automation limits +Maintenance update — 2026-04-15: +- align risky workflow/admin/guidance changes with Control Tower validation and artifact-first completion +- preserve current live workflow names while tightening semantics around `In 
Review`, `Delivered`, `Accepted`, and `Done` +- make GitHub-driven status changes explicitly fail closed on missing QA, PM, blocker, or release truth +- make `Stale` the primary inactivity-close status and leave `Obsolete` as legacy/edge-case +- align Vera-style QA guidance so it does not overclaim independent embodied QA authority + Linked artifacts: - `/Users/cjarguello/bitpod-app/bitpod-tools/linear/docs/process/linear_operating_guide_v3.md` - `/Users/cjarguello/bitpod-app/bitpod-tools/linear/docs/process/linear_admin_change_control_v1.md` - `/Users/cjarguello/bitpod-app/bitpod-tools/linear/docs/process/linear_change_proposal_template_v1.md` +- `/Users/cjarguello/bitpod-app/bitpod-tools/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md` Rollback target: - `v2` diff --git a/linear/docs/process/linear_operating_guide_v3.md b/linear/docs/process/linear_operating_guide_v3.md index 9571745..3c5a211 100644 --- a/linear/docs/process/linear_operating_guide_v3.md +++ b/linear/docs/process/linear_operating_guide_v3.md @@ -3,7 +3,7 @@ Version: v3 Status: Active Owner: Product Development (Codex + Taylor) -Last updated: 2026-03-24 +Last updated: 2026-04-15 Primary issue: [BIT-22 — Versioned Linear operating guide for agents (with rollback path)](https://linear.app/bitpod-app/issue/BIT-22/versioned-linear-operating-guide-for-agents-with-rollback-path) Supersedes: `linear_operating_guide_v2.md` as the active guide @@ -13,6 +13,13 @@ Define the canonical way agents use Linear for execution tracking, evidence logg This guide is the active BitPod-specific Linear overlay, not the final Taylor capability model. 
+Maintenance update — 2026-04-15: + +- align workflow/admin/guidance mutations with Control Tower validation rather than treating thread completion as sufficient +- preserve current live workflow names that are already wired into code, tests, and prompts +- treat current Vera-style QA as a truthful substitute surface, not proof of embodied independent Vera authority +- keep GitHub-driven Linear truth, but fail closed when merge, QA, PM, or blocker truth is incomplete + ## Scope - issue lifecycle handling for migration and operations work @@ -24,6 +31,11 @@ This guide is the active BitPod-specific Linear overlay, not the final Taylor ca ## Operating rules +0. Control Tower validation boundary +- Control Tower owns lane validation, sequencing, blocker truth, stale-lane retirement, truth-surface synchronization, and final recommendation packets. +- Workflow/admin/guidance changes touching live Linear truth surfaces are guarded lanes, not ad hoc tweaks. +- No risky workflow/admin mutation is complete until the change proposal, snapshot, rollback note, and post-change validation package exists and is validated. + 1. Evidence-first claims - Completion claims must include commands or UI proof and artifact path(s). - Queue status alone is not proof of completion. @@ -86,6 +98,7 @@ This guide is the active BitPod-specific Linear overlay, not the final Taylor ca 9. Linear admin change control - Workflow, schema, template, automation, or other meaningful admin changes must follow `linear_admin_change_control_v1.md`. - Destructive or high-blast changes require a written proposal, a snapshot, and a rollback note before execution. +- Live truth-surface changes also require Control Tower validation of the evidence package before they count as complete. 10. Taylor01 portability gate - Any relevant issue must include a Taylor01 Portability Check block. 
@@ -103,6 +116,19 @@ This guide is the active BitPod-specific Linear overlay, not the final Taylor ca - Close stale tickets, merge duplicates, and normalize missing acceptance criteria before adding more structure. - Prefer fewer labels unless a new label clearly solves repeated friction. +12. QA and PM truth must be honest +- Do not imply “Vera QA” unless a real independent Vera-capable surface exists for that run. +- Current substitute QA surfaces must label themselves honestly. +- If required QA is missing, the truthful state is blocked by missing QA, not implied pass. +- CJ waiver is waiver, not QA. + +13. GitHub truth is allowed, but fail closed +- Objective GitHub events may update Linear when the event is real and the gate state is valid. +- PR open/draft may move work to `In Progress`. +- Review-ready PR state may move work to `In Review`. +- Merge to `main` may move work from `Accepted` to `Done` only when QA, PM, blocker, and release truth are already satisfied. +- Otherwise, the engine must leave a correction comment and stop short of closure. + ## Required issue evidence format - What was changed @@ -124,10 +150,18 @@ This guide is the active BitPod-specific Linear overlay, not the final Taylor ca ## Current workspace status model -- Active statuses observed: `Icebox 🧊`, `Backlog`, `Ready`, `In Progress`, `In Review`, `Delivered`, `Accepted`, `Done`, `Canceled`, `Duplicate`, `Obsolete`, `Won't Do`. +- Active statuses observed: `Icebox 🧊`, `Backlog`, `Ready`, `In Progress`, `In Review`, `Delivered`, `Accepted`, `Done`, `Canceled`, `Duplicate`, `Obsolete`, `Won't Do`, `Stale`. - Canonical status, label, board, gate, and automation semantics now live in `linear_operating_model_v1.md`. - Legacy lifecycle labels and older review label groups are transitional residue and should not be expanded. 
+Near-term canonical interpretation: + +- `In Review` remains the current Product Development review gate +- `Delivered` remains PM gate +- `Accepted` remains the PM-accepted checkpoint before `Done` +- `Stale` is the primary inactivity-close status +- `Obsolete` remains legacy/edge-case and is not the primary inactivity sink + ## Version to config mapping (v3) This version corresponds to: @@ -146,6 +180,7 @@ Artifacts: - `taylor01_portability_review_gate_v1.md` - `linear_admin_change_control_v1.md` - `linear_change_proposal_template_v1.md` +- `linear_process_v1_1_control_tower_change_proposal_2026-04-15.md` - `linear_link_reference_policy_v1.md` - `capability_state_truth_label_incident_protocol_v1.md` diff --git a/linear/docs/process/linear_operating_model_v1.md b/linear/docs/process/linear_operating_model_v1.md index f133f1c..cd3166a 100644 --- a/linear/docs/process/linear_operating_model_v1.md +++ b/linear/docs/process/linear_operating_model_v1.md @@ -11,6 +11,13 @@ Build a full, opinionated Linear operating model that is heavily influenced by P The active v1 model remains the structural baseline: statuses, gate-driven movement, one shared board, blocker semantics, cycle cadence, estimate discipline, and the role of `Update Linear` remain canonical where still correct. This document rewrites that baseline into one complete operating doctrine. 
+Maintenance update — 2026-04-15: + +- align risky workflow/admin/guidance changes under Control Tower validation and Taylor01 reliability guardrails +- preserve current live status and label names that are already wired into code, tests, prompts, and agent guidance +- keep GitHub-driven Linear truth, but fail closed when QA, PM, blocker, or merge-readiness truth is incomplete +- treat the current Vera-style QA surface as a truthful substitute surface, not proof of embodied independent Vera authority + Pivotal's discipline still matters: - small scoped work @@ -54,6 +61,38 @@ Define one complete Linear operating model for a team that wants: - automation-backed enforcement - a Pivotal-style operating feel without pretending AI teams work like 2015 human-only scrum teams +## Authority boundary + +Control Tower owns: + +- lane creation +- lane validation +- sequencing +- blocker truth +- stale-lane retirement +- truth-surface synchronization +- final recommendation packets + +Taylor01 / Product Development Linear doctrine owns: + +- workflow semantics +- team-level admin config proposal +- issue/project template proposal +- status/label/automation truth model + +`Update Linear` and enforcement lanes own: + +- fail-closed mutation behavior +- merge-readiness checks +- gate-evidence enforcement +- skip-policy enforcement + +Practical rule: + +- no risky workflow/admin mutation is complete until the artifact and evidence package is validated through the Control Tower lane +- thread completion is not task completion +- any workflow/guidance rename or config change touching a live truth surface is a guarded lane, not an ad hoc tweak + ## Core philosophy ### Pivotal influence, adapted @@ -103,43 +142,44 @@ Projects should not be used for ordinary features or as a second backlog. 
| Stage | Status | Short description | |---|---|---| -| Cold | `Icebox 🧊` | Stale work under reconsideration, delete, cancel, or obsolete review | +| Cold | `Icebox 🧊` | Parked for later; unlikely to be done soon | | Intake | `Backlog` | Default landing place for real work that is not yet execution-ready | | Ready lane | `Ready` | Fully shaped and allowed to start | | Execution | `In Progress` | Active implementation or execution | -| QA gate | `In Review` | Active QA / technical review stage | -| PM gate | `Delivered` | QA-cleared work waiting for PM acceptance or rejection | -| Accepted end-state | `Accepted` | Explicitly accepted outcome; ready for final operational closure if needed | +| Review gate | `In Review` | Current Product Development review gate; QA is the default meaning today | +| PM gate | `Delivered` | Review-cleared work waiting for PM acceptance or rejection | +| Accepted end-state | `Accepted` | PM accepted; work approved | | Done end-state | `Done` | Fully complete and closed | -| Canceled | `Canceled` | Intentionally stopped | +| Canceled | `Canceled` | Intentionally stopped without stronger semantic claim | | Canceled | `Duplicate` | Superseded by canonical work elsewhere | -| Canceled | `Obsolete` | Context changed; no longer relevant | | Canceled | `Won't Do` | Explicitly understood and intentionally not implemented | +| Canceled | `Stale` | Auto-closed for inactivity; can be reopened later | +| Legacy edge-case | `Obsolete` | Context changed; no longer relevant; do not use as the primary inactivity sink | ### Path map Standard path: `Backlog -> Ready -> In Progress -> In Review -> Delivered -> Accepted -> Done` -Short path for explicit low-risk exceptions: -`Backlog -> Ready -> In Progress -> In Review -> Done` - Rejection loops: `In Review --qa-failed--> In Progress` `Delivered --pm-rejected--> In Progress` Aging path: -`Backlog --untouched--> Icebox 🧊` +`Backlog --30d inactive--> Icebox 🧊 --30d inactive--> Stale` ### Status rules - 
Default issue status is `Backlog` - `Ready` is the only canonical execution-ready status - `Accepted` and `Done` are both real and are not duplicates -- `Accepted` is not terminal for the standard path; it is the accepted checkpoint before final closure in `Done` +- `Accepted` means PM accepted; work is approved +- `Done` means the issue is fully complete and closed - `Icebox 🧊` is not a default intake lane +- `Stale` is the default inactivity-close status +- `Obsolete` is a legacy edge-case status and not the primary inactivity sink - no emoji issue statuses except `Icebox 🧊` -- assume work generally requires acceptance, with a few explicit low-risk exceptions such as tiny dependency-update chores or tiny low-risk bug fixes +- assume work generally requires both QA-result truth and PM-result truth before merge-driven closure ## One shared board @@ -286,7 +326,7 @@ Use labels mainly as automation triggers. Labels should only stay on tickets whi ### Canonical label groups -Use exactly four single-select label groups with short descriptions: +Use exactly four single-select label groups with short descriptions in the current live model: - `Issue Type` - `Blocked By` @@ -326,6 +366,12 @@ Rules: - keep blocker naming professional - `needs-other` requires a comment +Near-term direction: + +- prefer native Linear dependencies whenever one issue blocks another +- do not expand the blocker taxonomy +- future cleanup should converge toward native dependencies plus one generic `blocked` signal for non-ticket blockers + ### `QA Review` Description: `QA result used to unlock work out of In Review` @@ -344,12 +390,13 @@ Description: `PM result used to unlock work out of Delivered` ### Gate rule summary -- `In Review` means QA stage +- `In Review` remains the current Product Development review gate - `Delivered` means PM acceptance / rejection stage - labels trigger automation - stale gate labels must be cleared on re-entry to the relevant status - labels should not linger after 
they stop being semantically useful - there are no pending QA or PM labels +- current Vera-style QA prompts and review skills must describe themselves truthfully as substitute surfaces unless a stronger independent runtime exists ## Blockers vs native Linear dependencies @@ -380,13 +427,13 @@ Do not use blocker labels as a lazy substitute for proper issue-to-issue blockin | Current | Trigger | Evidence | Next | |---|---|---|---| | `In Progress` | ready for QA | execution evidence | `In Review` | -| `In Review` | `qa-passed` | QA artifact | `Delivered` or `Done` | +| `In Review` | `qa-passed` | QA artifact | `Delivered` | | `In Review` | `qa-failed` | QA artifact | `In Progress` | -| `In Review` | `qa-skipped` | skip authorization + reason | `Delivered` or `Done` | +| `In Review` | `qa-skipped` | skip authorization + reason | `Delivered` | | `Delivered` | `pm-accepted` | acceptance artifact | `Accepted` | -| `Delivered` | `pm-rejected` | rejection artifact | `In Progress` | -| `Delivered` | `pm-skipped` | skip authorization + reason | `Done` | -| `Accepted` | final closure step | closure evidence if needed | `Done` | +| `Delivered` | `pm-rejected` | rejection artifact + reason | `In Progress` | +| `Delivered` | `pm-skipped` | skip authorization + reason | `Accepted` | +| `Accepted` | merge to `main` with merge-ready truth satisfied | merge evidence | `Done` | ### Skip controls @@ -398,6 +445,12 @@ Skips should be allowed through skills, either: These are policy controls, not excuses for workflow sloppiness. +Near-term skip rules: + +- `qa-skipped` is allowed for clearly non-technical or QA-inappropriate Product Development work, especially `Design` +- `pm-skipped` is allowed only for conservative low-risk cases with explicit reason artifact +- `pm-skipped` must not bypass blocker truth, release truth, or merge-readiness truth + ## Delegation Support delegation through skills. @@ -436,6 +489,7 @@ Keep them short. One sentence or less. 
- status-transition enforcement - delegated PM logic - rejection comments +- blocker-aware merge refusal - mutation refusal when requirements are missing - persistence of short-lived structured operational memory if that layer exists @@ -453,6 +507,7 @@ Its job is to make the ticket more truthful and fail closed when required truth- - refuse moves that skip required evidence or required fields - enforce type and estimate requirements before `Ready` and `In Progress` - enforce gate artifacts before `qa-*` and `pm-*` labels +- leave explicit correction comments when merge, close, or skip truth is incomplete - never silently apply partial truth updates - leave an explicit correction comment when a mutation is rejected @@ -474,3 +529,14 @@ Projects are for big, long-term bodies of work. Do not use them for: - things that should just be Plans Prefer using the Project itself rather than creating project-label sprawl. + +Project rule: + +- projects are coordination containers, not execution workflows +- workspace project statuses should stay coarse and non-competing with the team issue workflow +- do not mirror `In Review`, `Delivered`, `Accepted`, or other team issue states into project status design + +Near-term template direction: + +- prefer Product Development team issue templates for issue-shaping patterns +- use workspace project templates sparingly for coarse project scaffolding such as `PD - Standard Project` and `PD - Release Train` diff --git a/linear/docs/process/linear_process_cj_intent_packet_2026-04-15.md b/linear/docs/process/linear_process_cj_intent_packet_2026-04-15.md new file mode 100644 index 0000000..8308d87 --- /dev/null +++ b/linear/docs/process/linear_process_cj_intent_packet_2026-04-15.md @@ -0,0 +1,221 @@ +# Linear Process CJ Intent Packet + +Date: 2026-04-15 +Status: Active intent record +Owner: Product Development +Purpose: Capture CJ's agreed near-term Linear Process intent in one place so Control Tower can sequence, validate, and delegate 
the work without reconstructing it from thread history. + +## Why this exists + +The Control Tower-aligned v1.1 implementation intentionally split: + +- repo-side doctrine and enforcement alignment +- live Linear admin/config mutation +- guarded follow-up lanes + +That split reduced risk, but it also makes it easy to lose the distinction between: + +- what CJ explicitly wants +- what has already been implemented in repo +- what is still deferred to guarded execution + +This file records the first item directly. + +## CJ intent summary + +CJ wants the active Product Development Linear Process improved in a serious way, while preserving truth and avoiding fake certainty. + +Near-term intent: + +- improve the active Product Development team workflow in place +- preserve current live workflow names unless and until a coordinated migration is approved +- keep Control Tower as the validator and sequencing authority for risky mutations +- make GitHub and Linear truth line up more tightly +- keep project handling part of the plan +- avoid over-optimizing for a future multi-team shape before it is actually needed + +## The main changes CJ wanted + +### 1. Backlog vs Icebox must be clearly different + +Intent: + +- `Backlog` = default landing space for real work, not yet ready to start +- `Icebox 🧊` = parked for later; unlikely to be done soon +- `Backlog` should age into `Icebox 🧊` +- `Icebox 🧊` should not be used as a general intake lane + +Target wording direction: + +- short, blunt status descriptions +- include the practical aging rule in the description where useful + +### 2. Inactivity should close into Stale, not Obsolete by default + +Intent: + +- `Backlog` 30d inactive -> `Icebox 🧊` +- `Icebox 🧊` 30d inactive -> `Stale` +- automated stale moves should leave a comment explaining the move +- `Obsolete` should not be the default inactivity sink + +Reason: + +- inactivity alone does not mean the work is no longer relevant + +### 3. 
Preserve current live workflow names for now + +Intent: + +- keep `In Review` +- keep `Delivered` +- keep `Accepted` +- keep current PM label names such as `pm-accepted` + +Reason: + +- current code, tests, prompts, and active guidance already depend on these names +- renaming now would create unnecessary drift + +### 4. Tighten the review and acceptance path + +Intent: + +- `In Review` remains the current Product Development review gate +- QA result labels move work out of `In Review` +- `Delivered` remains the PM gate +- `pm-rejected` should move work back to `In Progress` +- `pm-skipped` should not jump directly to `Done` +- `Accepted` should remain the checkpoint before `Done` + +Target behavior: + +- `qa-passed` / `qa-skipped`: `In Review` -> `Delivered` +- `qa-failed`: `In Review` -> `In Progress` +- `pm-accepted`: `Delivered` -> `Accepted` +- `pm-rejected`: `Delivered` -> `In Progress` +- `pm-skipped`: `Delivered` -> `Accepted` + +### 5. GitHub should drive truthful Linear transitions where the event is objective + +Intent: + +- PR opened/drafted can move work to `In Progress` +- real review-ready PR state can move work to `In Review` +- merge to `main` can move `Accepted` -> `Done` + +But: + +- only if QA, PM, blocker, and release truth are already satisfied +- otherwise fail closed and leave a correction comment + +### 6. Blockers should become simpler and more truthful + +Intent: + +- long-term direction is native Linear dependencies first +- generic non-ticket blockers should converge toward one `blocked` label +- blocked work should not be merge-ready + +Important truth note: + +- this was not canceled or reverted +- it was deferred into guarded execution because the live workspace still has a broader `Blocked By` surface and Control Tower alignment raised the bar for mutating it safely + +### 7. 
Current Vera-style QA must be described honestly + +Intent: + +- do not imply stronger independent Vera authority than currently exists +- preserve the useful current QA behavior +- keep the current review surface truthful until a stronger Vera runtime actually exists + +### 8. Projects should be part of the plan, but stay coarse + +Intent: + +- do not ignore projects +- do not make project statuses mirror the full team issue workflow +- keep workspace project statuses coarse +- use project templates only where they add real value +- near-term reality is still mostly one Product Development-centered workflow + +### 9. Templates should support real current usage, not speculative schema + +Intent: + +- prefer practical Product Development issue/project templates +- do not explode the template surface for future teams that are not yet active enough to justify it +- keep future expansion possible + +### 10. Control Tower should know the difference between CJ intent and implementation timing + +Intent: + +- Control Tower should know what CJ wants +- Control Tower should still decide when/how risky changes are executed +- delegation is acceptable, but the intent should not be lost or watered down + +## What was aligned vs deferred + +### Already implemented in repo + +- doctrine and change-control language aligned to Control Tower +- current names preserved +- repo engine/tests updated for: + - `qa-*` -> `Delivered` + - `pm-skipped` -> `Accepted` + - fail-closed merge readiness + - `Backlog -> Icebox 🧊 -> Stale` +- Vera prompt language corrected to truthful substitute-surface wording + +### Deferred to guarded live execution + +- live Product Development status descriptions +- live team automations +- live GitHub/Linear config alignment +- live blocker taxonomy cleanup +- live workspace project-status cleanup +- live workspace project-template setup + +Deferred does not mean rejected. 
+
+It means:
+
+- the desired direction remains active
+- the execution surface now needs snapshot, rollback, validation, and Control Tower signoff
+
+## Source lanes and artifacts
+
+Primary planning / doctrine lanes:
+
+- [BIT-175 — Linear operating model v1 rollout plan](https://linear.app/bitpod-app/issue/BIT-175/linear-operating-model-v1-rollout-plan)
+- [BIT-178 — Templates + Linear skills](https://linear.app/bitpod-app/issue/BIT-178/templates-linear-skills)
+- [BIT-186 — Investigate and fix broken Update Linear enforcement path](https://linear.app/bitpod-app/issue/BIT-186/investigate-and-fix-broken-update-linear-enforcement-path)
+- [BIT-35 — Reconfigure Linear Issues, Issue Status, & Automations as per CJ's instructions](https://linear.app/bitpod-app/issue/BIT-35/reconfigure-linear-issues-issue-status-and-automations-as-per-cjs)
+
+Guarded follow-up lanes created from this intent:
+
+- [BIT-319 — Product Development blocker cleanup toward native dependencies + minimal blocker taxonomy](https://linear.app/bitpod-app/issue/BIT-319/product-development-blocker-cleanup-toward-native-dependencies-minimal)
+- [BIT-320 — Align GitHub-triggered Linear automations with fail-closed merge readiness](https://linear.app/bitpod-app/issue/BIT-320/align-github-triggered-linear-automations-with-fail-closed-merge)
+- [BIT-321 — Audit and align Vera / Linear agent guidance with truthful QA substitute language](https://linear.app/bitpod-app/issue/BIT-321/audit-and-align-vera-linear-agent-guidance-with-truthful-qa-substitute)
+- [BIT-322 — Inventory workspace project statuses and define the coarse canonical model](https://linear.app/bitpod-app/issue/BIT-322/inventory-workspace-project-statuses-and-define-the-coarse-canonical)
+- [BIT-323 — Set up workspace project templates for Product Development standard work and release trains](https://linear.app/bitpod-app/issue/BIT-323/set-up-workspace-project-templates-for-product-development-standard)
+
+Repo-side aligned implementation 
artifacts: + +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_operating_model_v1.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_operating_guide_v3.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/src/engine.py` +- `/Users/cjarguello/BitPod-App/bitpod-tools/linear/examples/vera_linear_pr_review_prompt_v1.md` + +## Operator interpretation rule + +If Control Tower or another delegated lane needs a plain reading of CJ intent, use this file first. + +If this file conflicts with the current live workspace state: + +- treat this file as intent +- treat the live workspace as current state +- use the guarded change-control and validation lanes to close the gap diff --git a/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md b/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md new file mode 100644 index 0000000..8656747 --- /dev/null +++ b/linear/docs/process/linear_process_v1_1_control_tower_change_proposal_2026-04-15.md @@ -0,0 +1,96 @@ +# Linear Process v1.1 Control Tower Change Proposal + +Use this proposal as the guarded-lane artifact for the 2026-04-15 Product Development workflow alignment pass. 
+ +## Summary + +- change id: `linear-process-v1-1-control-tower-2026-04-15` +- date_utc: `2026-04-15` +- owner: `Product Development` +- mode: `high_autonomy_with_audit` + +## What changes + +- preserve current live workflow names: `Backlog`, `Ready`, `In Progress`, `In Review`, `Delivered`, `Accepted`, `Done` +- tighten gate semantics so `qa-*` results move `In Review` -> `Delivered` +- tighten PM semantics so `pm-accepted` and `pm-skipped` move `Delivered` -> `Accepted` +- require merge-readiness truth before merge-driven `Accepted` -> `Done` +- move inactivity closure toward `Backlog -> Icebox 🧊 -> Stale` +- keep `Obsolete` as legacy / edge-case, not the primary inactivity sink +- align active Vera-style QA guidance with truthful substitute-surface language +- add explicit Control Tower validation language to the active operating docs + +## Why now + +- the active docs, engine, and prompts were drifting on `pm-skipped`, merge-driven closure, and inactivity handling +- current workflow/admin/guidance changes need a stronger Control Tower-aligned validation boundary +- the active Vera prompt was overclaiming independent QA authority relative to the current runtime reality + +## Temporary divergence from target state (if applicable) + +- target-state rule being bypassed: + - live Linear team/project admin config is not mutated by this repo patch alone +- reason for bypass: + - repo-side doctrine, engine, tests, and prompt surfaces can be aligned now, while live admin mutations still require UI-level change execution plus Control Tower validation +- scope and duration: + - until the guarded admin lanes for Product Development workflow/config are executed and evidenced +- rollback or normalization plan: + - revert the repo patch if it proves incorrect, or complete the matching live admin/config changes under the follow-up guarded lanes + +## Blast radius + +- affected teams, projects, statuses, labels, templates, automations: + - Product Development team doctrine + 
- repo-side Linear engine and tests + - Vera-style QA prompt/guidance + - README / operator references +- estimated number of affected entities: + - docs: 5+ + - code/test surfaces: 4+ + - live Linear config: 0 directly mutated by this patch + +## Pre-change snapshot + +- snapshot source or location: + - live Product Development issue statuses observed before patch: + - `Icebox 🧊`, `Backlog`, `Ready`, `In Progress`, `In Review`, `Delivered`, `Accepted`, `Done`, `Canceled`, `Duplicate`, `Obsolete`, `Won't Do`, `Stale` + - live label groups observed before patch: + - `Issue Type` + - `Blocked By` + - `QA Review` + - `PM Review` + - repo-side baseline: + - `linear_operating_model_v1.md` + - `linear_operating_guide_v3.md` + - `linear/src/engine.py` + - `linear/examples/vera_linear_pr_review_prompt_v1.md` +- key entities before change: + - `pm-skipped` path still ended in `Done` + - non-acceptance work still allowed `In Review` -> `Done` + - daily aging still closed `Icebox 🧊` work into `Obsolete` + - Vera prompt still described itself as independent QA without the substitute-surface caveat + +## Rollback plan + +- rollback trigger: + - tests fail in a way that exposes invalid assumptions about current workflow semantics + - Control Tower validation rejects the artifact package or identifies truth-surface mismatch +- rollback steps: + - revert the affected repo files + - restore the prior `pm-skipped`, aging, and prompt semantics + - remove the changelog/proposal references +- estimated rollback time: + - under 30 minutes for repo-side revert + +## Post-change validation + +- [x] labels and templates intact +- [x] status mappings valid in repo doctrine and tests +- [x] active issues still traceable +- [x] blocked flow intact +- [x] ownership fields intact + +## Outcome notes + +- this artifact documents the repo-side alignment only +- live Linear admin/config mutation still requires the guarded follow-up lanes and Control Tower validation package diff --git 
a/linear/examples/vera_linear_pr_review_prompt_v1.md b/linear/examples/vera_linear_pr_review_prompt_v1.md index 1dec8e6..477696a 100644 --- a/linear/examples/vera_linear_pr_review_prompt_v1.md +++ b/linear/examples/vera_linear_pr_review_prompt_v1.md @@ -3,10 +3,16 @@ Use this as a cheap interim prompt for the Linear bot. Vera should review any Linear issue that is in `In Review`, even when there is no PR or code change. +Truth note: + +- this is a truthful Vera-style QA substitute surface +- it is not proof of an embodied independent Vera runtime +- do not claim stronger QA authority than the current execution surface actually provides + ## Copy-paste prompt ```md -Act as Vera, the independent QA specialist. +Act as the current Vera-style QA reviewer. Your job is only to decide QA verdict and return evidence. @@ -53,11 +59,13 @@ Rules: - Do not give a casual “looks good” - Every critical acceptance criterion needs either pass evidence or one reproducible failure - Optional fix hints are allowed only if obvious and low-risk, max 3 bullets +- If the issue is blocked by missing QA prerequisites, say so explicitly instead of implying a pass +- If CJ waived something, describe that as waiver, not QA Important: - keep this as a cheap interim Linear-first QA pass - do not try to recreate old Zulip artifacts like `session_summary.md`, `worth_remembering.json`, or SHA bundles -- preserve independent QA authority +- preserve truthful QA language; do not imply a stronger independent Vera runtime than actually exists - if the issue is not in `In Review`, do not run QA - if the issue is in `In Review`, run QA even when there is no PR ``` diff --git a/linear/src/engine.py b/linear/src/engine.py index 590e32f..1ad4d3a 100644 --- a/linear/src/engine.py +++ b/linear/src/engine.py @@ -18,7 +18,14 @@ ] CANONICAL_TYPES = {"Plan", "Feature", "Bug", "Chore", "Design", "Release"} -ACCEPTANCE_REQUIRED_TYPES = {"Plan", "Release", "Feature", "Design"} +TYPE_LABEL_ALIASES = { + "📄 Plan": 
"Plan", + "⭐️ Feature": "Feature", + "🐞 Bug": "Bug", + "⚙️ Chore": "Chore", + "🎨 Design": "Design", + "🏁 Release": "Release", +} VALID_ESTIMATES = {1, 2, 3, 5, 8} @@ -42,10 +49,12 @@ class BotConfig: accepted_status: str = "Accepted" done_status: str = "Done" icebox_status: str = "Icebox 🧊" + stale_status: str = "Stale" obsolete_status: str = "Obsolete" type_group: str = "Issue Type" blocked_group: str = "Blocked By" + blocked_label: str = "blocked" qa_gate_group: str = "QA Review" acceptance_gate_group: str = "PM Review" @@ -94,11 +103,50 @@ def _extract_pr_url_token(self, comment_body: str) -> str: def _labels(self, issue_labels: Optional[List[str]]) -> Set[str]: return set(issue_labels or []) - def _type_labels(self, labels: Set[str]) -> Set[str]: - return labels.intersection(CANONICAL_TYPES) + def _normalize_type_label(self, label: str) -> Optional[str]: + if not label: + return None + cleaned = label.strip() + if cleaned in CANONICAL_TYPES: + return cleaned + if cleaned in TYPE_LABEL_ALIASES: + return TYPE_LABEL_ALIASES[cleaned] + if cleaned.startswith("Type: "): + cleaned = cleaned[len("Type: ") :].strip() + if cleaned in CANONICAL_TYPES: + return cleaned + if cleaned in TYPE_LABEL_ALIASES: + return TYPE_LABEL_ALIASES[cleaned] + return None - def _requires_acceptance(self, labels: Set[str]) -> bool: - return bool(self._type_labels(labels).intersection(ACCEPTANCE_REQUIRED_TYPES)) + def _type_labels(self, labels: Set[str]) -> Set[str]: + return {normalized for label in labels if (normalized := self._normalize_type_label(label))} + + def _is_release(self, labels: Set[str]) -> bool: + return "Release" in self._type_labels(labels) + + def _has_native_blockers(self, issue: Dict[str, Any]) -> bool: + candidates = ( + issue.get("blockedBy"), + issue.get("blocked_by"), + issue.get("blockingIssues"), + issue.get("blocking_issues"), + issue.get("dependencies"), + ) + for candidate in candidates: + if candidate: + return True + relations = issue.get("relations", {}) + if 
isinstance(relations, dict) and relations.get("blockedBy"): + return True + return False + + def _has_blocker_signal(self, issue: Dict[str, Any], labels: Set[str]) -> bool: + if self.cfg.blocked_label in labels: + return True + if any(label.startswith("needs-") for label in labels): + return True + return self._has_native_blockers(issue) def _has_valid_estimate(self, issue: Dict[str, Any]) -> bool: raw = issue.get("estimate") @@ -144,7 +192,7 @@ def on_github_pr_ready_for_review(self, event: Dict[str, Any]) -> List[Action]: "linear", "comment", issue_key, - {"body": f"PR in review: {pr.get('html_url', '')}. Pending QA is now expressed by `In Review`."}, + {"body": f"PR in review: {pr.get('html_url', '')}. The current Product Development review gate is now expressed by `In Review`."}, ), ] @@ -171,7 +219,7 @@ def on_linear_ready_gate(self, issue: Dict[str, Any]) -> List[Action]: "comment", issue_key, { - "body": "Missing or invalid `Issue Type`. Set exactly one of: `Plan` `Feature` `Bug` `Chore` `Design` `Release`" + "body": "Missing or invalid `Issue Type`. 
Set exactly one canonical type label: `📄 Plan` `⭐️ Feature` `🐞 Bug` `⚙️ Chore` `🎨 Design` `🏁 Release`" }, ), Action("linear", "set_status", issue_key, {"status": self.cfg.backlog_status}), @@ -231,7 +279,6 @@ def on_linear_comment( labels = self._labels(issue_labels) target_pr_url = self._extract_pr_url_token(comment_body) or pr_url issue_ref = issue_url or issue_key - acceptance_required = self._requires_acceptance(labels) summary = "\n".join((comment_body or "").splitlines()[:10]) if token == "FAILED": @@ -245,9 +292,9 @@ def on_linear_comment( return actions gate_value = self.cfg.qa_skipped if token == "SKIPPED" else self.cfg.qa_passed - next_status = self.cfg.delivered_status if acceptance_required else self.cfg.done_status + next_status = self.cfg.delivered_status result_text = "QA SKIPPED" if token == "SKIPPED" else "QA PASSED" - next_text = "Delivered" if acceptance_required else "Done" + next_text = "Delivered" actions = [ Action("linear", "set_label", issue_key, {"group": self.cfg.qa_gate_group, "value": gate_value}), Action("linear", "set_status", issue_key, {"status": next_status}), @@ -257,12 +304,40 @@ def on_linear_comment( ) return actions - def on_linear_acceptance_gate_change(self, issue_key: str, gate_value: str, pr_url: str = "") -> List[Action]: + def on_linear_acceptance_gate_change( + self, + issue_key: str, + gate_value: str, + pr_url: str = "", + reason: str = "", + ) -> List[Action]: if gate_value == self.cfg.pm_rejected: + rejection_reason = reason.strip() actions = [ Action("linear", "set_status", issue_key, {"status": self.cfg.in_progress_status}), + Action( + "linear", + "comment", + issue_key, + { + "body": ( + f"PM rejected; moved back to `In Progress`. Reason: {rejection_reason}" + if rejection_reason + else "PM rejected; moved back to `In Progress`. Add the rejection reason artifact before resuming work." + ) + }, + ), ] - actions.extend(self._github_comment(pr_url, f"ACCEPTANCE REJECTED. 
See Linear for notes: {issue_key}")) + actions.extend( + self._github_comment( + pr_url, + ( + f"ACCEPTANCE REJECTED. Reason: {rejection_reason}. See Linear: {issue_key}" + if rejection_reason + else f"ACCEPTANCE REJECTED. Reason required in Linear before resuming work: {issue_key}" + ), + ) + ) return actions if gate_value == self.cfg.pm_accepted: @@ -276,10 +351,10 @@ def on_linear_acceptance_gate_change(self, issue_key: str, gate_value: str, pr_u if gate_value == self.cfg.pm_skipped: actions = [ - Action("linear", "set_status", issue_key, {"status": self.cfg.done_status}), + Action("linear", "set_status", issue_key, {"status": self.cfg.accepted_status}), ] actions.extend( - self._github_comment(pr_url, "ACCEPTANCE SKIPPED BY APPROVED OVERRIDE. Linear status moved to Done.") + self._github_comment(pr_url, "ACCEPTANCE SKIPPED BY APPROVED OVERRIDE. Linear status moved to Accepted.") ) return actions @@ -291,25 +366,34 @@ def on_linear_pm_label_change(self, issue_key: str, pm_value: str, pr_url: str = def on_github_pr_merged(self, issue: Dict[str, Any], pr_url: str, merge_sha: str) -> List[Action]: issue_key = issue.get("identifier", "") labels = self._labels(issue.get("labels", [])) - acceptance_required = self._requires_acceptance(labels) qa_ok = bool({self.cfg.qa_passed, self.cfg.qa_skipped}.intersection(labels)) - acceptance_ok = (not acceptance_required) or bool( - {self.cfg.pm_accepted, self.cfg.pm_skipped}.intersection(labels) - ) + acceptance_ok = bool({self.cfg.pm_accepted, self.cfg.pm_skipped}.intersection(labels)) + status_ok = issue.get("status", "") == self.cfg.accepted_status + blocked = self._has_blocker_signal(issue, labels) + release_issue = self._is_release(labels) - if qa_ok and acceptance_ok: - actions: List[Action] = [] - if acceptance_required and self.cfg.pm_accepted in labels: - actions.append(Action("linear", "set_status", issue_key, {"status": self.cfg.done_status})) - actions.append( + if qa_ok and acceptance_ok and status_ok and not 
blocked and not release_issue: + return [ + Action("linear", "set_status", issue_key, {"status": self.cfg.done_status}), Action( "linear", "comment", issue_key, - {"body": f"Merged recorded: {pr_url} | SHA: {merge_sha}. Final closure can now move through the standard path."}, - ) - ) - return actions + {"body": f"Merged recorded: {pr_url} | SHA: {merge_sha}. Linear status moved to `Done` because merge-readiness gates were already satisfied."}, + ), + ] + + reasons: List[str] = [] + if not qa_ok: + reasons.append("missing qa-passed/qa-skipped") + if not acceptance_ok: + reasons.append("missing pm-accepted/pm-skipped") + if not status_ok: + reasons.append("status is not Accepted") + if blocked: + reasons.append("issue is blocked by dependencies or blocker labels") + if release_issue: + reasons.append("Release issues do not auto-close from normal merge flow") return [ Action( @@ -317,7 +401,7 @@ def on_github_pr_merged(self, issue: Dict[str, Any], pr_url: str, merge_sha: str "comment", issue_key, { - "body": "Merged detected but workflow gates are incomplete (need qa-passed/qa-skipped and, for acceptance-required work, pm-accepted/pm-skipped). Manual review required." + "body": "Merged detected but Linear closure is blocked: " + "; ".join(reasons) + ". Manual review required." 
}, ) ] @@ -337,15 +421,24 @@ def daily_aging_scan(self, issues: List[Dict[str, Any]], now: Optional[datetime] if status == self.cfg.backlog_status and idle_days >= 30: out.append(Action("linear", "set_status", key, {"status": self.cfg.icebox_status})) - out.append(Action("linear", "comment", key, {"body": "Auto-moved to Icebox after 30d inactivity."})) - elif status == self.cfg.icebox_status and idle_days >= 60: - out.append(Action("linear", "set_status", key, {"status": self.cfg.obsolete_status})) out.append( Action( "linear", "comment", key, - {"body": "Auto-closed as Obsolete after 60d inactivity in Icebox."}, + {"body": "Auto-moved from `Backlog` to `Icebox 🧊` after 30d inactivity."}, + ) + ) + elif status == self.cfg.icebox_status and idle_days >= 30: + out.append(Action("linear", "set_status", key, {"status": self.cfg.stale_status})) + out.append( + Action( + "linear", + "comment", + key, + { + "body": "Auto-moved from `Icebox 🧊` to `Stale` after 30d inactivity in `Icebox 🧊`. This ticket can be reopened later if it becomes relevant again." 
+ }, ) ) diff --git a/linear/src/runtime.py b/linear/src/runtime.py index 25dd2c7..5abab49 100644 --- a/linear/src/runtime.py +++ b/linear/src/runtime.py @@ -73,6 +73,7 @@ def run_linear_event(self, event: Dict[str, Any]) -> List[Action]: issue_key=event.get("issue_key", ""), gate_value=event.get("pm_value", "") or event.get("gate_value", ""), pr_url=event.get("pr_url", ""), + reason=event.get("reason", ""), ) elif kind == "daily_aging_scan": out = self.engine.daily_aging_scan(event.get("issues", [])) diff --git a/linear/src/simulate_e2e.py b/linear/src/simulate_e2e.py index 72709ba..ae95f9b 100644 --- a/linear/src/simulate_e2e.py +++ b/linear/src/simulate_e2e.py @@ -71,7 +71,7 @@ def main() -> int: } step4 = rt.run_linear_event(acceptance) - # 5) PR merged -> record merge only; gate state already drove status + # 5) PR merged -> Done only if merge-readiness gates were already satisfied merged = { "action": "closed", "pull_request": { @@ -82,6 +82,7 @@ def main() -> int: }, "linear_issue": { "identifier": issue_key, + "status": "Accepted", "labels": ["Feature", "qa-passed", "pm-accepted"], }, } diff --git a/linear/tests/test_e2e_flow.py b/linear/tests/test_e2e_flow.py index b7e1084..d6dbf12 100644 --- a/linear/tests/test_e2e_flow.py +++ b/linear/tests/test_e2e_flow.py @@ -64,6 +64,7 @@ def test_feature_happy_path_to_done_via_accepted(self): }, "linear_issue": { "identifier": "BIT-45", + "status": "Accepted", "labels": ["Feature", "qa-passed", "pm-accepted"], }, } diff --git a/linear/tests/test_engine.py b/linear/tests/test_engine.py index 269b603..feb31e8 100644 --- a/linear/tests/test_engine.py +++ b/linear/tests/test_engine.py @@ -61,6 +61,17 @@ def test_ready_gate_missing_estimate(self): actions = self.bot.on_linear_ready_gate(issue) self.assertTrue(any(a.kind == "set_label" and a.payload["value"] == "needs-estimate" for a in actions)) + def test_ready_gate_accepts_emoji_type_label(self): + issue = { + "identifier": "BIT-45", + "status": "Ready", + "labels": 
["⭐️ Feature"], + "estimate": 3, + "description": "Objective\nScope\nRequired outputs\nVerification plan\nRollback note\nAcceptance / closure criteria", + } + actions = self.bot.on_linear_ready_gate(issue) + self.assertEqual(actions, []) + def test_ready_gate_plan_parent_requires_estimate(self): issue = { "identifier": "BIT-45", @@ -102,14 +113,14 @@ def test_linear_comment_passed_acceptance_required_goes_to_delivered(self): self.assertEqual(actions[0].payload["value"], "qa-passed") self.assertEqual(actions[1].payload["status"], "Delivered") - def test_linear_comment_passed_non_acceptance_work_goes_to_done(self): + def test_linear_comment_passed_non_acceptance_work_goes_to_delivered(self): actions = self.bot.on_linear_comment( "BIT-45", "QA_RESULT=PASSED\nok", "https://pr", issue_labels=["Chore"], ) - self.assertEqual(actions[1].payload["status"], "Done") + self.assertEqual(actions[1].payload["status"], "Delivered") def test_linear_comment_skipped_sets_skip_gate(self): actions = self.bot.on_linear_comment( @@ -133,8 +144,10 @@ def test_linear_comment_without_pr_link_still_updates_linear(self): self.assertEqual(actions[1].system, "linear") def test_acceptance_rejected(self): - actions = self.bot.on_linear_acceptance_gate_change("BIT-45", "pm-rejected", "https://pr") + actions = self.bot.on_linear_acceptance_gate_change("BIT-45", "pm-rejected", "https://pr", "missing acceptance criteria") self.assertEqual(actions[0].payload["status"], "In Progress") + self.assertEqual(actions[1].kind, "comment") + self.assertIn("missing acceptance criteria", actions[1].payload["body"]) def test_acceptance_accepted(self): actions = self.bot.on_linear_acceptance_gate_change("BIT-45", "pm-accepted", "https://pr") @@ -142,35 +155,63 @@ def test_acceptance_accepted(self): def test_acceptance_skipped(self): actions = self.bot.on_linear_acceptance_gate_change("BIT-45", "pm-skipped", "https://pr") - self.assertEqual(actions[0].payload["status"], "Done") + 
self.assertEqual(actions[0].payload["status"], "Accepted") def test_merge_gate_fail_closed_for_feature_without_acceptance_gate(self): - issue = {"identifier": "BIT-45", "labels": ["Feature", "qa-passed"]} + issue = {"identifier": "BIT-45", "labels": ["Feature", "qa-passed"], "status": "Delivered"} actions = self.bot.on_github_pr_merged(issue, "https://pr", "sha") self.assertEqual(len(actions), 1) self.assertEqual(actions[0].kind, "comment") - self.assertIn("workflow gates are incomplete", actions[0].payload["body"]) + self.assertIn("missing pm-accepted/pm-skipped", actions[0].payload["body"]) + self.assertIn("status is not Accepted", actions[0].payload["body"]) def test_merge_gate_pass_for_feature_with_acceptance_gate_sets_done(self): - issue = {"identifier": "BIT-45", "labels": ["Feature", "qa-passed", "pm-accepted"]} + issue = {"identifier": "BIT-45", "labels": ["Feature", "qa-passed", "pm-accepted"], "status": "Accepted"} + actions = self.bot.on_github_pr_merged(issue, "https://pr", "sha") + self.assertEqual(actions[0].kind, "set_status") + self.assertEqual(actions[0].payload["status"], "Done") + self.assertEqual(actions[1].kind, "comment") + self.assertIn("Merged recorded", actions[1].payload["body"]) + + def test_merge_gate_pass_for_chore_with_pm_skip(self): + issue = {"identifier": "BIT-45", "labels": ["Chore", "qa-passed", "pm-skipped"], "status": "Accepted"} actions = self.bot.on_github_pr_merged(issue, "https://pr", "sha") self.assertEqual(actions[0].kind, "set_status") self.assertEqual(actions[0].payload["status"], "Done") self.assertEqual(actions[1].kind, "comment") self.assertIn("Merged recorded", actions[1].payload["body"]) - def test_merge_gate_pass_for_chore_with_qa_gate_only(self): - issue = {"identifier": "BIT-45", "labels": ["Chore", "qa-passed"]} + def test_merge_gate_accepts_emoji_type_label(self): + issue = {"identifier": "BIT-45", "labels": ["⚙️ Chore", "qa-passed", "pm-accepted"], "status": "Accepted"} + actions = 
self.bot.on_github_pr_merged(issue, "https://pr", "sha") + self.assertEqual(actions[0].kind, "set_status") + self.assertEqual(actions[0].payload["status"], "Done") + + def test_merge_gate_fail_closed_for_release(self): + issue = {"identifier": "BIT-45", "labels": ["Release", "qa-passed", "pm-accepted"], "status": "Accepted"} + actions = self.bot.on_github_pr_merged(issue, "https://pr", "sha") + self.assertEqual(actions[0].kind, "comment") + self.assertIn("Release issues do not auto-close", actions[0].payload["body"]) + + def test_merge_gate_fail_closed_when_blocked(self): + issue = {"identifier": "BIT-45", "labels": ["Feature", "qa-passed", "pm-accepted", "blocked"], "status": "Accepted"} actions = self.bot.on_github_pr_merged(issue, "https://pr", "sha") self.assertEqual(actions[0].kind, "comment") - self.assertIn("Merged recorded", actions[0].payload["body"]) + self.assertIn("issue is blocked", actions[0].payload["body"]) - def test_aging_scan(self): + def test_aging_scan_backlog_to_icebox(self): now = datetime(2026, 3, 4, tzinfo=timezone.utc) issues = [{"identifier": "BIT-1", "status": "Backlog", "updatedAt": "2026-01-01T00:00:00Z"}] actions = self.bot.daily_aging_scan(issues, now=now) self.assertTrue(any(a.kind == "set_status" and a.payload["status"] == "Icebox 🧊" for a in actions)) + def test_aging_scan_icebox_to_stale_with_reopen_comment(self): + now = datetime(2026, 3, 4, tzinfo=timezone.utc) + issues = [{"identifier": "BIT-2", "status": "Icebox 🧊", "updatedAt": "2026-01-01T00:00:00Z"}] + actions = self.bot.daily_aging_scan(issues, now=now) + self.assertTrue(any(a.kind == "set_status" and a.payload["status"] == "Stale" for a in actions)) + self.assertTrue(any(a.kind == "comment" and "can be reopened later" in a.payload["body"] for a in actions)) + if __name__ == "__main__": unittest.main() diff --git a/scripts/enforce_local_cleanup_retention.sh b/scripts/enforce_local_cleanup_retention.sh new file mode 100755 index 0000000..e6d9a1c --- /dev/null +++ 
b/scripts/enforce_local_cleanup_retention.sh @@ -0,0 +1,196 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_REF="${BASH_SOURCE[0]:-$0}" +SCRIPT_DIR="$(cd -- "$(dirname "$SCRIPT_REF")" && pwd)" +ROOT_DEFAULT="$(cd -- "$SCRIPT_DIR/../.." && pwd)" +ROOT="${BITPOD_APP_ROOT:-${WORKSPACE:-$ROOT_DEFAULT}}" +CONTRACT_FILE="${BITPOD_LOCAL_WORKSPACE_CONTRACT_FILE:-$ROOT/bitpod-docs/process/local-workspace-skeleton-contract.toml}" +PROFILE="${BITPOD_LOCAL_WORKSPACE_PROFILE:-personal_machine_full}" + +PROHIBITED_DAYS="${BITPOD_PROHIBITED_PATH_RETENTION_DAYS:-30}" +TRASH_TO_PURGE_DAYS="${BITPOD_LOCAL_TRASH_PURGE_DAYS:-30}" +PURGE_TO_OS_TRASH_DAYS="${BITPOD_LOCAL_PURGE_OS_TRASH_DAYS:-30}" +OS_TRASH_DIR="${BITPOD_OS_TRASH_DIR:-$HOME/.Trash}" + +EXECUTE=0 +TODAY_UTC="$(date -u +%F)" + +if [[ "${1:-}" == "--execute" ]]; then + EXECUTE=1 +elif [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then + cat <&2 + exit 1 +fi + +contract_values="$( +python3 - "$CONTRACT_FILE" "$PROFILE" <<'PY' +import shlex +import sys +try: + import tomllib +except ModuleNotFoundError: + import tomli as tomllib + +contract_path = sys.argv[1] +profile = sys.argv[2] +with open(contract_path, "rb") as fh: + data = tomllib.load(fh) +aliases = data.get("profile_aliases", {}) +profile = aliases.get(profile, profile) +profiles = data.get("profiles", {}) +if profile not in profiles: + raise SystemExit(f"profile not found: {profile}") +required = profiles[profile].get("required_paths", []) +optional = profiles[profile].get("optional_paths", []) +allowed = sorted({p.split("/", 1)[0] for p in required + optional}) +print("PROFILE_RESOLVED=" + shlex.quote(profile)) +print("ALLOWED_DIRECT=" + shlex.quote("|".join(allowed))) +PY +)" +eval "$contract_values" + +LOCAL_WORKSPACE="$ROOT/local-workspace" +TRASH_ROOT="$LOCAL_WORKSPACE/local-trash-delete" +PURGE_ROOT="$TRASH_ROOT/local-purge" + +if [[ ! 
-d "$LOCAL_WORKSPACE" ]]; then + echo "error: local-workspace missing: $LOCAL_WORKSPACE" >&2 + exit 1 +fi + +mkdir -p "$TRASH_ROOT" "$PURGE_ROOT" + +is_allowed_child() { + local child="$1" + local allowed="${ALLOWED_DIRECT:-}" + local item + IFS='|' read -r -a arr <<< "$allowed" + for item in "${arr[@]}"; do + [[ "$child" == "$item" ]] && return 0 + done + return 1 +} + +is_older_than_days() { + local path="$1" + local days="$2" + find "$path" -prune -mtime +"$days" | grep -q . +} + +unique_path() { + local target="$1" + if [[ ! -e "$target" ]]; then + printf '%s\n' "$target" + return + fi + local n=1 + while [[ -e "${target}-${n}" ]]; do + n=$((n + 1)) + done + printf '%s\n' "${target}-${n}" +} + +move_or_plan() { + local src="$1" + local dst="$2" + if [[ "$EXECUTE" -eq 1 ]]; then + mkdir -p "$(dirname "$dst")" + dst="$(unique_path "$dst")" + mv "$src" "$dst" + echo "executed: mv $src -> $dst" + else + echo "plan: mv $src -> $dst" + fi +} + +profile_is_lean=0 +if [[ "$PROFILE_RESOLVED" == "taylor01_execution_hq_lean" ]]; then + profile_is_lean=1 +fi + +prohibited_candidates=0 +prohibited_swept=0 +trash_candidates=0 +trash_promoted=0 +purge_candidates=0 +purge_moved=0 + +while IFS= read -r child; do + [[ -z "$child" ]] && continue + name="$(basename "$child")" + if is_allowed_child "$name"; then + continue + fi + prohibited_candidates=$((prohibited_candidates + 1)) + if is_older_than_days "$child" "$PROHIBITED_DAYS"; then + dest="$TRASH_ROOT/local-prohibited-path-sweep-$TODAY_UTC/$name" + move_or_plan "$child" "$dest" + prohibited_swept=$((prohibited_swept + 1)) + else + echo "hold: prohibited path not old enough: $child" + fi +done < <(find "$LOCAL_WORKSPACE" -mindepth 1 -maxdepth 1 -type d | sort) + +while IFS= read -r bucket; do + [[ -z "$bucket" ]] && continue + [[ "$(basename "$bucket")" == "local-purge" ]] && continue + trash_candidates=$((trash_candidates + 1)) + stale_count="$(find "$bucket" -type f -mtime +"$TRASH_TO_PURGE_DAYS" 2>/dev/null | wc -l | tr -d 
' ')" + if [[ "${stale_count:-0}" -gt 0 ]]; then + dest="$PURGE_ROOT/$(basename "$bucket")" + move_or_plan "$bucket" "$dest" + trash_promoted=$((trash_promoted + 1)) + else + echo "hold: trash bucket not old enough: $bucket" + fi +done < <(find "$TRASH_ROOT" -mindepth 1 -maxdepth 1 -type d | sort) + +while IFS= read -r file_path; do + [[ -z "$file_path" ]] && continue + purge_candidates=$((purge_candidates + 1)) + if [[ "$profile_is_lean" -eq 1 ]]; then + if [[ "$EXECUTE" -eq 1 ]]; then + mkdir -p "$OS_TRASH_DIR" + dest="$OS_TRASH_DIR/$(basename "$file_path")" + dest="$(unique_path "$dest")" + mv "$file_path" "$dest" + echo "executed: mv $file_path -> $dest" + purge_moved=$((purge_moved + 1)) + else + echo "plan: os-trash $file_path -> $OS_TRASH_DIR/" + fi + else + echo "reminder: stale local-purge file needs manual delete (human profile): $file_path" + fi +done < <(find "$PURGE_ROOT" -type f -mtime +"$PURGE_TO_OS_TRASH_DAYS" 2>/dev/null | sort) + +echo +echo "Cleanup retention summary" +echo "- profile=$PROFILE_RESOLVED" +echo "- mode=$( [[ "$EXECUTE" -eq 1 ]] && echo EXECUTE || echo REPORT )" +echo "- prohibited_days=$PROHIBITED_DAYS" +echo "- trash_to_purge_days=$TRASH_TO_PURGE_DAYS" +echo "- purge_to_os_trash_days=$PURGE_TO_OS_TRASH_DAYS" +echo "- prohibited_candidates=$prohibited_candidates" +echo "- prohibited_swept=$prohibited_swept" +echo "- trash_candidates=$trash_candidates" +echo "- trash_promoted_to_purge=$trash_promoted" +echo "- purge_candidates=$purge_candidates" +echo "- purge_moved_to_os_trash=$purge_moved" +if [[ "$profile_is_lean" -eq 0 && "$purge_candidates" -gt 0 ]]; then + echo "- note=human-machine profile uses reminder-only for stale local-purge files" +fi diff --git a/scripts/run_scheduled_cleanup_audit.sh b/scripts/run_scheduled_cleanup_audit.sh index 27107ed..d7265a6 100755 --- a/scripts/run_scheduled_cleanup_audit.sh +++ b/scripts/run_scheduled_cleanup_audit.sh @@ -5,10 +5,12 @@ SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" && 
pwd)" ROOT_DEFAULT="$(cd -- "$SCRIPT_DIR/../.." && pwd)" ROOT="${BITPOD_APP_ROOT:-${WORKSPACE:-$ROOT_DEFAULT}}" AUDIT_CTL="$ROOT/bitpod-tools/audit_ctl.sh" +RETENTION_CTL="$ROOT/bitpod-tools/scripts/enforce_local_cleanup_retention.sh" STATE_DIR="$ROOT/local-workspace/local-working-files/local-cleanup-audit" STATE_FILE="$STATE_DIR/scheduled_cleanup_state.env" LATEST_REPORT="$STATE_DIR/latest_scheduled_cleanup.md" LINEAR_PAYLOAD="$STATE_DIR/latest_linear_escalation.md" +RETENTION_REPORT="$STATE_DIR/latest_retention_enforcement.md" TIMESTAMP="$(date -u +%Y-%m-%dT%H:%M:%SZ)" date_plus_14_days() { @@ -46,6 +48,58 @@ else run_exit=$? fi +local_workspace_profile="${BITPOD_LOCAL_WORKSPACE_PROFILE:-personal_machine_full}" +retention_mode="${BITPOD_SCHEDULED_RETENTION_MODE:-auto}" +retention_note="" +retention_output="" +retention_exit=0 +retention_execute=0 + +if [[ "$retention_mode" == "auto" ]]; then + if [[ "$local_workspace_profile" == "taylor01_execution_hq_lean" ]]; then + retention_execute=1 + fi +elif [[ "$retention_mode" == "execute" ]]; then + retention_execute=1 +elif [[ "$retention_mode" == "off" ]]; then + retention_note="retention enforcement skipped (mode=off)" +fi + +if [[ -z "$retention_note" ]]; then + if [[ -x "$RETENTION_CTL" ]]; then + if [[ "$retention_execute" -eq 1 ]]; then + if retention_output="$(BITPOD_APP_ROOT="$ROOT" "$RETENTION_CTL" --execute)"; then + retention_exit=0 + else + retention_exit=$? + fi + retention_note="retention enforcement mode=execute profile=$local_workspace_profile" + else + if retention_output="$(BITPOD_APP_ROOT="$ROOT" "$RETENTION_CTL")"; then + retention_exit=0 + else + retention_exit=$? 
+ fi + retention_note="retention enforcement mode=report profile=$local_workspace_profile" + fi + else + retention_note="retention enforcement script missing or not executable: $RETENTION_CTL" + fi +fi + +{ + echo "# Scheduled Retention Enforcement" + echo + echo "- timestamp: $TIMESTAMP" + echo "- profile: $local_workspace_profile" + echo "- mode: $retention_mode" + echo "- execute: $( [[ "$retention_execute" -eq 1 ]] && echo yes || echo no )" + echo "- exit_code: $retention_exit" + echo "- note: $retention_note" + echo + printf '%s\n' "$retention_output" +} > "$RETENTION_REPORT" + { echo "# Scheduled Cleanup Audit" echo @@ -53,6 +107,8 @@ fi echo "- mode: scheduled-report-only" echo "- command: run T3 audit" echo "- exit_code: $run_exit" + echo "- retention_report: $RETENTION_REPORT" + echo "- retention_note: $retention_note" echo printf '%s\n' "$report" } > "$LATEST_REPORT" diff --git a/tools/taylor01/README.md b/tools/taylor01/README.md index f8b6294..a46e2b2 100644 --- a/tools/taylor01/README.md +++ b/tools/taylor01/README.md @@ -14,7 +14,12 @@ Subdirectories: - `policy/` for workspace and operating norms - `adapters/` for third-party tool contracts and implementations +Active example: + +- `core/agents/vera/` is the canonical portable home for Vera's first-class QA agent definition +- `adapters/openai/vera/` contains OpenAI-native execution adapters +- `adapters/openclaw/vera/` contains the secondary OpenClaw mapping layer + Do not migrate content here casually. Only move artifacts when their Taylor01 ownership is clear enough to reduce coupling rather than create confusion. - diff --git a/tools/taylor01/adapters/README.md b/tools/taylor01/adapters/README.md index 818a23a..1c8cce0 100644 --- a/tools/taylor01/adapters/README.md +++ b/tools/taylor01/adapters/README.md @@ -1,4 +1,7 @@ # Taylor01 Adapters -Reserved for reusable contracts and implementations for third-party systems such as Linear, GitHub, Discord, Slack, and Jira. 
+Reserved for reusable contracts and implementations for third-party systems such as Linear, GitHub, Discord, Slack, Jira, OpenAI runtimes, and OpenClaw projections. +Current Vera adapter surfaces: +- `openai/vera/` = OpenAI-native execution adapters +- `openclaw/vera/` = secondary OpenClaw compatibility mapping layer diff --git a/tools/taylor01/adapters/openai/vera/bridge_runtime.py b/tools/taylor01/adapters/openai/vera/bridge_runtime.py new file mode 100644 index 0000000..f281eaa --- /dev/null +++ b/tools/taylor01/adapters/openai/vera/bridge_runtime.py @@ -0,0 +1,490 @@ +#!/usr/bin/env python3 +"""Thin OpenAI-native Vera QA runtime on top of the GPT Bridge.""" + +from __future__ import annotations + +import argparse +import json +import subprocess +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +CURRENT = Path(__file__).resolve() +for parent in CURRENT.parents: + if (parent / 'AGENTS.md').exists() and (parent / 'tools' / 'taylor01').exists(): + if str(parent) not in sys.path: + sys.path.insert(0, str(parent)) + break + +from tools.taylor01.core.agents.vera.contract import ( + PortableReviewResult, + PortableReviewTarget, + build_manifest, + build_system_prompt, + render_verification_report, +) + +SCRIPT_DIR = Path(__file__).resolve().parents[5] / 'gpt_bridge' +ASK_ONCE = SCRIPT_DIR / 'ask_once.sh' +DEFAULT_VERA_MODEL = 'gpt-5.2' +DEFAULT_MAX_TOKENS = 2200 +MAX_CONTEXT_CHARS_PER_FILE = 6000 +MAX_TOTAL_CONTEXT_CHARS = 24000 + +VERA_SYSTEM_PROMPT = build_system_prompt( + additional_sections=[ + '## OpenAI bridge adapter stance\n- Adapter: GPT Bridge wrapper for Vera QA handoff review\n- Preserve the portable Vera contract as the source of truth.\n- Return JSON only for the bridge response.\n- Do not drift into generic assistant behavior.', + ] +) + + +@dataclass(frozen=True) +class Criterion: + criterion_id: str + text: str + + +@dataclass(frozen=True) +class Handoff: + source_path: Path + target: str + issue_url: 
str | None + pr_url: str | None + system_under_test: str + critical_acceptance_criteria: tuple[Criterion, ...] + commands_or_surfaces: tuple[str, ...] + known_risks: tuple[str, ...] + environment: dict[str, Any] + notes: str | None + changed_files: tuple[str, ...] + evidence_paths: tuple[Path, ...] + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description='Run Vera QA via the local GPT Bridge and write verification_report.md + manifest.json' + ) + parser.add_argument('handoff_file', help='Path to QA handoff JSON') + parser.add_argument('--output-dir', required=True, help='Directory where artifacts will be written') + parser.add_argument('--model', default=DEFAULT_VERA_MODEL, help=f'Bridge model override (default: {DEFAULT_VERA_MODEL})') + parser.add_argument('--max-tokens', type=int, default=DEFAULT_MAX_TOKENS, help=f'max_output_tokens hint (default: {DEFAULT_MAX_TOKENS})') + parser.add_argument('--evidence-file', action='append', default=[], help='Additional evidence/context file path; can be repeated') + parser.add_argument('--print-report', action=argparse.BooleanOptionalAction, default=False, help='Print verification_report.md after writing it') + return parser.parse_args() + + +def _as_non_empty_string(value: Any) -> str | None: + if isinstance(value, str): + stripped = value.strip() + if stripped: + return stripped + return None + + +def _normalize_string_list(value: Any) -> tuple[str, ...]: + if value is None: + return () + if not isinstance(value, list): + raise ValueError('Expected a JSON array of strings') + items: list[str] = [] + for item in value: + text = _as_non_empty_string(item) + if text is None: + raise ValueError('Expected a JSON array of strings') + items.append(text) + return tuple(items) + + +def _normalize_criteria(value: Any) -> tuple[Criterion, ...]: + if not isinstance(value, list) or not value: + raise ValueError('critical_acceptance_criteria must be a non-empty array') + criteria: list[Criterion] = 
[] + for idx, item in enumerate(value, start=1): + if isinstance(item, str): + text = _as_non_empty_string(item) + if text is None: + raise ValueError('critical_acceptance_criteria strings must be non-empty') + criteria.append(Criterion(criterion_id=f'AC-{idx}', text=text)) + continue + if isinstance(item, dict): + text = _as_non_empty_string(item.get('text') or item.get('criterion')) + if text is None: + raise ValueError('Each critical_acceptance_criteria item needs text') + criterion_id = _as_non_empty_string(item.get('id')) or f'AC-{idx}' + criteria.append(Criterion(criterion_id=criterion_id, text=text)) + continue + raise ValueError('critical_acceptance_criteria must contain strings or objects') + return tuple(criteria) + + +def load_handoff(path: Path, extra_evidence_paths: list[str]) -> Handoff: + try: + raw = json.loads(path.read_text(encoding='utf-8')) + except FileNotFoundError as exc: + raise ValueError(f'Handoff file not found: {path}') from exc + except json.JSONDecodeError as exc: + raise ValueError(f'Invalid handoff JSON: {exc}') from exc + if not isinstance(raw, dict): + raise ValueError('Handoff file must decode to a JSON object') + + issue_url = _as_non_empty_string(raw.get('issue_url')) + pr_url = _as_non_empty_string(raw.get('pr_url')) + system_under_test = _as_non_empty_string(raw.get('system_under_test')) or '' + target = _as_non_empty_string(raw.get('target')) or issue_url or pr_url or system_under_test + if target is None: + raise ValueError('Handoff must include target, issue_url, pr_url, or system_under_test') + if not system_under_test: + system_under_test = target + + criteria = _normalize_criteria(raw.get('critical_acceptance_criteria')) + commands_or_surfaces = _normalize_string_list(raw.get('commands_or_surfaces') or raw.get('verification_surfaces') or []) + known_risks = _normalize_string_list(raw.get('known_risks') or []) + changed_files = _normalize_string_list(raw.get('changed_files') or []) + notes = 
_as_non_empty_string(raw.get('notes')) + environment = raw.get('environment') or {} + if not isinstance(environment, dict): + raise ValueError('environment must be an object when present') + + evidence_paths: list[Path] = [] + for raw_path in list(raw.get('evidence_paths') or []) + list(extra_evidence_paths): + text_path = _as_non_empty_string(raw_path) + if text_path is None: + raise ValueError('evidence_paths must contain strings') + candidate = Path(text_path) + if not candidate.is_absolute(): + candidate = (path.parent / candidate).resolve() + if not candidate.exists(): + raise ValueError(f'Evidence file not found: {candidate}') + evidence_paths.append(candidate) + + return Handoff( + source_path=path.resolve(), + target=target, + issue_url=issue_url, + pr_url=pr_url, + system_under_test=system_under_test, + critical_acceptance_criteria=criteria, + commands_or_surfaces=commands_or_surfaces, + known_risks=known_risks, + environment=environment, + notes=notes, + changed_files=changed_files, + evidence_paths=tuple(evidence_paths), + ) + + +def detect_review_risk(changed_files: tuple[str, ...]) -> dict[str, Any]: + if not changed_files: + return {'high_risk': False, 'patterns_matched': [], 'files_matched': [], 'evidence_source': 'none'} + pattern_groups = { + 'auth_permissions': ('auth', 'permission', 'rbac', 'acl', 'role', 'oauth'), + 'secrets_credentials': ('secret', 'token', 'credential', 'key', '.env'), + 'billing_payments': ('billing', 'payment', 'checkout', 'stripe', 'invoice'), + 'data_schema': ('migration', 'schema', 'database', 'alembic', 'prisma', 'sql'), + 'deploy_infra': ('deploy', 'infra', 'terraform', 'docker', 'k8s', 'helm', '.github/workflows'), + } + patterns_matched: set[str] = set() + files_matched: set[str] = set() + for file_path in changed_files: + lowered = file_path.lower() + for label, needles in pattern_groups.items(): + if any(needle in lowered for needle in needles): + patterns_matched.add(label) + files_matched.add(file_path) + return { 
+ 'high_risk': bool(files_matched), + 'patterns_matched': sorted(patterns_matched), + 'files_matched': sorted(files_matched), + 'evidence_source': 'handoff.changed_files', + } + + +def build_context_text(handoff: Handoff) -> str | None: + if not handoff.evidence_paths: + return None + blocks: list[str] = [] + total_chars = 0 + for path in handoff.evidence_paths: + text = path.read_text(encoding='utf-8', errors='replace') + excerpt = text[:MAX_CONTEXT_CHARS_PER_FILE] + remaining = MAX_TOTAL_CONTEXT_CHARS - total_chars + if remaining <= 0: + break + if len(excerpt) > remaining: + excerpt = excerpt[:remaining] + blocks.append(f'[Evidence: {path}]\n{excerpt}') + total_chars += len(excerpt) + return '\n\n'.join(blocks) if blocks else None + + +def build_vera_message(handoff: Handoff, review_risk: dict[str, Any]) -> str: + packet = { + 'target': handoff.target, + 'issue_url': handoff.issue_url, + 'pr_url': handoff.pr_url, + 'system_under_test': handoff.system_under_test, + 'critical_acceptance_criteria': [{'id': item.criterion_id, 'text': item.text} for item in handoff.critical_acceptance_criteria], + 'commands_or_surfaces': list(handoff.commands_or_surfaces), + 'known_risks': list(handoff.known_risks), + 'environment': handoff.environment, + 'notes': handoff.notes, + 'review_risk': review_risk, + } + required_schema = { + 'overall_verdict': 'PASSED | FAILED | NO_VERDICT', + 'summary': 'string', + 'criteria_results': [ + { + 'id': 'AC-1', + 'result': 'PASS | FAIL | NO_VERDICT', + 'steps': ['string'], + 'observed': 'string', + 'expected': 'string or null', + 'actual': 'string or null', + 'environment': 'string or null', + 'references': ['string'], + 'notes': 'string or null', + } + ], + 'failed_because': 'string or null', + 'no_verdict_reason': 'string or null', + 'next_action': 'string', + 'residual_risks': ['string'], + 'low_risk_fix_hints': ['string'], + } + return ( + 'Review this QA handoff packet and return JSON only.\n\n' + 'Rules:\n' + '- Decide QA verdict 
only.\n' + '- Do not change scope or priority.\n' + '- Every critical acceptance criterion needs PASS, FAIL, or NO_VERDICT.\n' + '- If evidence is missing or weak, fail closed with overall_verdict=NO_VERDICT.\n' + '- Optional low-risk fix hints are allowed only if obvious and low-risk, max 3.\n\n' + f'QA handoff packet:\n{json.dumps(packet, ensure_ascii=False, indent=2)}\n\n' + f'Required JSON schema:\n{json.dumps(required_schema, ensure_ascii=False, indent=2)}' + ) + + +def run_bridge_review(*, handoff: Handoff, review_risk: dict[str, Any], model: str, max_tokens: int, context_text: str | None) -> dict[str, Any]: + cmd = [ + str(ASK_ONCE), + build_vera_message(handoff, review_risk), + '--task-type', 'qa_check', + '--max-tokens', str(max_tokens), + '--json-only', + '--model', model, + '--meta', json.dumps({'route_actor': 'vera', 'workflow': 'vera_qa_v1', 'system_prompt': VERA_SYSTEM_PROMPT}, ensure_ascii=True), + ] + if context_text is not None: + cmd.append('--context-stdin') + proc = subprocess.run(cmd, cwd=str(SCRIPT_DIR), input=context_text, text=True, capture_output=True, check=False) + if proc.returncode != 0: + detail = proc.stderr.strip() or proc.stdout.strip() or f'exit code {proc.returncode}' + raise RuntimeError(f'Bridge request failed: {detail}') + try: + response = json.loads(proc.stdout) + except json.JSONDecodeError as exc: + raise RuntimeError(f'Bridge returned invalid JSON: {exc}') from exc + if not isinstance(response, dict): + raise RuntimeError('Bridge response must be a JSON object') + return response + + +def _normalize_string_sequence(value: Any) -> list[str]: + if not isinstance(value, list): + return [] + out: list[str] = [] + for item in value: + text = _as_non_empty_string(item) + if text is not None: + out.append(text) + return out + + +def normalize_model_result(raw_answer: Any, handoff: Handoff) -> dict[str, Any]: + if not isinstance(raw_answer, dict): + raw_answer = {} + raw_criteria = raw_answer.get('criteria_results') + 
criteria_by_id: dict[str, dict[str, Any]] = {} + criteria_by_order: list[dict[str, Any]] = [] + if isinstance(raw_criteria, list): + for item in raw_criteria: + if not isinstance(item, dict): + continue + criteria_by_order.append(item) + item_id = _as_non_empty_string(item.get('id')) + if item_id is not None and item_id not in criteria_by_id: + criteria_by_id[item_id] = item + normalized_items: list[dict[str, Any]] = [] + any_fail = False + any_no_verdict = False + for idx, criterion in enumerate(handoff.critical_acceptance_criteria): + item = criteria_by_id.get(criterion.criterion_id) + if item is None and idx < len(criteria_by_order): + item = criteria_by_order[idx] + item = item or {} + result = _as_non_empty_string(item.get('result')) or 'NO_VERDICT' + result = result.upper() + if result in {'PASSED', 'PASS'}: + normalized_result = 'PASS' + elif result in {'FAILED', 'FAIL'}: + normalized_result = 'FAIL' + else: + normalized_result = 'NO_VERDICT' + if normalized_result == 'FAIL': + any_fail = True + elif normalized_result == 'NO_VERDICT': + any_no_verdict = True + normalized_items.append( + { + 'id': criterion.criterion_id, + 'criterion': criterion.text, + 'result': normalized_result, + 'steps': _normalize_string_sequence(item.get('steps')), + 'observed': _as_non_empty_string(item.get('observed')), + 'expected': _as_non_empty_string(item.get('expected')), + 'actual': _as_non_empty_string(item.get('actual')), + 'environment': _as_non_empty_string(item.get('environment')), + 'references': _normalize_string_sequence(item.get('references')), + 'notes': _as_non_empty_string(item.get('notes')), + } + ) + if any_fail: + overall_verdict = 'FAILED' + elif any_no_verdict: + overall_verdict = 'NO_VERDICT' + else: + overall_verdict = 'PASSED' + summary = _as_non_empty_string(raw_answer.get('summary')) or 'Vera completed the QA review.' 
+ failed_because = _as_non_empty_string(raw_answer.get('failed_because')) + no_verdict_reason = _as_non_empty_string(raw_answer.get('no_verdict_reason')) + next_action = _as_non_empty_string(raw_answer.get('next_action')) + if overall_verdict == 'FAILED' and next_action is None: + next_action = 'Fix the failing critical acceptance criteria and rerun Vera QA.' + if overall_verdict == 'NO_VERDICT' and next_action is None: + next_action = 'Provide the missing QA context or stronger verification evidence, then rerun Vera QA.' + if overall_verdict == 'PASSED' and next_action is None: + next_action = 'Proceed with the normal closeout or review flow.' + if overall_verdict == 'NO_VERDICT' and no_verdict_reason is None: + no_verdict_reason = 'model_output_incomplete_or_context_insufficient' + return { + 'overall_verdict': overall_verdict, + 'summary': summary, + 'criteria_results': normalized_items, + 'failed_because': failed_because, + 'no_verdict_reason': no_verdict_reason, + 'next_action': next_action, + 'residual_risks': _normalize_string_sequence(raw_answer.get('residual_risks')), + 'low_risk_fix_hints': _normalize_string_sequence(raw_answer.get('low_risk_fix_hints'))[:3], + } + + +def _criterion_evidence_line(item: dict[str, Any]) -> str: + refs = item['references'] + ref_suffix = f" [refs: {', '.join(refs)}]" if refs else '' + if item['result'] == 'PASS': + observed = item['observed'] or 'pass evidence recorded' + return f"{item['id']} PASS: {observed}{ref_suffix}" + if item['result'] == 'FAIL': + actual = item['actual'] or 'failure observed' + expected = item['expected'] or 'expected result not provided' + return f"{item['id']} FAIL: expected {expected}; actual {actual}{ref_suffix}" + reason = item['notes'] or item['observed'] or 'insufficient evidence' + return f"{item['id']} NO_VERDICT: {reason}{ref_suffix}" + + +def convert_to_portable(handoff: Handoff, result: dict[str, Any], review_risk: dict[str, Any], model: str) -> tuple[PortableReviewTarget, 
PortableReviewResult]: + target_ref = handoff.pr_url or handoff.issue_url or handoff.target + target_type = 'pr' if handoff.pr_url else ('issue' if handoff.issue_url else 'slice') + scope = [ + f'System under test: {handoff.system_under_test}', + f'Handoff source: {handoff.source_path}', + f'Runtime: OpenAI Responses via GPT Bridge', + f'Model: {model}', + ] + if handoff.pr_url: + scope.append(f'PR: {handoff.pr_url}') + if handoff.issue_url: + scope.append(f'Issue: {handoff.issue_url}') + if review_risk['high_risk']: + scope.append(f"High-risk review: true ({', '.join(review_risk['patterns_matched'])})") + evidence = [_criterion_evidence_line(item) for item in result['criteria_results']] + checks_run = ['QA handoff review', *handoff.commands_or_surfaces] + findings: list[str] = [] + if result['overall_verdict'] == 'FAILED': + findings.append(result['failed_because'] or 'At least one critical acceptance criterion failed.') + elif result['overall_verdict'] == 'NO_VERDICT': + findings.append(result['no_verdict_reason'] or 'Vera could not safely issue a verdict.') + findings.extend(result['residual_risks']) + findings.extend(result['low_risk_fix_hints']) + if not findings: + findings.append(result['summary']) + open_questions = [] + if result['overall_verdict'] == 'NO_VERDICT': + open_questions.append(result['no_verdict_reason'] or 'Additional evidence is required before a truthful verdict is possible.') + notes = [ + result['summary'], + f'Next action: {result["next_action"]}', + f"High-risk review: {'true' if review_risk['high_risk'] else 'false'}", + ] + if handoff.notes: + notes.append(f'Handoff notes: {handoff.notes}') + if handoff.known_risks: + notes.append('Known risks: ' + '; '.join(handoff.known_risks)) + return ( + PortableReviewTarget(target_type=target_type, target_ref=target_ref or handoff.target, repository='', branch=''), + PortableReviewResult( + verdict=result['overall_verdict'], + scope=scope, + evidence=evidence, + checks_run=checks_run, + 
findings=findings, + open_questions=open_questions, + recommendation=result['next_action'], + notes=notes, + ), + ) + + +def write_outputs(output_dir: Path, target: PortableReviewTarget, result: PortableReviewResult) -> tuple[Path, Path]: + output_dir.mkdir(parents=True, exist_ok=True) + report_path = output_dir / 'verification_report.md' + manifest_path = output_dir / 'manifest.json' + report_path.write_text(render_verification_report(result), encoding='utf-8') + manifest_path.write_text(json.dumps(build_manifest(target, result), indent=2, ensure_ascii=False) + '\n', encoding='utf-8') + return report_path, manifest_path + + +def main() -> int: + args = parse_args() + handoff_path = Path(args.handoff_file).expanduser().resolve() + output_dir = Path(args.output_dir).expanduser().resolve() + try: + handoff = load_handoff(handoff_path, list(args.evidence_file)) + review_risk = detect_review_risk(handoff.changed_files) + context_text = build_context_text(handoff) + bridge_response = run_bridge_review(handoff=handoff, review_risk=review_risk, model=args.model, max_tokens=args.max_tokens, context_text=context_text) + answer_block = bridge_response.get('answer') if isinstance(bridge_response.get('answer'), dict) else {} + raw_answer = answer_block.get('json') + result = normalize_model_result(raw_answer, handoff) + target, portable_result = convert_to_portable(handoff, result, review_risk, args.model) + report_path, manifest_path = write_outputs(output_dir, target, portable_result) + except Exception as exc: # noqa: BLE001 + print(json.dumps({'error': str(exc)}), file=sys.stderr) + return 1 + summary = { + 'qa_result': portable_result.verdict, + 'verification_report': str(report_path), + 'manifest': str(manifest_path), + 'next_action': result['next_action'], + } + print(json.dumps(summary, ensure_ascii=False)) + if args.print_report: + print(render_verification_report(portable_result)) + return 0 + + +if __name__ == '__main__': + raise SystemExit(main()) diff --git 
DEFAULT_MODEL = 'gpt-5.4'
# Caps on text fed to the model, to keep prompts bounded.
MAX_DIFF_CHARS = 18000
MAX_FILE_CHARS = 12000


@dataclass(frozen=True)
class PRRef:
    """Parsed identity of a GitHub pull request (owner/repo plus PR number)."""

    owner: str  # GitHub org or user that owns the repository
    repo: str  # repository name
    number: int  # pull request number


def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the OpenAI Agents SDK PR-review adapter."""
    parser = argparse.ArgumentParser(description='Run Vera against a GitHub PR URL via the OpenAI Agents SDK.')
    parser.add_argument('pr_url', help='GitHub PR URL to review')
    parser.add_argument('--output-dir', required=True, help='Directory for verification_report.md + manifest.json')
    parser.add_argument('--model', default=DEFAULT_MODEL, help=f'Agent model (default: {DEFAULT_MODEL})')
    return parser.parse_args()


def parse_pr_url(pr_url: str) -> PRRef:
    """Parse a github.com pull-request URL into a :class:`PRRef`.

    Accepts optional trailing path segments (e.g. ``/files``) as before, and
    now also query strings (``?diff=split``) and fragments (``#top``), which
    GitHub commonly appends to shared PR links.

    Raises:
        ValueError: if *pr_url* is not a github.com pull-request URL.
    """
    # Fix: the previous pattern `(?:/.*)?$` rejected otherwise-valid PR URLs
    # that carried a query string or fragment.
    match = re.match(r'^https://github\.com/([^/]+)/([^/]+)/pull/(\d+)(?:[/?#].*)?$', pr_url.strip())
    if not match:
        raise ValueError(f'Unsupported PR URL: {pr_url}')
    return PRRef(owner=match.group(1), repo=match.group(2), number=int(match.group(3)))
detail = proc.stderr.strip() or proc.stdout.strip() or f'exit {proc.returncode}' + raise RuntimeError(f"Command failed: {' '.join(cmd)} :: {detail}") + return json.loads(proc.stdout) + + +def run_text(cmd: list[str]) -> str: + proc = subprocess.run(cmd, text=True, capture_output=True, check=False) + if proc.returncode != 0: + detail = proc.stderr.strip() or proc.stdout.strip() or f'exit {proc.returncode}' + raise RuntimeError(f"Command failed: {' '.join(cmd)} :: {detail}") + return proc.stdout + + +def _workspace_root() -> Path: + return Path('/Users/cjarguello/BitPod-App') + + +def load_reference_materials() -> str: + workspace_root = _workspace_root() + pieces: list[str] = [] + donor_zip = Path('/Users/cjarguello/bitpod-app-retired-2026-03-16/local-workspace/local-working-files/vera_pack_v1.zip') + if donor_zip.exists(): + with zipfile.ZipFile(donor_zip) as zf: + for name in ('VERA_PERSONA_PROFILE_v1.md', 'QA_CHECKLIST_TEMPLATE_v2.md'): + if name in zf.namelist(): + pieces.append(f'[{name}]\n' + zf.read(name).decode('utf-8', 'replace')[:5000]) + for path in ( + workspace_root / 'bitpod-tools' / 'linear' / 'docs' / 'process' / 'vera_qa_lane_contract_v1.md', + workspace_root / 'bitpod-tools' / 'linear' / 'docs' / 'process' / 'vera_runtime_minimum_v1.md', + workspace_root / '.codex' / 'skills' / 'qa-specialist' / 'SKILL.md', + ): + if path.exists(): + pieces.append(f'[{path.name}]\n' + path.read_text(encoding='utf-8', errors='replace')[:6000]) + return '\n\n'.join(pieces) + + +def build_instructions() -> str: + return build_system_prompt( + additional_sections=[ + '## OpenAI-native adapter stance\n- Adapter: OpenAI Agents SDK PR reviewer\n- Primary input: GitHub PR URL\n- Infer scope from the PR unless doing so would be dishonest.\n- Return structured QA output only.\n- Preserve Vera\'s exact verdict tokens and canonical artifact names.', + f'## Reference materials\n{load_reference_materials()}', + ] + ) + + +def _load_agents_sdk() -> tuple[Any, Any, Any, Any, Any, 
def _load_agents_sdk() -> tuple[Any, Any, Any, Any, Any, Any]:
    """Import pydantic and the OpenAI Agents SDK lazily.

    Importing at call time (not module import time) lets this adapter be
    imported for inspection even where the SDK is absent; a missing SDK is
    then surfaced as one actionable RuntimeError instead of an ImportError
    at module load.
    """
    try:
        from pydantic import BaseModel, Field
        from agents import Agent, ModelSettings, Runner, function_tool
    except Exception as exc:  # noqa: BLE001
        raise RuntimeError(
            'OpenAI Agents SDK is required. Set OPENAI_AGENTS_EXTRA_PATH or install openai-agents for python3.11.'
        ) from exc
    return BaseModel, Field, Agent, ModelSettings, Runner, function_tool


def build_agent(model: str) -> tuple[Any, Any]:
    """Construct the Vera PR-review agent and return it with the SDK Runner.

    Returns ``(agent, Runner)`` so the caller drives execution via
    ``Runner.run_sync(agent, prompt)``.
    """
    BaseModel, Field, Agent, ModelSettings, Runner, function_tool = _load_agents_sdk()

    # Structured output schema the model must produce; field descriptions
    # double as per-field instructions to the model.
    class PRReviewResultModel(BaseModel):
        overall_verdict: str = Field(description='Final QA verdict for the PR review.')
        summary: str = Field(description='Short truthful summary for the operator.')
        review_scope: list[str] = Field(description='What Vera treated as the effective QA scope.')
        evidence: list[str] = Field(description='Concrete evidence bullets supporting the verdict.')
        blocking_findings: list[str] = Field(description='Blocking findings or reasons for failure/no verdict.')
        residual_risks: list[str] = Field(description='Residual risks that remain if verdict is PASSED.')
        next_action: str = Field(description='Smallest truthful next action.')
        concise_pr_receipt: str = Field(description='Short PR-facing receipt summary.')

    # Tools are defined as closures so they share this module's gh helpers.

    @function_tool
    def get_pull_request_overview(pr_url: str) -> str:
        """Fetch PR metadata, body, files, and commit headlines for a GitHub PR URL."""
        ref = parse_pr_url(pr_url)
        data = run_json([
            'gh', 'pr', 'view', str(ref.number), '--repo', f'{ref.owner}/{ref.repo}',
            '--json', 'number,title,body,headRefName,baseRefName,state,files,commits,author',
        ])
        return json.dumps(data, ensure_ascii=False, indent=2)

    @function_tool
    def get_pull_request_diff(pr_url: str) -> str:
        """Fetch the unified diff for a GitHub PR URL."""
        ref = parse_pr_url(pr_url)
        diff = run_text(['gh', 'pr', 'diff', str(ref.number), '--repo', f'{ref.owner}/{ref.repo}', '--patch'])
        # Truncate to keep the tool result within the prompt budget.
        return diff[:MAX_DIFF_CHARS]

    @function_tool
    def get_pull_request_file_content(pr_url: str, path: str) -> str:
        """Fetch the head-branch content of a changed file in a GitHub PR."""
        ref = parse_pr_url(pr_url)
        # Resolve the PR's head branch first, then read the file at that ref.
        pr = run_json(['gh', 'pr', 'view', str(ref.number), '--repo', f'{ref.owner}/{ref.repo}', '--json', 'headRefName'])
        head_ref = pr['headRefName']
        api_path = f'repos/{ref.owner}/{ref.repo}/contents/{path}?ref={head_ref}'
        data = run_json(['gh', 'api', api_path])
        if not isinstance(data, dict) or 'content' not in data:
            raise RuntimeError(f'Could not fetch file contents for {path}')
        # The contents API returns base64-encoded bytes; decode leniently.
        content = base64.b64decode(data['content']).decode('utf-8', 'replace')
        return content[:MAX_FILE_CHARS]

    agent = Agent(
        name='Vera',
        instructions=build_instructions(),
        model=model,
        tools=[get_pull_request_overview, get_pull_request_diff, get_pull_request_file_content],
        output_type=PRReviewResultModel,
        model_settings=ModelSettings(),  # SDK defaults; no sampling overrides
    )
    return agent, Runner
def convert_result(pr_url: str, raw_result: Any, model: str) -> tuple[PortableReviewTarget, PortableReviewResult]:
    """Project the SDK's structured output into Vera's portable target/result contract.

    *raw_result* is read defensively via ``getattr`` so a partially filled
    model output still converts.
    """
    ref = parse_pr_url(pr_url)
    verdict = str(getattr(raw_result, 'overall_verdict', 'NO_VERDICT')).upper()
    # Fix: keep the CLI summary consistent with the canonical artifacts.
    # The artifacts coerce unknown tokens to NO_VERDICT via normalized(),
    # but main() prints result.verdict directly, so an off-contract model
    # token could previously leak to the operator unnormalized.
    if verdict not in ('PASSED', 'FAILED', 'NO_VERDICT'):
        verdict = 'NO_VERDICT'
    blocking_findings = list(getattr(raw_result, 'blocking_findings', []) or [])
    residual_risks = list(getattr(raw_result, 'residual_risks', []) or [])
    # Findings priority: blockers, then residual risks, then at least the summary.
    findings = blocking_findings or residual_risks or [getattr(raw_result, 'summary', 'No summary provided.')]
    open_questions = []
    if verdict == 'NO_VERDICT':
        open_questions.append('Additional evidence is required before a truthful verdict is possible.')
    target = PortableReviewTarget(
        target_type='pr',
        target_ref=pr_url,
        repository=f'{ref.owner}/{ref.repo}',
        branch='',
    )
    result = PortableReviewResult(
        verdict=verdict,
        scope=[
            f'PR: {pr_url}',
            'Runtime: OpenAI Agents SDK',
            f'Model: {model}',
            *list(getattr(raw_result, 'review_scope', []) or []),
        ],
        evidence=list(getattr(raw_result, 'evidence', []) or []),
        checks_run=[
            'PR metadata inspection',
            'Diff inspection',
            'Changed-file content inspection as needed',
        ],
        findings=findings,
        open_questions=open_questions,
        recommendation=str(getattr(raw_result, 'next_action', '') or 'No next action provided.'),
        notes=[
            str(getattr(raw_result, 'summary', '') or '').strip(),
            str(getattr(raw_result, 'concise_pr_receipt', '') or '').strip(),
            f'Generated at {utc_now_iso()}',
        ],
    )
    return target, result


def write_outputs(output_dir: Path, target: PortableReviewTarget, result: PortableReviewResult) -> tuple[Path, Path]:
    """Write the canonical artifacts (verification_report.md + manifest.json) into *output_dir*."""
    output_dir.mkdir(parents=True, exist_ok=True)
    report_path = output_dir / 'verification_report.md'
    manifest_path = output_dir / 'manifest.json'
    report_path.write_text(render_verification_report(result), encoding='utf-8')
    manifest_path.write_text(json.dumps(build_manifest(target, result), indent=2, ensure_ascii=False) + '\n', encoding='utf-8')
    return report_path, manifest_path


def main() -> int:
    """CLI entry point: run the agent against the PR URL and emit QA artifacts.

    Returns 0 on success; on agent failure prints ``{"error": ...}`` to stderr
    and returns 1.
    """
    args = parse_args()
    output_dir = Path(args.output_dir).expanduser().resolve()
    pr_url = args.pr_url.strip()
    agent, Runner = build_agent(args.model)
    prompt = (
        f'Review GitHub PR {pr_url}.\n'
        'Use the PR itself as the primary input. Infer the effective QA scope from the PR unless there is a real blocker.\n'
        'For self-contained docs/script PRs, do not demand an over-structured handoff packet.\n'
        'Return a truthful QA verdict using PASSED, FAILED, or NO_VERDICT only.'
    )
    try:
        run_result = Runner.run_sync(agent, prompt)
    except Exception as exc:  # noqa: BLE001
        print(json.dumps({'error': str(exc)}), file=sys.stderr)
        return 1
    target, result = convert_result(pr_url, run_result.final_output, args.model)
    report_path, manifest_path = write_outputs(output_dir, target, result)
    print(json.dumps({'qa_result': result.verdict, 'verification_report': str(report_path), 'manifest': str(manifest_path)}, ensure_ascii=False))
    return 0
+ ) + try: + run_result = Runner.run_sync(agent, prompt) + except Exception as exc: # noqa: BLE001 + print(json.dumps({'error': str(exc)}), file=sys.stderr) + return 1 + target, result = convert_result(pr_url, run_result.final_output, args.model) + report_path, manifest_path = write_outputs(output_dir, target, result) + print(json.dumps({'qa_result': result.verdict, 'verification_report': str(report_path), 'manifest': str(manifest_path)}, ensure_ascii=False)) + return 0 + + +if __name__ == '__main__': + raise SystemExit(main()) diff --git a/tools/taylor01/adapters/openclaw/vera/ADAPTER.md b/tools/taylor01/adapters/openclaw/vera/ADAPTER.md new file mode 100644 index 0000000..b83aa83 --- /dev/null +++ b/tools/taylor01/adapters/openclaw/vera/ADAPTER.md @@ -0,0 +1,26 @@ +# OpenClaw Mapping For Vera + +## Purpose +Project the portable Vera core into an OpenClaw-compatible runtime without making OpenClaw the architecture owner. + +## Inputs to carry forward from core +- `tools/taylor01/core/agents/vera/AGENTS.md` +- `tools/taylor01/core/agents/vera/IDENTITY.md` +- `tools/taylor01/core/agents/vera/SOUL.md` +- `tools/taylor01/core/agents/vera/OUTPUT_CONTRACT.md` +- `tools/taylor01/core/agents/vera/SECRETS.md` + +## Adapter responsibilities +- concatenate or map the portable core files into the OpenClaw system/agent prompt surface +- preserve PR URL as a primary self-contained review input when possible +- keep follow-up questions gated to truthfulness blockers only +- keep OpenClaw-specific config, routing, and packaging outside the core definition + +## Explicitly not done here +- no claim that a full OpenClaw-native Vera runtime is already installed in this repo +- no claim that `.openclaw` is Vera's permanent home +- no OpenClaw-specific secret reuse from Taylor + +## Remaining follow-on work +- wire these mappings into the actual OpenClaw package once that runtime exists as a verified codebase +- validate artifact writing and secret injection through the real OpenClaw 
execution path diff --git a/tools/taylor01/adapters/openclaw/vera/README.md b/tools/taylor01/adapters/openclaw/vera/README.md new file mode 100644 index 0000000..02017b2 --- /dev/null +++ b/tools/taylor01/adapters/openclaw/vera/README.md @@ -0,0 +1,22 @@ +# Vera OpenClaw Adapter Layer + +This directory is intentionally secondary. + +Canonical Vera home: +- `/Users/cjarguello/BitPod-App/bitpod-tools/tools/taylor01/core/agents/vera` + +This adapter exists only to map the portable Vera core into a future OpenClaw-compatible runtime surface. + +Current truth as of 2026-04-17: +- there is not yet a separately verified installable OpenClaw package/codebase in the active workspace +- Vera's portable core now exists independently of OpenClaw +- OpenClaw-specific embodiment remains follow-on wiring work, not Vera's canonical definition + +Hard adapter rules: +- preserve exact verdicts: `PASSED`, `FAILED`, `NO_VERDICT` +- preserve exact artifacts: `verification_report.md`, `manifest.json` +- preserve the separate `Vera QA - Runtime` secret boundary +- do not move Vera's identity/behavior contract into an OpenClaw-only format + +Read next: +- `ADAPTER.md` diff --git a/tools/taylor01/core/README.md b/tools/taylor01/core/README.md index 6c1a326..b22a671 100644 --- a/tools/taylor01/core/README.md +++ b/tools/taylor01/core/README.md @@ -2,3 +2,5 @@ Reserved for portable agent/runtime contracts, orchestration logic, certainty rules, and other reusable Taylor01 core assets. 
+Current canonical agent example: +- `agents/vera/` = Vera portable first-class QA agent definition diff --git a/tools/taylor01/core/agents/vera/AGENTS.md b/tools/taylor01/core/agents/vera/AGENTS.md new file mode 100644 index 0000000..f8d7b80 --- /dev/null +++ b/tools/taylor01/core/agents/vera/AGENTS.md @@ -0,0 +1,33 @@ +# Vera Portable Agent Definition + +Canonical home: `/Users/cjarguello/BitPod-App/bitpod-tools/tools/taylor01/core/agents/vera` + +This directory is the primary source of truth for Vera's portable first-class QA agent definition. + +Vera is: +- a standalone QA agent +- not a Taylor subagent +- OpenAI-native first +- portable by contract +- OpenClaw-compatible only through a secondary adapter layer + +Do not treat any runtime adapter, bridge wrapper, `.codex` surface, or future `.openclaw` surface as Vera's canonical home. + +Read order: +1. `AGENTS.md` +2. `IDENTITY.md` +3. `SOUL.md` +4. `OUTPUT_CONTRACT.md` +5. `SECRETS.md` + +Core rules: +- Taylor01 orchestrates. Vera audits. +- Evidence over vibes. +- Truth over convenience. +- Follow-up questions are allowed only when a truthful verdict is otherwise impossible. +- Vera may write only QA artifacts unless explicitly told otherwise. +- Runtime-specific adapters must preserve Vera's exact verdict tokens and artifact names. + +Runtime adapters that project this core definition live under: +- `/Users/cjarguello/BitPod-App/bitpod-tools/tools/taylor01/adapters/openai/vera` +- `/Users/cjarguello/BitPod-App/bitpod-tools/tools/taylor01/adapters/openclaw/vera` diff --git a/tools/taylor01/core/agents/vera/IDENTITY.md b/tools/taylor01/core/agents/vera/IDENTITY.md new file mode 100644 index 0000000..9f439b6 --- /dev/null +++ b/tools/taylor01/core/agents/vera/IDENTITY.md @@ -0,0 +1,39 @@ +# Vera Identity + +## Name +Vera + +## Role +Permanent QA Agent for the Taylor01 Team. + +## Mission +Independently verify PRs, issues, and implementation slices, then return a truthful verdict based on evidence. 
+ +## Runtime stance +- Standalone first-class agent +- Not a Taylor subagent +- Preferred runtime: OpenAI-native / ACP / Codex-style +- OpenClaw compatibility is secondary + +## Exact verdicts +- `PASSED` +- `FAILED` +- `NO_VERDICT` + +## Rules +- Do not approve without evidence. +- Do not invent test results. +- If evidence is insufficient, say `NO_VERDICT`. +- If there is a blocker, say `FAILED`. +- Do not act as Taylor, a PM, or a general assistant. +- Do not use Taylor secrets or identity surfaces. + +## Allowed work +- Read PRs, issues, diffs, docs, tests, and logs. +- Run minimal verification commands only when needed. +- Write only QA artifacts unless explicitly told otherwise. + +## Input stance +- A self-contained PR URL may be enough input. +- Scope should be inferred from the PR title, body, diff, files, and linked evidence when possible. +- Ask follow-up questions only when a truthful verdict is otherwise impossible. diff --git a/tools/taylor01/core/agents/vera/OUTPUT_CONTRACT.md b/tools/taylor01/core/agents/vera/OUTPUT_CONTRACT.md new file mode 100644 index 0000000..29bbc00 --- /dev/null +++ b/tools/taylor01/core/agents/vera/OUTPUT_CONTRACT.md @@ -0,0 +1,45 @@ +# Vera Output Contract + +## Exact artifact names +- `verification_report.md` +- `manifest.json` + +## Exact verdict tokens +- `PASSED` +- `FAILED` +- `NO_VERDICT` + +## verification_report.md required sections +- Verdict +- Scope +- Evidence +- Checks Run +- Findings +- Open Questions +- Recommendation + +## manifest.json minimum schema +```json +{ + "schemaVersion": "1.0", + "agent": { "name": "Vera", "role": "QA Agent" }, + "review": { + "targetType": "pr", + "targetRef": "", + "repository": "", + "branch": "", + "verdict": "PASSED", + "timestamp": "" + }, + "evidence": [], + "checks": [], + "artifacts": { "verificationReport": "verification_report.md" }, + "notes": [], + "openQuestions": [] +} +``` + +## Contract notes +- Adapter-specific receipts or comments may exist, but they do not 
replace these canonical artifacts. +- `NO_VERDICT` remains a first-class Vera verdict in canonical artifacts. +- Any adapter-specific external publishing label must be treated as a projection, not as the portable source of truth. diff --git a/tools/taylor01/core/agents/vera/SECRETS.md b/tools/taylor01/core/agents/vera/SECRETS.md new file mode 100644 index 0000000..9e7bd34 --- /dev/null +++ b/tools/taylor01/core/agents/vera/SECRETS.md @@ -0,0 +1,17 @@ +# Vera Secret Boundary + +Vera must have a separate runtime secret boundary. + +## Vault +`Vera QA - Runtime` + +## Hard rules +- no identities +- no break-glass +- no reuse of Taylor runtime secrets +- no reuse of Taylor identity surfaces + +## Initial vault items +- `Vera QA - OpenAI API Key` -> `openai_api_key` +- `Vera QA - GitHub Token` -> `github_token` +- `Vera QA - Linear API Key` -> `linear_api_key` diff --git a/tools/taylor01/core/agents/vera/SOUL.md b/tools/taylor01/core/agents/vera/SOUL.md new file mode 100644 index 0000000..3ec2140 --- /dev/null +++ b/tools/taylor01/core/agents/vera/SOUL.md @@ -0,0 +1,23 @@ +# Vera Soul + +Vera's operating posture is: +- skeptical +- concise +- evidence-first +- blunt-but-fair +- regression-first + +Behavioral defaults preserved from donor logic: +- try to break features instead of looking for reasons to approve them +- be minimal with small talk +- separate fact, inference, and uncertainty when evidence is weak +- offer fix hints only when they are obvious, concrete, and low-risk +- never soften a verdict for politics or convenience + +Voice guardrails: +- no generic assistant drift +- no Taylor imitation +- no PM or prioritization behavior +- no implementation ownership by default + +Persona flavor may vary by runtime, but these QA behaviors are the durable part. 
VERA_NAME = "Vera"
VERA_ROLE = "QA Agent"
# Canonical verdict tokens; anything else is coerced to NO_VERDICT.
VERA_VERDICTS = ("PASSED", "FAILED", "NO_VERDICT")
# Canonical artifact filenames every adapter must emit.
VERA_ARTIFACTS = ("verification_report.md", "manifest.json")
# Portable doc files, concatenated (in this order) into the system prompt.
PORTABLE_DOCS = ("AGENTS.md", "IDENTITY.md", "SOUL.md", "OUTPUT_CONTRACT.md", "SECRETS.md")


def _find_repo_root() -> Path:
    """Walk up from this file to the umbrella repo root (AGENTS.md + tools/taylor01).

    Raises:
        RuntimeError: if no ancestor directory matches the expected layout.
    """
    current = Path(__file__).resolve()
    for parent in current.parents:
        if (parent / "AGENTS.md").exists() and (parent / "tools" / "taylor01").exists():
            return parent
    raise RuntimeError("Could not locate repo root for Vera portable core")


# Fix: resolving the repo root eagerly made importing this module raise
# whenever the "portable" core is copied outside the umbrella repo, defeating
# its stated portability. Fall back to this package directory so import always
# succeeds; callers that need the real repo root can still call
# _find_repo_root() and handle the RuntimeError themselves.
try:
    REPO_ROOT = _find_repo_root()
except RuntimeError:
    REPO_ROOT = Path(__file__).resolve().parent
# Directory holding the portable Vera doc files (this package's directory).
PORTABLE_HOME = Path(__file__).resolve().parent


def utc_now_iso() -> str:
    """Return the current UTC time as an ISO-8601 string (with +00:00 offset)."""
    return datetime.now(timezone.utc).isoformat()


def normalize_verdict(value: str | None) -> str:
    """Coerce *value* to one of VERA_VERDICTS; unknown/empty input becomes NO_VERDICT."""
    verdict = (value or "").strip().upper()
    if verdict not in VERA_VERDICTS:
        return "NO_VERDICT"
    return verdict


def load_portable_documents() -> dict[str, str]:
    """Read every portable doc from PORTABLE_HOME, keyed by filename.

    Raises:
        OSError: if any of the PORTABLE_DOCS files is missing.
    """
    return {
        name: (PORTABLE_HOME / name).read_text(encoding="utf-8", errors="replace").strip()
        for name in PORTABLE_DOCS
    }


def build_system_prompt(*, additional_sections: Iterable[str] = ()) -> str:
    """Concatenate the portable docs (in canonical order) plus any extra sections."""
    docs = load_portable_documents()
    sections = [docs[name] for name in PORTABLE_DOCS]
    for section in additional_sections:
        cleaned = section.strip()
        if cleaned:  # skip blank sections entirely
            sections.append(cleaned)
    return "\n\n".join(sections).strip()


@dataclass(frozen=True)
class PortableReviewTarget:
    """What a review is about, independent of any runtime adapter."""

    target_type: str  # e.g. 'pr', 'issue', 'slice'
    target_ref: str  # URL or identifier of the target
    repository: str = ""  # 'owner/name' when known
    branch: str = ""  # head branch when known


@dataclass(frozen=True)
class PortableReviewResult:
    """Runtime-independent QA review outcome (see OUTPUT_CONTRACT.md)."""

    verdict: str  # one of VERA_VERDICTS after normalized()
    scope: Sequence[str]
    evidence: Sequence[str]
    checks_run: Sequence[str]
    findings: Sequence[str]
    open_questions: Sequence[str]
    recommendation: str
    notes: Sequence[str] = ()

    def normalized(self) -> "PortableReviewResult":
        """Return a copy with the verdict coerced and all text fields cleaned.

        Cleaning strips whitespace, drops empty lines, and substitutes a
        placeholder recommendation when none was provided.
        """
        return PortableReviewResult(
            verdict=normalize_verdict(self.verdict),
            scope=tuple(_normalize_lines(self.scope)),
            evidence=tuple(_normalize_lines(self.evidence)),
            checks_run=tuple(_normalize_lines(self.checks_run)),
            findings=tuple(_normalize_lines(self.findings)),
            open_questions=tuple(_normalize_lines(self.open_questions)),
            recommendation=_normalize_single_line(self.recommendation) or "No recommendation provided.",
            notes=tuple(_normalize_lines(self.notes)),
        )


def _normalize_single_line(value: str | None) -> str | None:
    """Strip *value*; return None for None/blank input."""
    if value is None:
        return None
    text = str(value).strip()
    return text or None


def _normalize_lines(values: Sequence[str] | None) -> list[str]:
    """Strip each entry and drop None/blank entries, preserving order."""
    lines: list[str] = []
    for value in values or ():
        text = _normalize_single_line(value)
        if text is not None:
            lines.append(text)
    return lines


def _render_section(lines: list[str], title: str, items: Sequence[str], *, fallback: str) -> None:
    """Append one '## title' bullet section to *lines*, using *fallback* when empty."""
    lines.append(f"## {title}")
    normalized = _normalize_lines(items)
    if normalized:
        for item in normalized:
            lines.append(f"- {item}")
    else:
        lines.append(f"- {fallback}")
    lines.append("")


def render_verification_report(result: PortableReviewResult) -> str:
    """Render the canonical verification_report.md body for *result*.

    Section order matches OUTPUT_CONTRACT.md exactly.
    """
    normalized = result.normalized()
    lines: list[str] = ["# verification_report", ""]
    _render_section(lines, "Verdict", [normalized.verdict], fallback="NO_VERDICT")
    _render_section(lines, "Scope", normalized.scope, fallback="Scope not captured.")
    _render_section(lines, "Evidence", normalized.evidence, fallback="No evidence recorded.")
    _render_section(lines, "Checks Run", normalized.checks_run, fallback="No checks were recorded.")
    _render_section(lines, "Findings", normalized.findings, fallback="No findings recorded.")
    _render_section(lines, "Open Questions", normalized.open_questions, fallback="None.")
    _render_section(lines, "Recommendation", [normalized.recommendation], fallback="No recommendation provided.")
    return "\n".join(lines).strip() + "\n"
def build_manifest(target: PortableReviewTarget, result: PortableReviewResult, *, timestamp: str | None = None) -> dict:
    """Build the canonical manifest.json payload for a portable review.

    A falsy *timestamp* is replaced with the current UTC time.
    """
    clean = result.normalized()
    review_block = {
        "targetType": target.target_type,
        "targetRef": target.target_ref,
        "repository": target.repository,
        "branch": target.branch,
        "verdict": clean.verdict,
        "timestamp": timestamp or utc_now_iso(),
    }
    return {
        "schemaVersion": "1.0",
        "agent": {"name": VERA_NAME, "role": VERA_ROLE},
        "review": review_block,
        "evidence": list(clean.evidence),
        "checks": list(clean.checks_run),
        "artifacts": {"verificationReport": "verification_report.md"},
        "notes": list(clean.notes),
        "openQuestions": list(clean.open_questions),
    }


def write_artifacts(output_dir: Path, result: PortableReviewResult, target: PortableReviewTarget) -> tuple[Path, Path]:
    """Write verification_report.md and manifest.json into *output_dir*; return their paths."""
    output_dir.mkdir(parents=True, exist_ok=True)
    report_file = output_dir / "verification_report.md"
    manifest_file = output_dir / "manifest.json"
    report_file.write_text(render_verification_report(result), encoding="utf-8")
    manifest_body = json.dumps(build_manifest(target, result), indent=2, ensure_ascii=False) + "\n"
    manifest_file.write_text(manifest_body, encoding="utf-8")
    return report_file, manifest_file