Skip to content

Commit 17b8398

Browse files
committed
feat: add interactive llm consent and key prompt options
1 parent 97d051d commit 17b8398

6 files changed

Lines changed: 77 additions & 0 deletions

File tree

README.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,8 @@ python scripts/analyze.py analyze \
146146
--audience nontech \
147147
--overview-length medium \
148148
--enable-llm-descriptions true \
149+
--ask-before-llm-use false \
150+
--prompt-for-llm-key false \
149151
--enable-web-enrichment true
150152
```
151153

@@ -158,6 +160,9 @@ For LLM-based narrative summaries:
158160

159161
- Set `CODE_EXPLAINER_LLM_API_KEY` (or `OPENAI_API_KEY`)
160162
- Optional: `CODE_EXPLAINER_LLM_BASE_URL`, `CODE_EXPLAINER_LLM_MODEL`
163+
- Optional interactive controls:
164+
- `--ask-before-llm-use true` (prompt for permission)
165+
- `--prompt-for-llm-key true` (securely prompt for key when missing)
161166

162167
## Install From GitHub (For Other Developers)
163168

code-explainer/SKILL.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,8 @@ python scripts/analyze.py analyze \
3838
--include-glob <pattern> \
3939
--exclude-glob <pattern> \
4040
--enable-llm-descriptions <true|false> \
41+
--ask-before-llm-use <true|false> \
42+
--prompt-for-llm-key <true|false> \
4143
--enable-web-enrichment <true|false>
4244
```
4345

@@ -47,6 +49,8 @@ Defaults:
4749
- `audience=nontech`
4850
- `overview-length=medium`
4951
- `enable-llm-descriptions=true`
52+
- `ask-before-llm-use=false`
53+
- `prompt-for-llm-key=false`
5054
- `enable-web-enrichment=true`
5155

5256
## Dependencies
@@ -94,6 +98,9 @@ bash ./scripts/install_runtime.sh
9498
- Without `mmdc`, fallback rendering is used and flagged in reports.
9599
- For LLM narrative summaries, set `CODE_EXPLAINER_LLM_API_KEY` (or `OPENAI_API_KEY`).
96100
- Optional: set `CODE_EXPLAINER_LLM_BASE_URL` and `CODE_EXPLAINER_LLM_MODEL`.
101+
- If you want interactive control, enable:
102+
- `--ask-before-llm-use true`
103+
- `--prompt-for-llm-key true`
97104
- This skill does not mutate the analyzed target repository.
98105

99106
## Dependency Troubleshooting

code-explainer/references/mode-behavior.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,9 @@ Goal: Maximum fidelity and audit-ready onboarding.
4343
## LLM Narrative
4444

4545
- Controlled with `--enable-llm-descriptions <true|false>`.
46+
- Optional interactive controls:
47+
- `--ask-before-llm-use true`
48+
- `--prompt-for-llm-key true`
4649
- Reads API config from env vars:
4750
- `CODE_EXPLAINER_LLM_API_KEY` (or `OPENAI_API_KEY`)
4851
- `CODE_EXPLAINER_LLM_BASE_URL` (optional)

code-explainer/references/output-contract.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,8 @@
7272
- `generated_at`
7373
- `enabled`
7474
- `used`
75+
- `asked_before_use`
76+
- `prompted_for_key`
7577
- `provider`
7678
- `model`
7779
- `repo_summary_paragraph`

code-explainer/scripts/analyze.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,8 @@ def run_pipeline(
124124
overview_length: str,
125125
enable_web_enrichment: bool,
126126
enable_llm_descriptions: bool,
127+
ask_before_llm_use: bool = False,
128+
prompt_for_llm_key: bool = False,
127129
include_globs: List[str] | None = None,
128130
exclude_globs: List[str] | None = None,
129131
) -> Dict[str, Any]:
@@ -164,6 +166,8 @@ def run_pipeline(
164166
docs_payload=coverage_payload,
165167
out_dir=meta_dir,
166168
enabled=enable_llm_descriptions,
169+
ask_before_use=ask_before_llm_use,
170+
prompt_for_key=prompt_for_llm_key,
167171
)
168172

169173
diagram_manifest = build_diagrams.build_diagrams(
@@ -259,6 +263,8 @@ def _parse_args() -> argparse.Namespace:
259263
)
260264
parser.add_argument("--enable-web-enrichment", default="true")
261265
parser.add_argument("--enable-llm-descriptions", default="true")
266+
parser.add_argument("--ask-before-llm-use", default="false")
267+
parser.add_argument("--prompt-for-llm-key", default="false")
262268
return parser.parse_args()
263269

264270

@@ -271,6 +277,8 @@ def main() -> int:
271277
mode = common.normalize_mode(args.mode)
272278
web_enabled = common.bool_from_string(args.enable_web_enrichment)
273279
llm_enabled = common.bool_from_string(args.enable_llm_descriptions)
280+
ask_before_llm_use = common.bool_from_string(args.ask_before_llm_use)
281+
prompt_for_llm_key = common.bool_from_string(args.prompt_for_llm_key)
274282
summary = run_pipeline(
275283
source=args.source,
276284
output_root=Path(args.output).resolve(),
@@ -279,6 +287,8 @@ def main() -> int:
279287
overview_length=args.overview_length,
280288
enable_web_enrichment=web_enabled,
281289
enable_llm_descriptions=llm_enabled,
290+
ask_before_llm_use=ask_before_llm_use,
291+
prompt_for_llm_key=prompt_for_llm_key,
282292
include_globs=args.include_glob,
283293
exclude_globs=args.exclude_glob,
284294
)

code-explainer/scripts/llm_describe.py

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from __future__ import annotations
33

44
import argparse
5+
import getpass
56
import json
67
import os
78
import re
@@ -27,6 +28,25 @@ def _normalize_base_url(url: str) -> str:
2728
return clean or DEFAULT_BASE_URL
2829

2930

31+
def _is_interactive_terminal() -> bool:
32+
try:
33+
return bool(sys.stdin.isatty() and sys.stdout.isatty())
34+
except Exception:
35+
return False
36+
37+
38+
def _confirm_llm_usage() -> bool:
39+
answer = input("Use LLM to generate narrative summaries for this run? [y/N]: ").strip().lower()
40+
return answer in {"y", "yes"}
41+
42+
43+
def _prompt_api_key() -> str:
    """Prompt for an API key without echoing it to the terminal.

    Returns:
        The entered key with surrounding whitespace stripped, or ""
        when the prompt fails (no controlling terminal, interrupted
        read, etc.) — the empty string signals "no key obtained".
    """
    try:
        raw_key = getpass.getpass("Enter LLM API key (input hidden): ")
    except Exception:
        return ""
    return raw_key.strip()
48+
49+
3050
def _post_json(url: str, api_key: str, payload: Dict[str, Any], timeout: int = 90) -> Tuple[int, str]:
3151
data = json.dumps(payload).encode("utf-8")
3252
req = urllib.request.Request(
@@ -161,6 +181,8 @@ def _default_llm_payload(enabled: bool, model: str) -> Dict[str, Any]:
161181
"generated_at": common.now_iso(),
162182
"enabled": enabled,
163183
"used": False,
184+
"asked_before_use": False,
185+
"prompted_for_key": False,
164186
"provider": "openai_compatible",
165187
"model": model,
166188
"repo_summary_paragraph": "",
@@ -184,17 +206,41 @@ def generate_llm_descriptions(
184206
docs_payload: Dict[str, Any],
185207
out_dir: Path,
186208
enabled: bool = True,
209+
ask_before_use: bool = False,
210+
prompt_for_key: bool = False,
187211
) -> Dict[str, Any]:
188212
model = os.environ.get("CODE_EXPLAINER_LLM_MODEL", DEFAULT_MODEL).strip() or DEFAULT_MODEL
189213
payload = _default_llm_payload(enabled=enabled, model=model)
190214
if not enabled:
191215
common.write_json(out_dir / "llm_summary.json", payload)
192216
return payload
193217

218+
interactive = _is_interactive_terminal()
219+
if ask_before_use:
220+
payload["asked_before_use"] = True
221+
if not interactive:
222+
payload["enabled"] = False
223+
payload["error"] = "LLM ask-before-use requested but terminal is non-interactive; skipped."
224+
common.write_json(out_dir / "llm_summary.json", payload)
225+
return payload
226+
if not _confirm_llm_usage():
227+
payload["enabled"] = False
228+
payload["error"] = "User declined LLM narrative generation for this run."
229+
common.write_json(out_dir / "llm_summary.json", payload)
230+
return payload
231+
194232
api_key = (
195233
os.environ.get("CODE_EXPLAINER_LLM_API_KEY", "").strip()
196234
or os.environ.get("OPENAI_API_KEY", "").strip()
197235
)
236+
if not api_key and prompt_for_key:
237+
payload["prompted_for_key"] = True
238+
if not interactive:
239+
payload["error"] = "Prompt-for-key requested but terminal is non-interactive and no key was found."
240+
common.write_json(out_dir / "llm_summary.json", payload)
241+
return payload
242+
api_key = _prompt_api_key()
243+
198244
if not api_key:
199245
payload["error"] = "No API key found (set CODE_EXPLAINER_LLM_API_KEY or OPENAI_API_KEY)."
200246
common.write_json(out_dir / "llm_summary.json", payload)
@@ -286,6 +332,8 @@ def main() -> int:
286332
parser.add_argument("--coverage", required=True)
287333
parser.add_argument("--output", required=True)
288334
parser.add_argument("--enabled", default="true")
335+
parser.add_argument("--ask-before-use", default="false")
336+
parser.add_argument("--prompt-for-key", default="false")
289337
args = parser.parse_args()
290338

291339
payload = generate_llm_descriptions(
@@ -301,6 +349,8 @@ def main() -> int:
301349
docs_payload=common.read_json(Path(args.coverage), default={}),
302350
out_dir=Path(args.output).resolve(),
303351
enabled=common.bool_from_string(args.enabled),
352+
ask_before_use=common.bool_from_string(args.ask_before_use),
353+
prompt_for_key=common.bool_from_string(args.prompt_for_key),
304354
)
305355
print(json.dumps({"used": payload.get("used", False), "error": payload.get("error", "")}, indent=2))
306356
return 0

0 commit comments

Comments
 (0)