1313from typing import Any , Dict , List , Tuple
1414
# Directory containing this script; used both for sibling imports and to
# locate the skill-local .env file one level up.
SCRIPT_DIR = Path(__file__).resolve().parent
SKILL_DIR = SCRIPT_DIR.parent
# Skill-local dotenv file used as a fallback store for the LLM API key.
LOCAL_ENV_PATH = SKILL_DIR / ".env"
# Make sibling modules (e.g. `common`) importable when run as a script.
if str(SCRIPT_DIR) not in sys.path:
    sys.path.insert(0, str(SCRIPT_DIR))
1820
@@ -47,6 +49,112 @@ def _prompt_api_key() -> str:
4749 return ""
4850
4951
def _confirm_persist_key(env_path: Path) -> bool:
    """Ask on stdin whether the LLM API key should be saved to *env_path*.

    Returns True only for an explicit "y"/"yes" reply; the default (empty
    or anything else) is a no.
    """
    reply = input(f"Save the LLM API key to {env_path.as_posix()} for future runs? [y/N]: ")
    return reply.strip().lower() in ("y", "yes")
55+
56+
57+ def _read_local_env (path : Path ) -> Dict [str , str ]:
58+ values : Dict [str , str ] = {}
59+ if not path .exists ():
60+ return values
61+ for raw in path .read_text (encoding = "utf-8" , errors = "ignore" ).splitlines ():
62+ line = raw .strip ()
63+ if not line or line .startswith ("#" ) or "=" not in line :
64+ continue
65+ key , value = line .split ("=" , 1 )
66+ values [key .strip ()] = value .strip ().strip ('"' ).strip ("'" )
67+ return values
68+
69+
70+ def _write_local_env_key (path : Path , key : str , value : str ) -> None :
71+ existing_lines : List [str ] = []
72+ if path .exists ():
73+ existing_lines = path .read_text (encoding = "utf-8" , errors = "ignore" ).splitlines ()
74+
75+ updated = False
76+ next_lines : List [str ] = []
77+ for raw in existing_lines :
78+ if raw .strip ().startswith (f"{ key } =" ):
79+ next_lines .append (f"{ key } ={ value } " )
80+ updated = True
81+ else :
82+ next_lines .append (raw )
83+ if not updated :
84+ next_lines .append (f"{ key } ={ value } " )
85+
86+ path .write_text ("\n " .join (next_lines ).rstrip () + "\n " , encoding = "utf-8" )
87+
88+
def resolve_llm_runtime(
    prompt_for_key: bool = True,
    persist_key_mode: str = "ask",
    require_key: bool = True,
) -> Dict[str, Any]:
    """Resolve the LLM model, base URL and API key for this run.

    Key lookup order: ``CODE_EXPLAINER_LLM_API_KEY`` env var, then
    ``OPENAI_API_KEY`` env var, then the same two names from the
    skill-local ``.env`` file, then (when *prompt_for_key*) an
    interactive terminal prompt.

    Args:
        prompt_for_key: Prompt on the terminal when no key is found in the
            environment or the local .env file.
        persist_key_mode: "true" saves a freshly prompted key to the local
            .env file, "ask" asks the user first, any other value never
            persists.
        require_key: Raise instead of returning an empty ``api_key``.

    Returns:
        Dict with keys ``api_key``, ``model``, ``base_url``,
        ``prompted_for_key``, ``persisted_key``, ``key_source`` and
        ``env_path``.

    Raises:
        RuntimeError: A key is required but missing, or a prompt is needed
            on a non-interactive terminal.
    """
    # Model/base URL come from the environment with hard-coded fallbacks.
    model = os.environ.get("CODE_EXPLAINER_LLM_MODEL", DEFAULT_MODEL).strip() or DEFAULT_MODEL
    base_url = _normalize_base_url(os.environ.get("CODE_EXPLAINER_LLM_BASE_URL", DEFAULT_BASE_URL))
    local_env = _read_local_env(LOCAL_ENV_PATH)
    interactive = _is_interactive_terminal()
    prompted = False
    persisted = False
    key_source = ""  # human-readable provenance of the key, for reporting

    # Process environment wins over the local .env file; the
    # CODE_EXPLAINER-specific name wins over the generic OPENAI name.
    api_key = os.environ.get("CODE_EXPLAINER_LLM_API_KEY", "").strip()
    if api_key:
        key_source = "environment:CODE_EXPLAINER_LLM_API_KEY"
    if not api_key:
        api_key = os.environ.get("OPENAI_API_KEY", "").strip()
        if api_key:
            key_source = "environment:OPENAI_API_KEY"
    if not api_key:
        api_key = local_env.get("CODE_EXPLAINER_LLM_API_KEY", "").strip()
        if api_key:
            key_source = f"local-env:{LOCAL_ENV_PATH.name}"
    if not api_key:
        api_key = local_env.get("OPENAI_API_KEY", "").strip()
        if api_key:
            key_source = f"local-env:{LOCAL_ENV_PATH.name}"

    if not api_key and prompt_for_key:
        prompted = True
        if not interactive:
            # Cannot prompt without a TTY; fail with actionable guidance.
            raise RuntimeError(
                "No LLM API key was found and this terminal cannot prompt. Set CODE_EXPLAINER_LLM_API_KEY or OPENAI_API_KEY, "
                f"or add CODE_EXPLAINER_LLM_API_KEY to {LOCAL_ENV_PATH.as_posix()}."
            )
        api_key = _prompt_api_key()
        if not api_key and require_key:
            raise RuntimeError("LLM API key is required for this skill and was not provided.")
        key_source = "prompt"
        # NOTE(review): persistence applies only to keys obtained via the
        # prompt (keys from the environment or .env need no re-saving);
        # the raise above also guarantees we are interactive before
        # _confirm_persist_key reads stdin.
        persist_mode = (persist_key_mode or "ask").strip().lower()
        should_persist = False
        if api_key:
            if persist_mode == "true":
                should_persist = True
            elif persist_mode == "ask":
                should_persist = _confirm_persist_key(LOCAL_ENV_PATH)
        if should_persist:
            _write_local_env_key(LOCAL_ENV_PATH, "CODE_EXPLAINER_LLM_API_KEY", api_key)
            persisted = True
            key_source = f"local-env:{LOCAL_ENV_PATH.name}"

    if not api_key and require_key:
        raise RuntimeError(
            "No LLM API key found. Set CODE_EXPLAINER_LLM_API_KEY or OPENAI_API_KEY, "
            f"or add CODE_EXPLAINER_LLM_API_KEY to {LOCAL_ENV_PATH.as_posix()}."
        )

    return {
        "api_key": api_key,
        "model": model,
        "base_url": base_url,
        "prompted_for_key": prompted,
        "persisted_key": persisted,
        "key_source": key_source,
        "env_path": LOCAL_ENV_PATH.as_posix(),
    }
156+
157+
50158def _post_json (url : str , api_key : str , payload : Dict [str , Any ], timeout : int = 90 ) -> Tuple [int , str ]:
51159 data = json .dumps (payload ).encode ("utf-8" )
52160 req = urllib .request .Request (
@@ -108,6 +216,9 @@ def _default_llm_payload(enabled: bool, model: str) -> Dict[str, Any]:
108216 "diagram_briefs" : [],
109217 "caveats" : [],
110218 "confidence_notes" : [],
219+ "key_source" : "" ,
220+ "persisted_key" : False ,
221+ "env_path" : LOCAL_ENV_PATH .as_posix (),
111222 "error" : "" ,
112223 }
113224
@@ -268,7 +379,9 @@ def generate_llm_descriptions(
268379 out_dir : Path ,
269380 enabled : bool = True ,
270381 ask_before_use : bool = False ,
271- prompt_for_key : bool = False ,
382+ prompt_for_key : bool = True ,
383+ persist_key_mode : str = "ask" ,
384+ resolved_runtime : Dict [str , Any ] | None = None ,
272385) -> Dict [str , Any ]:
273386 del repo_root , index_payload , entry_payload , docs_payload
274387 model = os .environ .get ("CODE_EXPLAINER_LLM_MODEL" , DEFAULT_MODEL ).strip () or DEFAULT_MODEL
@@ -308,27 +421,34 @@ def generate_llm_descriptions(
308421 common .write_json (out_dir / "llm_summary.json" , payload )
309422 return payload
310423
311- api_key = (
312- os .environ .get ("CODE_EXPLAINER_LLM_API_KEY" , "" ).strip ()
313- or os .environ .get ("OPENAI_API_KEY" , "" ).strip ()
314- )
315- if not api_key and prompt_for_key :
316- payload ["prompted_for_key" ] = True
317- if not interactive :
318- payload ["error" ] = "Prompt-for-key requested but terminal is non-interactive and no key was found."
424+ runtime = resolved_runtime
425+ if runtime is None :
426+ try :
427+ runtime = resolve_llm_runtime (
428+ prompt_for_key = prompt_for_key ,
429+ persist_key_mode = persist_key_mode ,
430+ require_key = True ,
431+ )
432+ except RuntimeError as exc :
433+ payload ["error" ] = str (exc )
319434 common .write_json (out_dir / "llm_summary.json" , payload )
320435 return payload
321- api_key = _prompt_api_key ()
436+
437+ api_key = str (runtime .get ("api_key" , "" )).strip ()
438+ payload ["prompted_for_key" ] = bool (runtime .get ("prompted_for_key" , False ))
439+ payload ["persisted_key" ] = bool (runtime .get ("persisted_key" , False ))
440+ payload ["key_source" ] = str (runtime .get ("key_source" , "" )).strip ()
441+ payload ["env_path" ] = str (runtime .get ("env_path" , LOCAL_ENV_PATH .as_posix ()))
442+ payload ["model" ] = str (runtime .get ("model" , model )).strip () or model
322443
323444 if not api_key :
324445 payload ["error" ] = "No API key found (set CODE_EXPLAINER_LLM_API_KEY or OPENAI_API_KEY)."
325446 common .write_json (out_dir / "llm_summary.json" , payload )
326447 return payload
327448
328- base_url = _normalize_base_url (os .environ .get ("CODE_EXPLAINER_LLM_BASE_URL" , DEFAULT_BASE_URL ))
329- endpoint = f"{ base_url } /chat/completions"
449+ endpoint = f"{ str (runtime .get ('base_url' , DEFAULT_BASE_URL )).rstrip ('/' )} /chat/completions"
330450 request_payload = {
331- "model" : model ,
451+ "model" : payload [ " model" ] ,
332452 "temperature" : 0.15 ,
333453 "response_format" : {"type" : "json_object" },
334454 "messages" : _request_messages (request_context ),
@@ -394,7 +514,8 @@ def main() -> int:
394514 parser .add_argument ("--output" , required = True )
395515 parser .add_argument ("--enabled" , default = "true" )
396516 parser .add_argument ("--ask-before-use" , default = "false" )
397- parser .add_argument ("--prompt-for-key" , default = "false" )
517+ parser .add_argument ("--prompt-for-key" , default = "true" )
518+ parser .add_argument ("--persist-key" , default = "ask" )
398519 args = parser .parse_args ()
399520
400521 payload = generate_llm_descriptions (
@@ -415,6 +536,7 @@ def main() -> int:
415536 enabled = common .bool_from_string (args .enabled ),
416537 ask_before_use = common .bool_from_string (args .ask_before_use ),
417538 prompt_for_key = common .bool_from_string (args .prompt_for_key ),
539+ persist_key_mode = args .persist_key ,
418540 )
419541 print (json .dumps ({"used" : payload .get ("used" , False ), "provider" : payload .get ("provider" , "" ), "error" : payload .get ("error" , "" )}, indent = 2 ))
420542 return 0
0 commit comments