|
17 | 17 |
|
18 | 18 |
|
19 | 19 | from llm_client import LLMAPIClient |
20 | | -from llm_configs import ClaudeConfig, GeminiConfig, GrokConfig, NvidiaNIMConfig, PerplexityConfig |
| 20 | +from llm_utils import get_config_class, get_model_key_from_env |
21 | 21 |
|
22 | 22 | import prompt |
23 | 23 |
|
@@ -144,127 +144,6 @@ def write_token_usage( |
144 | 144 | logging.warning(f"Could not write token usage: {e}") |
145 | 145 |
|
146 | 146 |
|
def get_startwith(key:str, dictionary:dict) -> Any:
    """Return the value of the first dict entry whose key is a prefix of *key*.

    Entries are tried in the dictionary's insertion order; ``None`` is
    returned when no key of *dictionary* prefixes *key*.
    """
    return next(
        (value for prefix, value in dictionary.items() if key.startswith(prefix)),
        None,
    )
155 | | - |
def get_model_key_from_env() -> Tuple[str, str]:
    """
    Extracts the LLM model and API key from environment variables with flexible selection.

    Resolution order (first case that applies wins):
      1. INPUT_API-KEY is set: use it with INPUT_MODEL (default 'gemini-2.5-flash').
      2. No key at all: raise ValueError listing every accepted variable.
      3. Exactly one provider-specific key set: use that provider.
      4. INPUT_MODEL is a known precise model ID whose provider key is set.
      5. INPUT_MODEL matched to a provider key by name prefix.
      6. A Gemini key is set: fall back to 'gemini-2.5-flash'.
      7. Otherwise: raise ValueError.

    Returns:
        Tuple[str, str]: (model identifier, API key).
        NOTE(review): in case 3 the *provider* name (e.g. 'claude') is returned
        as the model string — confirm downstream accepts provider names here.

    Raises:
        ValueError: if no usable API key can be resolved (cases 2 and 7).
    """
    api_key_dict = get_api_key_dict_from_env()
    # Keep only providers whose key is set and non-blank.
    valid_keys_dict = {k: v for k, v in api_key_dict.items() if v and v.strip()}

    model = os.getenv('INPUT_MODEL', '').lower()
    general_api_key = os.getenv('INPUT_API-KEY', '').strip()

    # Model-to-provider mapping for precise model IDs.
    model_to_provider = {
        'google/gemma-2-9b-it': 'nvidia_nim',
        'sonar': 'perplexity',
        'gemini-2.5-flash': 'gemini',
        'grok-code-fast': 'grok',
        'claude-sonnet-4-20250514': 'claude'
    }

    # Case 1: Use INPUT_API-KEY if provided
    if general_api_key:
        selected_model = model or 'gemini-2.5-flash'  # Default to specific Gemini model
        logging.info(f"Using INPUT_API-KEY for model: {selected_model}")
        return selected_model, general_api_key

    # Case 2: No INPUT_API-KEY, check model-specific keys
    if not valid_keys_dict:
        raise ValueError(
            "No API keys provided. Set at least one of:\n"
            "\tINPUT_API-KEY\n"
            "\tINPUT_CLAUDE_API_KEY\n"
            "\tINPUT_GEMINI-API-KEY\n"
            "\tINPUT_GROK-API-KEY\n"
            "\tINPUT_NVIDIA-API-KEY\n"
            "\tINPUT_PERPLEXITY-API-KEY\n"
        )

    # Case 3: Only one API key available
    if len(valid_keys_dict) == 1:
        selected_model, api_key = next(iter(valid_keys_dict.items()))
        logging.info(f"Using single available model: {selected_model}")
        return selected_model, api_key.strip()

    # Case 4: Use model-to-provider mapping for specified model
    provider = model_to_provider.get(model, None)
    if model and provider and provider in valid_keys_dict:
        logging.info(f"Using mapped model: {model} with provider: {provider}")
        return model, valid_keys_dict[provider].strip()

    # Case 5: Fallback to provider-based matching (model name prefixed by provider)
    if model:
        api_key = get_startwith(model, valid_keys_dict)
        if api_key:
            logging.info(f"Using specified model with provider matching: {model}")
            return model, api_key.strip()

    # Case 6: Fallback to Gemini if available
    if 'gemini' in valid_keys_dict:
        logging.info("Falling back to Gemini model")
        return 'gemini-2.5-flash', valid_keys_dict['gemini'].strip()

    # Case 7: No matching model or Gemini
    raise ValueError(
        f"No API key provided for specified model '{model}' and Gemini not available. "
        f"Available models: {', '.join(valid_keys_dict.keys())}"
    )
227 | | - |
def get_api_key_dict_from_env() -> Dict[str, str]:
    """
    Retrieves API keys for different models from environment variables.
    Returns empty strings for unset variables.
    """
    env_var_by_provider = (
        ('claude', 'INPUT_CLAUDE_API_KEY'),
        ('gemini', 'INPUT_GEMINI-API-KEY'),
        ('grok', 'INPUT_GROK-API-KEY'),
        ('nvidia_nim', 'INPUT_NVIDIA-API-KEY'),
        ('perplexity', 'INPUT_PERPLEXITY-API-KEY'),
    )
    return {provider: os.getenv(var, '') for provider, var in env_var_by_provider}
241 | | - |
def get_config_class_dict() -> Dict[str, type]:
    """
    Returns a dictionary mapping model names to their respective configuration classes.

    Each provider is listed under its short provider name followed by its
    precise model ID; insertion order is preserved because callers match
    model names by prefix against these keys.
    """
    alias_groups = (
        (ClaudeConfig, ('claude', 'claude-sonnet-4-20250514')),
        (GeminiConfig, ('gemini', 'gemini-2.5-flash')),
        (GrokConfig, ('grok', 'grok-code-fast')),
        (NvidiaNIMConfig, ('nvidia_nim', 'google/gemma-2-9b-it')),
        (PerplexityConfig, ('perplexity', 'sonar')),
    )
    return {alias: cfg for cfg, aliases in alias_groups for alias in aliases}
259 | | - |
def get_config_class(model: str) -> type:
    """Resolve *model* to its configuration class by prefix-matching the
    known model/provider names, raising ValueError for unknown models."""
    config_map = get_config_class_dict()
    matched = get_startwith(model, config_map)
    if matched is None:
        raise ValueError(f"Unsupported LLM type: {model}. Use {', '.join(config_map.keys())}")
    return matched
267 | | - |
268 | 147 | def get_path_tuple(paths_str: str) -> Tuple[pathlib.Path]: |
269 | 148 | """ |
270 | 149 | Converts a comma-separated string of file paths to a tuple of pathlib.Path objects. |
|
0 commit comments