|
27 | 27 | from google.adk.models.lite_llm import _append_fallback_user_content_if_missing |
28 | 28 | from google.adk.models.lite_llm import _content_to_message_param |
29 | 29 | from google.adk.models.lite_llm import _enforce_strict_openai_schema |
| 30 | +from google.adk.models.lite_llm import _extract_reasoning_value |
30 | 31 | from google.adk.models.lite_llm import _FILE_ID_REQUIRED_PROVIDERS |
31 | 32 | from google.adk.models.lite_llm import _FINISH_REASON_MAPPING |
32 | 33 | from google.adk.models.lite_llm import _function_declaration_to_tool_param |
@@ -2285,6 +2286,139 @@ def test_model_response_to_generate_content_response_reasoning_content(): |
2285 | 2286 | assert response.content.parts[1].text == "Answer" |
2286 | 2287 |
|
2287 | 2288 |
|
def test_message_to_generate_content_response_reasoning_field():
  """Verify the alternative 'reasoning' field (LM Studio, vLLM) is honored."""
  raw_message = {
      "role": "assistant",
      "content": "Final answer",
      "reasoning": "Thinking process",
  }

  result = _message_to_generate_content_response(raw_message)

  # The reasoning text must surface first as a thought part, followed by
  # the visible answer text.
  assert len(result.content.parts) == 2
  thought, answer = result.content.parts
  assert thought.thought is True
  assert thought.text == "Thinking process"
  assert answer.text == "Final answer"
| 2304 | + |
| 2305 | + |
def test_model_response_to_generate_content_response_reasoning_field():
  """Verify ModelResponse messages carrying a 'reasoning' field are mapped."""
  assistant_message = {
      "role": "assistant",
      "content": "Result",
      "reasoning": "Chain of thought",
  }
  model_response = ModelResponse(
      model="test-model",
      choices=[{"message": assistant_message, "finish_reason": "stop"}],
  )

  result = _model_response_to_generate_content_response(model_response)

  # Reasoning becomes the leading thought part; content follows as text.
  thought, answer = result.content.parts[0], result.content.parts[1]
  assert thought.thought is True
  assert thought.text == "Chain of thought"
  assert answer.text == "Result"
| 2325 | + |
| 2326 | + |
def test_reasoning_content_takes_precedence_over_reasoning():
  """Verify 'reasoning_content' wins when both reasoning fields are present."""
  raw_message = {
      "role": "assistant",
      "content": "Answer",
      "reasoning_content": "LiteLLM standard reasoning",
      "reasoning": "Alternative reasoning",
  }

  result = _message_to_generate_content_response(raw_message)

  assert len(result.content.parts) == 2
  # The LiteLLM-standard field must be the one surfaced as the thought.
  assert result.content.parts[0].thought is True
  assert result.content.parts[0].text == "LiteLLM standard reasoning"
| 2341 | + |
| 2342 | + |
def test_extract_reasoning_value_from_reasoning_content():
  """Verify extraction from the LiteLLM-standard reasoning_content attr."""
  assert (
      _extract_reasoning_value(
          ChatCompletionAssistantMessage(
              role="assistant",
              content="Answer",
              reasoning_content="LiteLLM reasoning",
          )
      )
      == "LiteLLM reasoning"
  )
| 2352 | + |
| 2353 | + |
def test_extract_reasoning_value_from_reasoning():
  """Verify extraction from the 'reasoning' attribute (LM Studio, vLLM)."""

  class FakeMessage:
    """Duck-typed message exposing attributes plus dict-style .get()."""

    role = "assistant"
    content = "Answer"
    reasoning = "Alternative reasoning"

    def get(self, key, default=None):
      # Mirror Mapping.get so the extractor can treat us like a dict.
      return getattr(self, key, default)

  assert _extract_reasoning_value(FakeMessage()) == "Alternative reasoning"
| 2370 | + |
| 2371 | + |
def test_extract_reasoning_value_dict_reasoning_content():
  """Verify extraction from a plain dict carrying reasoning_content."""
  extracted = _extract_reasoning_value({
      "role": "assistant",
      "content": "Answer",
      "reasoning_content": "Dict reasoning content",
  })
  assert extracted == "Dict reasoning content"
| 2381 | + |
| 2382 | + |
def test_extract_reasoning_value_dict_reasoning():
  """Verify extraction from a plain dict carrying only 'reasoning'."""
  extracted = _extract_reasoning_value({
      "role": "assistant",
      "content": "Answer",
      "reasoning": "Dict reasoning",
  })
  assert extracted == "Dict reasoning"
| 2392 | + |
| 2393 | + |
def test_extract_reasoning_value_dict_prefers_reasoning_content():
  """Verify dict extraction prefers reasoning_content over reasoning."""
  extracted = _extract_reasoning_value({
      "role": "assistant",
      "content": "Answer",
      "reasoning_content": "Primary",
      "reasoning": "Secondary",
  })
  assert extracted == "Primary"
| 2404 | + |
| 2405 | + |
def test_extract_reasoning_value_none_message():
  """Verify a None message yields None rather than raising."""
  assert _extract_reasoning_value(None) is None
| 2410 | + |
| 2411 | + |
def test_extract_reasoning_value_no_reasoning_fields():
  """Verify a message with no reasoning fields extracts to None."""
  plain_message = {"role": "assistant", "content": "Answer only"}
  assert _extract_reasoning_value(plain_message) is None
| 2420 | + |
| 2421 | + |
2288 | 2422 | def test_parse_tool_calls_from_text_multiple_calls(): |
2289 | 2423 | text = ( |
2290 | 2424 | '{"name":"alpha","arguments":{"value":1}}\n' |
|
0 commit comments