|
1 | 1 | from __future__ import annotations |
2 | 2 |
|
3 | 3 | import json |
| 4 | +from typing import cast |
4 | 5 | from typing_extensions import TypeVar |
5 | 6 |
|
6 | 7 | import httpx |
|
11 | 12 | from openai import OpenAI, AsyncOpenAI |
12 | 13 | from openai._utils import assert_signatures_in_sync |
13 | 14 | from openai._compat import parse_obj |
14 | | -from openai.types.responses.response import Response |
| 15 | +from openai.types.responses import Response, ResponseReasoningItem |
15 | 16 |
|
16 | 17 | from ...conftest import base_url |
17 | 18 | from ..snapshots import make_snapshot_request |
@@ -181,6 +182,69 @@ async def test_async_replayed_response_output_items_can_be_counted_without_null_ |
181 | 182 | assert request_body["input"] == EXPECTED_REPLAYED_OUTPUT_INPUT |
182 | 183 |
|
183 | 184 |
|
def test_output_as_input_omits_null_only_response_fields() -> None:
    """`output_as_input` must drop fields whose value is only ever null.

    A reasoning output item serialized via ``model_dump`` still carries
    explicit ``None`` entries (``content``, ``encrypted_content``,
    ``status``); when the output is converted back into request input,
    those null-only fields must be omitted entirely.
    """
    # Shared payloads: used both to build the Response and as the
    # expected shape of `output_as_input`.
    reasoning_payload = {
        "id": "rs_123",
        "summary": [{"text": "Reasoning summary", "type": "summary_text"}],
        "type": "reasoning",
    }
    message_payload = {
        "id": "msg_123",
        "type": "message",
        "status": "completed",
        "content": [
            {
                "type": "output_text",
                "annotations": [],
                "text": "Paris.",
            }
        ],
        "role": "assistant",
    }

    response = Response.construct(
        id="resp_123",
        created_at=1754925861,
        model="o4-mini",
        object="response",
        output=[reasoning_payload, message_payload],
        parallel_tool_calls=True,
        tool_choice="auto",
        tools=[],
    )

    # The parsed model still exposes the null-only fields explicitly...
    reasoning_item = cast(ResponseReasoningItem, response.output[0])
    assert reasoning_item.model_dump() == {
        **reasoning_payload,
        "content": None,
        "encrypted_content": None,
        "status": None,
    }

    # ...but converting the output back to input omits them.
    assert response.output_as_input == [reasoning_payload, message_payload]
| 246 | + |
| 247 | + |
184 | 248 | @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) |
185 | 249 | def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None: |
186 | 250 | checking_client: OpenAI | AsyncOpenAI = client if sync else async_client |
|
0 commit comments