|
12 | 12 | from openai._utils import assert_signatures_in_sync |
13 | 13 | from openai._compat import parse_obj |
14 | 14 | from openai.types.responses.response import Response |
15 | | -from openai.types.responses.response_reasoning_item import ResponseReasoningItem |
16 | | -from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall |
17 | 15 |
|
18 | 16 | from ...conftest import base_url |
19 | 17 | from ..snapshots import make_snapshot_request |
|
27 | 25 | # `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""` |
28 | 26 |
|
29 | 27 |
|
# Wire-level form expected when replayed response output items are sent back
# as request input: null-only fields (``status``, ``encrypted_content``,
# ``logprobs``) must have been stripped from the serialized payload.
_EXPECTED_REASONING_ITEM = {
    "id": "rs_123",
    "summary": [],
    "type": "reasoning",
}

_EXPECTED_FUNCTION_CALL_ITEM = {
    "arguments": "{}",
    "call_id": "call_123",
    "id": "fc_123",
    "name": "weather",
    "type": "function_call",
}

_EXPECTED_MESSAGE_ITEM = {
    "content": [
        {
            "annotations": [],
            "text": "The weather is sunny.",
            "type": "output_text",
        }
    ],
    "id": "msg_123",
    "phase": "final_answer",
    "role": "assistant",
    "status": "completed",
    "type": "message",
}

EXPECTED_REPLAYED_OUTPUT_INPUT = [
    _EXPECTED_REASONING_ITEM,
    _EXPECTED_FUNCTION_CALL_ITEM,
    _EXPECTED_MESSAGE_ITEM,
]
| 56 | + |
| 57 | + |
def make_replayed_response_output() -> list[object]:
    """Parse a canned ``Response`` payload and return its output items.

    The raw payload deliberately carries null-only fields (``status``,
    ``encrypted_content``, ``logprobs``) so tests can verify those fields are
    dropped again when the parsed items are replayed as request input.
    """
    reasoning_item = {
        "id": "rs_123",
        "type": "reasoning",
        "summary": [],
        "encrypted_content": None,
        "status": None,
    }
    function_call_item = {
        "arguments": "{}",
        "call_id": "call_123",
        "name": "weather",
        "type": "function_call",
        "id": "fc_123",
        "status": None,
    }
    message_item = {
        "id": "msg_123",
        "type": "message",
        "status": "completed",
        "role": "assistant",
        "phase": "final_answer",
        "content": [
            {
                "type": "output_text",
                "annotations": [],
                "logprobs": None,
                "text": "The weather is sunny.",
            }
        ],
    }
    raw_response = {
        "id": "resp_123",
        "object": "response",
        "created_at": 0,
        "model": "gpt-4o-mini",
        "output": [reasoning_item, function_call_item, message_item],
        "parallel_tool_calls": True,
        "tool_choice": "auto",
        "tools": [],
    }
    return parse_obj(Response, raw_response).output
| 104 | + |
| 105 | + |
30 | 106 | @pytest.mark.respx(base_url=base_url) |
31 | 107 | def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None: |
32 | 108 | response = make_snapshot_request( |
@@ -68,50 +144,41 @@ def test_response_output_items_can_be_replayed_without_null_only_fields( |
68 | 144 | ) |
69 | 145 | ) |
70 | 146 |
|
71 | | - reasoning = parse_obj( |
72 | | - ResponseReasoningItem, |
73 | | - { |
74 | | - "id": "rs_123", |
75 | | - "type": "reasoning", |
76 | | - "summary": [], |
77 | | - "encrypted_content": None, |
78 | | - "status": None, |
79 | | - }, |
| 147 | + response = client.responses.create( |
| 148 | + model="gpt-4o-mini", |
| 149 | + input=make_replayed_response_output(), |
80 | 150 | ) |
81 | | - function_call = parse_obj( |
82 | | - ResponseFunctionToolCall, |
83 | | - { |
84 | | - "arguments": "{}", |
85 | | - "call_id": "call_123", |
86 | | - "name": "weather", |
87 | | - "type": "function_call", |
88 | | - "id": "fc_123", |
89 | | - "status": None, |
90 | | - }, |
| 151 | + |
| 152 | + assert isinstance(response, Response) |
| 153 | + |
| 154 | + request_body = json.loads(route.calls[0].request.content.decode("utf-8")) |
| 155 | + assert request_body["input"] == EXPECTED_REPLAYED_OUTPUT_INPUT |
| 156 | + |
| 157 | + |
@pytest.mark.respx(base_url=base_url)
async def test_async_replayed_response_output_items_can_be_counted_without_null_only_fields(
    async_client: AsyncOpenAI,
    respx_mock: MockRouter,
) -> None:
    """Replayed output items passed to the async token-count endpoint must be
    serialized without their null-only fields in the outgoing request body."""
    count_route = respx_mock.post("/responses/input_tokens").mock(
        return_value=httpx.Response(
            200,
            json={"input_tokens": 3, "object": "response.input_tokens"},
        )
    )

    result = await async_client.responses.input_tokens.count(
        model="gpt-4o-mini",
        input=make_replayed_response_output(),
    )

    assert result.input_tokens == 3

    # Inspect the recorded request to confirm the serialized input matches the
    # expected wire form (no null-only fields).
    sent_body = json.loads(count_route.calls[0].request.content.decode("utf-8"))
    assert sent_body["input"] == EXPECTED_REPLAYED_OUTPUT_INPUT
115 | 182 |
|
116 | 183 |
|
117 | 184 | @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) |
|
0 commit comments