Skip to content

Commit 58981f1

Browse files
committed
Handle replayed response output items
1 parent 5ae2cc1 commit 58981f1

4 files changed

Lines changed: 103 additions & 8 deletions

File tree

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
from __future__ import annotations
2+
3+
from typing import Any, Iterable
4+
5+
from ..._types import Omit, omit
6+
from ..._models import BaseModel
7+
8+
9+
def sanitize_response_input(
    input: str | Iterable[Any] | None | Omit,
) -> str | list[Any] | None | Omit:
    """Prepare a Responses API ``input`` value for serialization.

    String, ``None``, and omitted inputs pass through untouched.  For an
    iterable of items, any SDK ``BaseModel`` instance (e.g. an output item
    replayed from a previous response) is converted to a plain dict with
    unset and ``None``-valued fields dropped, so null-only optional fields
    are not sent back to the API; non-model items are kept as-is.
    """
    # Pass-through cases: nothing to sanitize.
    if input is omit or input is None or isinstance(input, str):
        return input

    sanitized: list[Any] = []
    for item in input:
        if isinstance(item, BaseModel):
            # exclude_unset/exclude_none strips fields the server would
            # reject (or that were never set) when the item is replayed.
            sanitized.append(item.to_dict(mode="json", exclude_unset=True, exclude_none=True))
        else:
            sanitized.append(item)
    return sanitized

src/openai/resources/responses/input_tokens.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import httpx
99

1010
from ... import _legacy_response
11+
from ._input import sanitize_response_input
1112
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
1213
from ..._utils import maybe_transform, async_maybe_transform
1314
from ..._compat import cached_property
@@ -129,7 +130,7 @@ def count(
129130
body=maybe_transform(
130131
{
131132
"conversation": conversation,
132-
"input": input,
133+
"input": sanitize_response_input(input),
133134
"instructions": instructions,
134135
"model": model,
135136
"parallel_tool_calls": parallel_tool_calls,
@@ -255,7 +256,7 @@ async def count(
255256
body=await async_maybe_transform(
256257
{
257258
"conversation": conversation,
258-
"input": input,
259+
"input": sanitize_response_input(input),
259260
"instructions": instructions,
260261
"model": model,
261262
"parallel_tool_calls": parallel_tool_calls,

src/openai/resources/responses/responses.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from pydantic import BaseModel
1515

1616
from ... import _legacy_response
17+
from ._input import sanitize_response_input
1718
from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
1819
from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
1920
from ..._compat import cached_property
@@ -903,7 +904,7 @@ def create(
903904
"context_management": context_management,
904905
"conversation": conversation,
905906
"include": include,
906-
"input": input,
907+
"input": sanitize_response_input(input),
907908
"instructions": instructions,
908909
"max_output_tokens": max_output_tokens,
909910
"max_tool_calls": max_tool_calls,
@@ -1217,7 +1218,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
12171218
"context_management": context_management,
12181219
"conversation": conversation,
12191220
"include": include,
1220-
"input": input,
1221+
"input": sanitize_response_input(input),
12211222
"instructions": instructions,
12221223
"max_output_tokens": max_output_tokens,
12231224
"max_tool_calls": max_tool_calls,
@@ -1717,7 +1718,7 @@ def compact(
17171718
body=maybe_transform(
17181719
{
17191720
"model": model,
1720-
"input": input,
1721+
"input": sanitize_response_input(input),
17211722
"instructions": instructions,
17221723
"previous_response_id": previous_response_id,
17231724
"prompt_cache_key": prompt_cache_key,
@@ -2570,7 +2571,7 @@ async def create(
25702571
"context_management": context_management,
25712572
"conversation": conversation,
25722573
"include": include,
2573-
"input": input,
2574+
"input": sanitize_response_input(input),
25742575
"instructions": instructions,
25752576
"max_output_tokens": max_output_tokens,
25762577
"max_tool_calls": max_tool_calls,
@@ -2888,7 +2889,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
28882889
"context_management": context_management,
28892890
"conversation": conversation,
28902891
"include": include,
2891-
"input": input,
2892+
"input": sanitize_response_input(input),
28922893
"instructions": instructions,
28932894
"max_output_tokens": max_output_tokens,
28942895
"max_tool_calls": max_tool_calls,
@@ -3388,7 +3389,7 @@ async def compact(
33883389
body=await async_maybe_transform(
33893390
{
33903391
"model": model,
3391-
"input": input,
3392+
"input": sanitize_response_input(input),
33923393
"instructions": instructions,
33933394
"previous_response_id": previous_response_id,
33943395
"prompt_cache_key": prompt_cache_key,

tests/lib/responses/test_responses.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,19 @@
11
from __future__ import annotations
22

3+
import json
34
from typing_extensions import TypeVar
45

6+
import httpx
57
import pytest
68
from respx import MockRouter
79
from inline_snapshot import snapshot
810

911
from openai import OpenAI, AsyncOpenAI
1012
from openai._utils import assert_signatures_in_sync
13+
from openai._compat import parse_obj
14+
from openai.types.responses.response import Response
15+
from openai.types.responses.response_reasoning_item import ResponseReasoningItem
16+
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
1117

1218
from ...conftest import base_url
1319
from ..snapshots import make_snapshot_request
@@ -41,6 +47,73 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
4147
)
4248

4349

50+
@pytest.mark.respx(base_url=base_url)
def test_response_output_items_can_be_replayed_without_null_only_fields(
    client: OpenAI,
    respx_mock: MockRouter,
) -> None:
    """Replaying parsed response output items must drop null-only fields.

    Output items parsed from a prior API response (reasoning items,
    function tool calls) carry optional fields set explicitly to ``None``
    (``encrypted_content``, ``status``).  When such items are passed back
    as ``input`` to ``responses.create``, the serialized request body must
    omit those fields entirely rather than sending ``null`` values.
    """
    # Stub the /responses endpoint with a minimal-but-valid Response payload.
    route = respx_mock.post("/responses").mock(
        return_value=httpx.Response(
            200,
            json={
                "id": "resp_123",
                "object": "response",
                "created_at": 0,
                "model": "gpt-4o-mini",
                "output": [],
                "parallel_tool_calls": True,
                "tool_choice": "auto",
                "tools": [],
            },
        )
    )

    # Build model instances the way the SDK would after parsing a previous
    # response; both carry explicit None-valued optional fields.
    reasoning = parse_obj(
        ResponseReasoningItem,
        {
            "id": "rs_123",
            "type": "reasoning",
            "summary": [],
            "encrypted_content": None,
            "status": None,
        },
    )
    function_call = parse_obj(
        ResponseFunctionToolCall,
        {
            "arguments": "{}",
            "call_id": "call_123",
            "name": "weather",
            "type": "function_call",
            "id": "fc_123",
            "status": None,
        },
    )

    response = client.responses.create(
        model="gpt-4o-mini",
        input=[reasoning, function_call],
    )

    assert isinstance(response, Response)

    # Inspect the raw outgoing request body: the None-only fields
    # (encrypted_content, status) must be absent from both items.
    request_body = json.loads(route.calls[0].request.content.decode("utf-8"))
    assert request_body["input"] == [
        {
            "id": "rs_123",
            "summary": [],
            "type": "reasoning",
        },
        {
            "arguments": "{}",
            "call_id": "call_123",
            "id": "fc_123",
            "name": "weather",
            "type": "function_call",
        },
    ]
115+
116+
44117
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
45118
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
46119
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client

0 commit comments

Comments
 (0)