Skip to content

Commit be9894e

Browse files
committed
feat(responses): add safe follow-up input helper
1 parent b326ab3 commit be9894e

2 files changed

Lines changed: 80 additions & 2 deletions

File tree

src/openai/types/responses/response.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

3-
from typing import List, Union, Optional
3+
from typing import Any, List, Union, Optional, cast
44
from typing_extensions import Literal, TypeAlias
55

66
from .tool import Tool
@@ -23,6 +23,7 @@
2323
from .tool_choice_function import ToolChoiceFunction
2424
from ..shared.responses_model import ResponsesModel
2525
from .tool_choice_apply_patch import ToolChoiceApplyPatch
26+
from .response_input_item_param import ResponseInputItemParam
2627

2728
__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"]
2829

@@ -319,3 +320,16 @@ def output_text(self) -> str:
319320
texts.append(content.text)
320321

321322
return "".join(texts)
323+
324+
@property
def output_as_input(self) -> List[ResponseInputItemParam]:
    """Convert this response's `output` items into follow-up `input` items.

    Each output item is serialized with unset and `None` fields omitted, so the
    resulting dicts can be passed straight back to
    `responses.create(..., input=...)` for the next conversational turn.
    """
    follow_up_items: List[ResponseInputItemParam] = []
    for item in cast(List[Any], self.output):
        serialized = item.to_dict(mode="json", exclude_none=True)
        follow_up_items.append(cast(ResponseInputItemParam, serialized))
    return follow_up_items

tests/lib/responses/test_responses.py

Lines changed: 65 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from __future__ import annotations
22

33
import json
4+
from typing import cast
45
from typing_extensions import TypeVar
56

67
import httpx
@@ -11,7 +12,7 @@
1112
from openai import OpenAI, AsyncOpenAI
1213
from openai._utils import assert_signatures_in_sync
1314
from openai._compat import parse_obj
14-
from openai.types.responses.response import Response
15+
from openai.types.responses import Response, ResponseReasoningItem
1516

1617
from ...conftest import base_url
1718
from ..snapshots import make_snapshot_request
@@ -181,6 +182,69 @@ async def test_async_replayed_response_output_items_can_be_counted_without_null_
181182
assert request_body["input"] == EXPECTED_REPLAYED_OUTPUT_INPUT
182183

183184

185+
def test_output_as_input_omits_null_only_response_fields() -> None:
    """`output_as_input` should drop fields that are merely unset/`None` on output items."""
    reasoning_item_data = {
        "id": "rs_123",
        "summary": [{"text": "Reasoning summary", "type": "summary_text"}],
        "type": "reasoning",
    }
    message_item_data = {
        "id": "msg_123",
        "type": "message",
        "status": "completed",
        "content": [
            {
                "type": "output_text",
                "annotations": [],
                "text": "Paris.",
            }
        ],
        "role": "assistant",
    }

    response = Response.construct(
        id="resp_123",
        created_at=1754925861,
        model="o4-mini",
        object="response",
        output=[reasoning_item_data, message_item_data],
        parallel_tool_calls=True,
        tool_choice="auto",
        tools=[],
    )

    # Sanity check: the parsed reasoning item still carries the optional fields
    # as explicit `None` values when dumped normally.
    reasoning_item = cast(ResponseReasoningItem, response.output[0])
    assert reasoning_item.model_dump() == {
        **reasoning_item_data,
        "content": None,
        "encrypted_content": None,
        "status": None,
    }

    # `output_as_input` strips those None-only fields, round-tripping each item
    # back to the shape it was constructed from.
    assert response.output_as_input == [reasoning_item_data, message_item_data]
246+
247+
184248
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
185249
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
186250
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client

0 commit comments

Comments
 (0)