Skip to content

Commit ba41e84

Browse files
pakrym-oaicodex
and authored
Use model catalog default for reasoning summary fallback (openai#12873)
## Summary

- Make `Config.model_reasoning_summary` optional so unset means "use the model default"
- Resolve the optional config value to a concrete summary when building `TurnContext`
- Add protocol support for `default_reasoning_summary` in model metadata

## Validation

- `cargo test -p codex-core --lib client::tests -- --nocapture`

---------

Co-authored-by: Codex <noreply@openai.com>
1 parent f0a85de commit ba41e84

24 files changed

Lines changed: 175 additions & 40 deletions

File tree

codex-rs/app-server/tests/common/models_cache.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
use chrono::DateTime;
22
use chrono::Utc;
33
use codex_core::test_support::all_model_presets;
4+
use codex_protocol::config_types::ReasoningSummary;
45
use codex_protocol::openai_models::ConfigShellToolType;
56
use codex_protocol::openai_models::ModelInfo;
67
use codex_protocol::openai_models::ModelPreset;
@@ -30,6 +31,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
3031
base_instructions: "base instructions".to_string(),
3132
model_messages: None,
3233
supports_reasoning_summaries: false,
34+
default_reasoning_summary: ReasoningSummary::Auto,
3335
support_verbosity: false,
3436
default_verbosity: None,
3537
apply_patch_tool_type: None,

codex-rs/codex-api/tests/models_integration.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ use codex_api::ModelsClient;
33
use codex_api::provider::Provider;
44
use codex_api::provider::RetryConfig;
55
use codex_client::ReqwestTransport;
6+
use codex_protocol::config_types::ReasoningSummary;
67
use codex_protocol::openai_models::ConfigShellToolType;
78
use codex_protocol::openai_models::ModelInfo;
89
use codex_protocol::openai_models::ModelVisibility;
@@ -78,6 +79,7 @@ async fn models_client_hits_models_endpoint() {
7879
base_instructions: "base instructions".to_string(),
7980
model_messages: None,
8081
supports_reasoning_summaries: false,
82+
default_reasoning_summary: ReasoningSummary::Auto,
8183
support_verbosity: false,
8284
default_verbosity: None,
8385
apply_patch_tool_type: None,

codex-rs/core/src/codex.rs

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -754,7 +754,7 @@ pub(crate) struct SessionConfiguration {
754754
provider: ModelProviderInfo,
755755

756756
collaboration_mode: CollaborationMode,
757-
model_reasoning_summary: ReasoningSummaryConfig,
757+
model_reasoning_summary: Option<ReasoningSummaryConfig>,
758758

759759
/// Developer instructions that supplement the base instructions.
760760
developer_instructions: Option<String>,
@@ -824,7 +824,7 @@ impl SessionConfiguration {
824824
next_configuration.collaboration_mode = collaboration_mode;
825825
}
826826
if let Some(summary) = updates.reasoning_summary {
827-
next_configuration.model_reasoning_summary = summary;
827+
next_configuration.model_reasoning_summary = Some(summary);
828828
}
829829
if let Some(personality) = updates.personality {
830830
next_configuration.personality = Some(personality);
@@ -985,7 +985,9 @@ impl Session {
985985
skills_outcome: Arc<SkillLoadOutcome>,
986986
) -> TurnContext {
987987
let reasoning_effort = session_configuration.collaboration_mode.reasoning_effort();
988-
let reasoning_summary = session_configuration.model_reasoning_summary;
988+
let reasoning_summary = session_configuration
989+
.model_reasoning_summary
990+
.unwrap_or(model_info.default_reasoning_summary);
989991
let otel_manager = otel_manager.clone().with_model(
990992
session_configuration.collaboration_mode.model(),
991993
model_info.slug.as_str(),
@@ -1271,7 +1273,9 @@ impl Session {
12711273
otel_manager.conversation_starts(
12721274
config.model_provider.name.as_str(),
12731275
session_configuration.collaboration_mode.reasoning_effort(),
1274-
config.model_reasoning_summary,
1276+
config
1277+
.model_reasoning_summary
1278+
.unwrap_or(ReasoningSummaryConfig::Auto),
12751279
config.model_context_window,
12761280
config.model_auto_compact_token_limit,
12771281
config.permissions.approval_policy.value(),
@@ -4635,7 +4639,9 @@ async fn spawn_review_thread(
46354639
let provider_for_context = provider.clone();
46364640
let otel_manager_for_context = otel_manager.clone();
46374641
let reasoning_effort = per_turn_config.model_reasoning_effort;
4638-
let reasoning_summary = per_turn_config.model_reasoning_summary;
4642+
let reasoning_summary = per_turn_config
4643+
.model_reasoning_summary
4644+
.unwrap_or(model_info.default_reasoning_summary);
46394645
let session_source = parent_turn_context.session_source.clone();
46404646

46414647
let per_turn_config = Arc::new(per_turn_config);

codex-rs/core/src/config/mod.rs

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -412,9 +412,9 @@ pub struct Config {
412412
/// global default").
413413
pub plan_mode_reasoning_effort: Option<ReasoningEffort>,
414414

415-
/// If not "none", the value to use for `reasoning.summary` when making a
416-
/// request using the Responses API.
417-
pub model_reasoning_summary: ReasoningSummary,
415+
/// Optional value to use for `reasoning.summary` when making a request
416+
/// using the Responses API. When unset, the model catalog default is used.
417+
pub model_reasoning_summary: Option<ReasoningSummary>,
418418

419419
/// Optional override to force-enable reasoning summaries for the configured model.
420420
pub model_supports_reasoning_summaries: Option<bool>,
@@ -2141,8 +2141,7 @@ impl Config {
21412141
.or(cfg.plan_mode_reasoning_effort),
21422142
model_reasoning_summary: config_profile
21432143
.model_reasoning_summary
2144-
.or(cfg.model_reasoning_summary)
2145-
.unwrap_or_default(),
2144+
.or(cfg.model_reasoning_summary),
21462145
model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries,
21472146
model_catalog,
21482147
model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity),
@@ -4764,7 +4763,7 @@ model_verbosity = "high"
47644763
show_raw_agent_reasoning: false,
47654764
model_reasoning_effort: Some(ReasoningEffort::High),
47664765
plan_mode_reasoning_effort: None,
4767-
model_reasoning_summary: ReasoningSummary::Detailed,
4766+
model_reasoning_summary: Some(ReasoningSummary::Detailed),
47684767
model_supports_reasoning_summaries: None,
47694768
model_catalog: None,
47704769
model_verbosity: None,
@@ -4890,7 +4889,7 @@ model_verbosity = "high"
48904889
show_raw_agent_reasoning: false,
48914890
model_reasoning_effort: None,
48924891
plan_mode_reasoning_effort: None,
4893-
model_reasoning_summary: ReasoningSummary::default(),
4892+
model_reasoning_summary: None,
48944893
model_supports_reasoning_summaries: None,
48954894
model_catalog: None,
48964895
model_verbosity: None,
@@ -5014,7 +5013,7 @@ model_verbosity = "high"
50145013
show_raw_agent_reasoning: false,
50155014
model_reasoning_effort: None,
50165015
plan_mode_reasoning_effort: None,
5017-
model_reasoning_summary: ReasoningSummary::default(),
5016+
model_reasoning_summary: None,
50185017
model_supports_reasoning_summaries: None,
50195018
model_catalog: None,
50205019
model_verbosity: None,
@@ -5124,7 +5123,7 @@ model_verbosity = "high"
51245123
show_raw_agent_reasoning: false,
51255124
model_reasoning_effort: Some(ReasoningEffort::High),
51265125
plan_mode_reasoning_effort: None,
5127-
model_reasoning_summary: ReasoningSummary::Detailed,
5126+
model_reasoning_summary: Some(ReasoningSummary::Detailed),
51285127
model_supports_reasoning_summaries: None,
51295128
model_catalog: None,
51305129
model_verbosity: Some(Verbosity::High),

codex-rs/core/src/models_manager/model_info.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
use codex_protocol::config_types::ReasoningSummary;
12
use codex_protocol::openai_models::ConfigShellToolType;
23
use codex_protocol::openai_models::ModelInfo;
34
use codex_protocol::openai_models::ModelInstructionsVariables;
@@ -72,6 +73,7 @@ pub(crate) fn model_info_from_slug(slug: &str) -> ModelInfo {
7273
base_instructions: BASE_INSTRUCTIONS.to_string(),
7374
model_messages: local_personality_messages_for_slug(slug),
7475
supports_reasoning_summaries: false,
76+
default_reasoning_summary: ReasoningSummary::Auto,
7577
support_verbosity: false,
7678
default_verbosity: None,
7779
apply_patch_tool_type: None,

codex-rs/core/src/tools/handlers/multi_agents.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -914,7 +914,7 @@ fn build_agent_shared_config(turn: &TurnContext) -> Result<Config, FunctionCallE
914914
config.model = Some(turn.model_info.slug.clone());
915915
config.model_provider = turn.provider.clone();
916916
config.model_reasoning_effort = turn.reasoning_effort;
917-
config.model_reasoning_summary = turn.reasoning_summary;
917+
config.model_reasoning_summary = Some(turn.reasoning_summary);
918918
config.developer_instructions = turn.developer_instructions.clone();
919919
config.compact_prompt = turn.compact_prompt.clone();
920920
apply_spawn_agent_runtime_overrides(&mut config, turn)?;
@@ -2046,7 +2046,7 @@ mod tests {
20462046
expected.model = Some(turn.model_info.slug.clone());
20472047
expected.model_provider = turn.provider.clone();
20482048
expected.model_reasoning_effort = turn.reasoning_effort;
2049-
expected.model_reasoning_summary = turn.reasoning_summary;
2049+
expected.model_reasoning_summary = Some(turn.reasoning_summary);
20502050
expected.developer_instructions = turn.developer_instructions.clone();
20512051
expected.compact_prompt = turn.compact_prompt.clone();
20522052
expected.permissions.shell_environment_policy = turn.shell_environment_policy.clone();
@@ -2098,7 +2098,7 @@ mod tests {
20982098
expected.model = Some(turn.model_info.slug.clone());
20992099
expected.model_provider = turn.provider.clone();
21002100
expected.model_reasoning_effort = turn.reasoning_effort;
2101-
expected.model_reasoning_summary = turn.reasoning_summary;
2101+
expected.model_reasoning_summary = Some(turn.reasoning_summary);
21022102
expected.developer_instructions = turn.developer_instructions.clone();
21032103
expected.compact_prompt = turn.compact_prompt.clone();
21042104
expected.permissions.shell_environment_policy = turn.shell_environment_policy.clone();

codex-rs/core/tests/responses_headers.rs

Lines changed: 25 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,14 @@ async fn responses_stream_includes_subagent_header_on_review() {
111111
}];
112112

113113
let mut stream = client_session
114-
.stream(&prompt, &model_info, &otel_manager, effort, summary, None)
114+
.stream(
115+
&prompt,
116+
&model_info,
117+
&otel_manager,
118+
effort,
119+
summary.unwrap_or(model_info.default_reasoning_summary),
120+
None,
121+
)
115122
.await
116123
.expect("stream failed");
117124
while let Some(event) = stream.next().await {
@@ -216,7 +223,14 @@ async fn responses_stream_includes_subagent_header_on_other() {
216223
}];
217224

218225
let mut stream = client_session
219-
.stream(&prompt, &model_info, &otel_manager, effort, summary, None)
226+
.stream(
227+
&prompt,
228+
&model_info,
229+
&otel_manager,
230+
effort,
231+
summary.unwrap_or(model_info.default_reasoning_summary),
232+
None,
233+
)
220234
.await
221235
.expect("stream failed");
222236
while let Some(event) = stream.next().await {
@@ -267,7 +281,7 @@ async fn responses_respects_model_info_overrides_from_config() {
267281
config.model_provider_id = provider.name.clone();
268282
config.model_provider = provider.clone();
269283
config.model_supports_reasoning_summaries = Some(true);
270-
config.model_reasoning_summary = ReasoningSummary::Detailed;
284+
config.model_reasoning_summary = Some(ReasoningSummary::Detailed);
271285
let effort = config.model_reasoning_effort;
272286
let summary = config.model_reasoning_summary;
273287
let model = config.model.clone().expect("model configured");
@@ -320,7 +334,14 @@ async fn responses_respects_model_info_overrides_from_config() {
320334
}];
321335

322336
let mut stream = client_session
323-
.stream(&prompt, &model_info, &otel_manager, effort, summary, None)
337+
.stream(
338+
&prompt,
339+
&model_info,
340+
&otel_manager,
341+
effort,
342+
summary.unwrap_or(model_info.default_reasoning_summary),
343+
None,
344+
)
324345
.await
325346
.expect("stream failed");
326347
while let Some(event) = stream.next().await {

codex-rs/core/tests/suite/client.rs

Lines changed: 68 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ use codex_protocol::models::ReasoningItemContent;
3131
use codex_protocol::models::ReasoningItemReasoningSummary;
3232
use codex_protocol::models::ResponseItem;
3333
use codex_protocol::models::WebSearchAction;
34+
use codex_protocol::openai_models::ModelsResponse;
3435
use codex_protocol::openai_models::ReasoningEffort;
3536
use codex_protocol::protocol::EventMsg;
3637
use codex_protocol::protocol::Op;
@@ -980,7 +981,9 @@ async fn user_turn_collaboration_mode_overrides_model_and_effort() -> anyhow::Re
980981
sandbox_policy: config.permissions.sandbox_policy.get().clone(),
981982
model: session_configured.model.clone(),
982983
effort: Some(ReasoningEffort::Low),
983-
summary: config.model_reasoning_summary,
984+
summary: config
985+
.model_reasoning_summary
986+
.unwrap_or(ReasoningSummary::Auto),
984987
collaboration_mode: Some(collaboration_mode),
985988
final_output_json_schema: None,
986989
personality: None,
@@ -1014,7 +1017,7 @@ async fn configured_reasoning_summary_is_sent() -> anyhow::Result<()> {
10141017
.await;
10151018
let TestCodex { codex, .. } = test_codex()
10161019
.with_config(|config| {
1017-
config.model_reasoning_summary = ReasoningSummary::Concise;
1020+
config.model_reasoning_summary = Some(ReasoningSummary::Concise);
10181021
})
10191022
.build(&server)
10201023
.await?;
@@ -1058,7 +1061,7 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
10581061
.await;
10591062
let TestCodex { codex, .. } = test_codex()
10601063
.with_config(|config| {
1061-
config.model_reasoning_summary = ReasoningSummary::None;
1064+
config.model_reasoning_summary = Some(ReasoningSummary::None);
10621065
})
10631066
.build(&server)
10641067
.await?;
@@ -1089,6 +1092,60 @@ async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> {
10891092
Ok(())
10901093
}
10911094

1095+
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
1096+
async fn reasoning_summary_none_overrides_model_catalog_default() -> anyhow::Result<()> {
1097+
skip_if_no_network!(Ok(()));
1098+
let server = MockServer::start().await;
1099+
1100+
let resp_mock = mount_sse_once(
1101+
&server,
1102+
sse(vec![ev_response_created("resp1"), ev_completed("resp1")]),
1103+
)
1104+
.await;
1105+
1106+
let mut model_catalog: ModelsResponse =
1107+
serde_json::from_str(include_str!("../../models.json")).expect("valid models.json");
1108+
let model = model_catalog
1109+
.models
1110+
.iter_mut()
1111+
.find(|model| model.slug == "gpt-5.1")
1112+
.expect("gpt-5.1 exists in bundled models.json");
1113+
model.supports_reasoning_summaries = true;
1114+
model.default_reasoning_summary = ReasoningSummary::Detailed;
1115+
1116+
let TestCodex { codex, .. } = test_codex()
1117+
.with_model("gpt-5.1")
1118+
.with_config(move |config| {
1119+
config.model_reasoning_summary = Some(ReasoningSummary::None);
1120+
config.model_catalog = Some(model_catalog);
1121+
})
1122+
.build(&server)
1123+
.await?;
1124+
1125+
codex
1126+
.submit(Op::UserInput {
1127+
items: vec![UserInput::Text {
1128+
text: "hello".into(),
1129+
text_elements: Vec::new(),
1130+
}],
1131+
final_output_json_schema: None,
1132+
})
1133+
.await
1134+
.unwrap();
1135+
1136+
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
1137+
1138+
let request_body = resp_mock.single_request().body_json();
1139+
pretty_assertions::assert_eq!(
1140+
request_body
1141+
.get("reasoning")
1142+
.and_then(|reasoning| reasoning.get("summary")),
1143+
None
1144+
);
1145+
1146+
Ok(())
1147+
}
1148+
10921149
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
10931150
async fn includes_default_verbosity_in_request() -> anyhow::Result<()> {
10941151
skip_if_no_network!(Ok(()));
@@ -1441,7 +1498,14 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
14411498
});
14421499

14431500
let mut stream = client_session
1444-
.stream(&prompt, &model_info, &otel_manager, effort, summary, None)
1501+
.stream(
1502+
&prompt,
1503+
&model_info,
1504+
&otel_manager,
1505+
effort,
1506+
summary.unwrap_or(ReasoningSummary::Auto),
1507+
None,
1508+
)
14451509
.await
14461510
.expect("responses stream to start");
14471511

codex-rs/core/tests/suite/collaboration_instructions.rs

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,10 @@ async fn collaboration_instructions_added_on_user_turn() -> Result<()> {
169169
sandbox_policy: test.config.permissions.sandbox_policy.get().clone(),
170170
model: test.session_configured.model.clone(),
171171
effort: None,
172-
summary: test.config.model_reasoning_summary,
172+
summary: test
173+
.config
174+
.model_reasoning_summary
175+
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
173176
collaboration_mode: Some(collaboration_mode),
174177
final_output_json_schema: None,
175178
personality: None,
@@ -275,7 +278,10 @@ async fn user_turn_overrides_collaboration_instructions_after_override() -> Resu
275278
sandbox_policy: test.config.permissions.sandbox_policy.get().clone(),
276279
model: test.session_configured.model.clone(),
277280
effort: None,
278-
summary: test.config.model_reasoning_summary,
281+
summary: test
282+
.config
283+
.model_reasoning_summary
284+
.unwrap_or(codex_protocol::config_types::ReasoningSummary::Auto),
279285
collaboration_mode: Some(turn_mode),
280286
final_output_json_schema: None,
281287
personality: None,

codex-rs/core/tests/suite/model_switching.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -234,6 +234,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
234234
base_instructions: "base instructions".to_string(),
235235
model_messages: None,
236236
supports_reasoning_summaries: false,
237+
default_reasoning_summary: ReasoningSummary::Auto,
237238
support_verbosity: false,
238239
default_verbosity: None,
239240
apply_patch_tool_type: None,
@@ -391,6 +392,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
391392
base_instructions: "base instructions".to_string(),
392393
model_messages: None,
393394
supports_reasoning_summaries: false,
395+
default_reasoning_summary: ReasoningSummary::Auto,
394396
support_verbosity: false,
395397
default_verbosity: None,
396398
apply_patch_tool_type: None,

0 commit comments

Comments (0)