Skip to content

Commit fe8b474

Browse files
fix(core,app-server): resume with different model (openai#10719)
## Summary When resuming with a different model, we should also append a developer message with the model instructions. ## Testing - [x] Added unit tests
1 parent 1e1146c commit fe8b474

4 files changed

Lines changed: 347 additions & 42 deletions

File tree

codex-rs/app-server/tests/suite/send_message.rs

Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
use anyhow::Result;
22
use app_test_support::McpProcess;
3+
use app_test_support::create_fake_rollout;
4+
use app_test_support::rollout_path;
35
use app_test_support::to_response;
46
use codex_app_server_protocol::AddConversationListenerParams;
57
use codex_app_server_protocol::AddConversationSubscriptionResponse;
@@ -9,18 +11,25 @@ use codex_app_server_protocol::JSONRPCResponse;
911
use codex_app_server_protocol::NewConversationParams;
1012
use codex_app_server_protocol::NewConversationResponse;
1113
use codex_app_server_protocol::RequestId;
14+
use codex_app_server_protocol::ResumeConversationParams;
15+
use codex_app_server_protocol::ResumeConversationResponse;
1216
use codex_app_server_protocol::SendUserMessageParams;
1317
use codex_app_server_protocol::SendUserMessageResponse;
1418
use codex_execpolicy::Policy;
1519
use codex_protocol::ThreadId;
20+
use codex_protocol::config_types::ReasoningSummary;
1621
use codex_protocol::models::ContentItem;
1722
use codex_protocol::models::DeveloperInstructions;
1823
use codex_protocol::models::ResponseItem;
1924
use codex_protocol::protocol::AskForApproval;
2025
use codex_protocol::protocol::RawResponseItemEvent;
26+
use codex_protocol::protocol::RolloutItem;
27+
use codex_protocol::protocol::RolloutLine;
2128
use codex_protocol::protocol::SandboxPolicy;
29+
use codex_protocol::protocol::TurnContextItem;
2230
use core_test_support::responses;
2331
use pretty_assertions::assert_eq;
32+
use std::io::Write;
2433
use std::path::Path;
2534
use std::path::PathBuf;
2635
use tempfile::TempDir;
@@ -263,6 +272,114 @@ async fn test_send_message_session_not_found() -> Result<()> {
263272
Ok(())
264273
}
265274

275+
#[tokio::test]
276+
async fn resume_with_model_mismatch_appends_model_switch_once() -> Result<()> {
277+
let server = responses::start_mock_server().await;
278+
let response_mock = responses::mount_sse_sequence(
279+
&server,
280+
vec![
281+
responses::sse(vec![
282+
responses::ev_response_created("resp-1"),
283+
responses::ev_assistant_message("msg-1", "Done"),
284+
responses::ev_completed("resp-1"),
285+
]),
286+
responses::sse(vec![
287+
responses::ev_response_created("resp-2"),
288+
responses::ev_assistant_message("msg-2", "Done again"),
289+
responses::ev_completed("resp-2"),
290+
]),
291+
],
292+
)
293+
.await;
294+
295+
let codex_home = TempDir::new()?;
296+
create_config_toml(codex_home.path(), &server.uri())?;
297+
298+
let filename_ts = "2025-01-02T12-00-00";
299+
let meta_rfc3339 = "2025-01-02T12:00:00Z";
300+
let preview = "Resume me";
301+
let conversation_id = create_fake_rollout(
302+
codex_home.path(),
303+
filename_ts,
304+
meta_rfc3339,
305+
preview,
306+
Some("mock_provider"),
307+
None,
308+
)?;
309+
let rollout_path = rollout_path(codex_home.path(), filename_ts, &conversation_id);
310+
append_rollout_turn_context(&rollout_path, meta_rfc3339, "previous-model")?;
311+
312+
let mut mcp = McpProcess::new(codex_home.path()).await?;
313+
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
314+
315+
let resume_id = mcp
316+
.send_resume_conversation_request(ResumeConversationParams {
317+
path: Some(rollout_path.clone()),
318+
conversation_id: None,
319+
history: None,
320+
overrides: Some(NewConversationParams {
321+
model: Some("gpt-5.2-codex".to_string()),
322+
..Default::default()
323+
}),
324+
})
325+
.await?;
326+
timeout(
327+
DEFAULT_READ_TIMEOUT,
328+
mcp.read_stream_until_notification_message("sessionConfigured"),
329+
)
330+
.await??;
331+
let resume_resp: JSONRPCResponse = timeout(
332+
DEFAULT_READ_TIMEOUT,
333+
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
334+
)
335+
.await??;
336+
let ResumeConversationResponse {
337+
conversation_id, ..
338+
} = to_response::<ResumeConversationResponse>(resume_resp)?;
339+
340+
let add_listener_id = mcp
341+
.send_add_conversation_listener_request(AddConversationListenerParams {
342+
conversation_id,
343+
experimental_raw_events: false,
344+
})
345+
.await?;
346+
let add_listener_resp: JSONRPCResponse = timeout(
347+
DEFAULT_READ_TIMEOUT,
348+
mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
349+
)
350+
.await??;
351+
let AddConversationSubscriptionResponse { subscription_id: _ } =
352+
to_response::<_>(add_listener_resp)?;
353+
354+
send_message("hello after resume", conversation_id, &mut mcp).await?;
355+
send_message("second turn", conversation_id, &mut mcp).await?;
356+
357+
let requests = response_mock.requests();
358+
assert_eq!(requests.len(), 2, "expected two model requests");
359+
360+
let first_developer_texts = requests[0].message_input_texts("developer");
361+
let first_model_switch_count = first_developer_texts
362+
.iter()
363+
.filter(|text| text.contains("<model_switch>"))
364+
.count();
365+
assert!(
366+
first_model_switch_count >= 1,
367+
"expected model switch message on first post-resume turn, got {first_developer_texts:?}"
368+
);
369+
370+
let second_developer_texts = requests[1].message_input_texts("developer");
371+
let second_model_switch_count = second_developer_texts
372+
.iter()
373+
.filter(|text| text.contains("<model_switch>"))
374+
.count();
375+
assert_eq!(
376+
second_model_switch_count, 1,
377+
"did not expect duplicate model switch message on second post-resume turn, got {second_developer_texts:?}"
378+
);
379+
380+
Ok(())
381+
}
382+
266383
// ---------------------------------------------------------------------------
267384
// Helpers
268385
// ---------------------------------------------------------------------------
@@ -438,3 +555,28 @@ fn content_texts(content: &[ContentItem]) -> Vec<&str> {
438555
})
439556
.collect()
440557
}
558+
559+
fn append_rollout_turn_context(path: &Path, timestamp: &str, model: &str) -> std::io::Result<()> {
560+
let line = RolloutLine {
561+
timestamp: timestamp.to_string(),
562+
item: RolloutItem::TurnContext(TurnContextItem {
563+
cwd: PathBuf::from("/"),
564+
approval_policy: AskForApproval::Never,
565+
sandbox_policy: SandboxPolicy::DangerFullAccess,
566+
model: model.to_string(),
567+
personality: None,
568+
collaboration_mode: None,
569+
effort: None,
570+
summary: ReasoningSummary::Auto,
571+
user_instructions: None,
572+
developer_instructions: None,
573+
final_output_json_schema: None,
574+
truncation_policy: None,
575+
}),
576+
};
577+
let serialized = serde_json::to_string(&line).map_err(std::io::Error::other)?;
578+
std::fs::OpenOptions::new()
579+
.append(true)
580+
.open(path)?
581+
.write_all(format!("{serialized}\n").as_bytes())
582+
}

codex-rs/core/src/codex.rs

Lines changed: 52 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1176,32 +1176,26 @@ impl Session {
11761176
{
11771177
let mut state = self.state.lock().await;
11781178
state.initial_context_seeded = false;
1179+
state.pending_resume_previous_model = None;
11791180
}
11801181

11811182
// If resuming, warn when the last recorded model differs from the current one.
1182-
if let Some(prev) = rollout_items.iter().rev().find_map(|it| {
1183-
if let RolloutItem::TurnContext(ctx) = it {
1184-
Some(ctx.model.as_str())
1185-
} else {
1186-
None
1187-
}
1188-
}) {
1189-
let curr = turn_context.model_info.slug.as_str();
1190-
if prev != curr {
1191-
warn!(
1192-
"resuming session with different model: previous={prev}, current={curr}"
1193-
);
1194-
self.send_event(
1195-
&turn_context,
1196-
EventMsg::Warning(WarningEvent {
1197-
message: format!(
1198-
"This session was recorded with model `{prev}` but is resuming with `{curr}`. \
1183+
let curr = turn_context.model_info.slug.as_str();
1184+
if let Some(prev) = Self::last_model_name(&rollout_items, curr) {
1185+
warn!("resuming session with different model: previous={prev}, current={curr}");
1186+
self.send_event(
1187+
&turn_context,
1188+
EventMsg::Warning(WarningEvent {
1189+
message: format!(
1190+
"This session was recorded with model `{prev}` but is resuming with `{curr}`. \
11991191
Consider switching back to `{prev}` as it may affect Codex performance."
1200-
),
1201-
}),
1202-
)
1203-
.await;
1204-
}
1192+
),
1193+
}),
1194+
)
1195+
.await;
1196+
1197+
let mut state = self.state.lock().await;
1198+
state.pending_resume_previous_model = Some(prev.to_string());
12051199
}
12061200

12071201
// Always add response items to conversation history
@@ -1260,13 +1254,33 @@ impl Session {
12601254
}
12611255
}
12621256

1257+
fn last_model_name<'a>(rollout_items: &'a [RolloutItem], current: &str) -> Option<&'a str> {
1258+
let previous = rollout_items.iter().rev().find_map(|it| {
1259+
if let RolloutItem::TurnContext(ctx) = it {
1260+
Some(ctx.model.as_str())
1261+
} else {
1262+
None
1263+
}
1264+
})?;
1265+
if previous == current {
1266+
None
1267+
} else {
1268+
Some(previous)
1269+
}
1270+
}
1271+
12631272
fn last_token_info_from_rollout(rollout_items: &[RolloutItem]) -> Option<TokenUsageInfo> {
12641273
rollout_items.iter().rev().find_map(|item| match item {
12651274
RolloutItem::EventMsg(EventMsg::TokenCount(ev)) => ev.info.clone(),
12661275
_ => None,
12671276
})
12681277
}
12691278

1279+
async fn take_pending_resume_previous_model(&self) -> Option<String> {
1280+
let mut state = self.state.lock().await;
1281+
state.pending_resume_previous_model.take()
1282+
}
1283+
12701284
pub(crate) async fn update_settings(
12711285
&self,
12721286
updates: SessionSettingsUpdate,
@@ -1504,10 +1518,12 @@ impl Session {
15041518
fn build_model_instructions_update_item(
15051519
&self,
15061520
previous: Option<&Arc<TurnContext>>,
1521+
resumed_model: Option<&str>,
15071522
next: &TurnContext,
15081523
) -> Option<ResponseItem> {
1509-
let prev = previous?;
1510-
if prev.model_info.slug == next.model_info.slug {
1524+
let previous_model =
1525+
resumed_model.or_else(|| previous.map(|prev| prev.model_info.slug.as_str()))?;
1526+
if previous_model == next.model_info.slug {
15111527
return None;
15121528
}
15131529

@@ -1522,6 +1538,7 @@ impl Session {
15221538
fn build_settings_update_items(
15231539
&self,
15241540
previous_context: Option<&Arc<TurnContext>>,
1541+
resumed_model: Option<&str>,
15251542
current_context: &TurnContext,
15261543
) -> Vec<ResponseItem> {
15271544
let mut update_items = Vec::new();
@@ -1540,9 +1557,11 @@ impl Session {
15401557
{
15411558
update_items.push(collaboration_mode_item);
15421559
}
1543-
if let Some(model_instructions_item) =
1544-
self.build_model_instructions_update_item(previous_context, current_context)
1545-
{
1560+
if let Some(model_instructions_item) = self.build_model_instructions_update_item(
1561+
previous_context,
1562+
resumed_model,
1563+
current_context,
1564+
) {
15461565
update_items.push(model_instructions_item);
15471566
}
15481567
if let Some(personality_item) =
@@ -2819,8 +2838,12 @@ mod handlers {
28192838
// Attempt to inject input into current task
28202839
if let Err(items) = sess.inject_input(items).await {
28212840
sess.seed_initial_context_if_needed(&current_context).await;
2822-
let update_items =
2823-
sess.build_settings_update_items(previous_context.as_ref(), &current_context);
2841+
let resumed_model = sess.take_pending_resume_previous_model().await;
2842+
let update_items = sess.build_settings_update_items(
2843+
previous_context.as_ref(),
2844+
resumed_model.as_deref(),
2845+
&current_context,
2846+
);
28242847
if !update_items.is_empty() {
28252848
sess.record_conversation_items(&current_context, &update_items)
28262849
.await;

codex-rs/core/src/state/session.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@ pub(crate) struct SessionState {
2424
/// TODO(owen): This is a temporary solution to avoid updating a thread's updated_at
2525
/// timestamp when resuming a session. Remove this once SQLite is in place.
2626
pub(crate) initial_context_seeded: bool,
27+
/// Previous rollout model for one-shot model-switch handling on first turn after resume.
28+
pub(crate) pending_resume_previous_model: Option<String>,
2729
}
2830

2931
impl SessionState {
@@ -38,6 +40,7 @@ impl SessionState {
3840
dependency_env: HashMap::new(),
3941
mcp_dependency_prompted: HashSet::new(),
4042
initial_context_seeded: false,
43+
pending_resume_previous_model: None,
4144
}
4245
}
4346

0 commit comments

Comments
 (0)