repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/tests.rs
codex-rs/core/src/rollout/tests.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] use std::fs::File; use std::fs::{self}; use std::io::Write; use std::path::Path; use tempfile::TempDir; use time::OffsetDateTime; use time::PrimitiveDateTime; use time::format_description::FormatItem; use time::macros::format_description; use uuid::Uuid; use crate::rollout::INTERACTIVE_SESSION_SOURCES; use crate::rollout::list::ConversationItem; use crate::rollout::list::ConversationsPage; use crate::rollout::list::Cursor; use crate::rollout::list::get_conversations; use anyhow::Result; use codex_protocol::ConversationId; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::RolloutItem; use codex_protocol::protocol::RolloutLine; use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::UserMessageEvent; const NO_SOURCE_FILTER: &[SessionSource] = &[]; const TEST_PROVIDER: &str = "test-provider"; fn provider_vec(providers: &[&str]) -> Vec<String> { providers .iter() .map(std::string::ToString::to_string) .collect() } fn write_session_file( root: &Path, ts_str: &str, uuid: Uuid, num_records: usize, source: Option<SessionSource>, ) -> std::io::Result<(OffsetDateTime, Uuid)> { write_session_file_with_provider( root, ts_str, uuid, num_records, source, Some("test-provider"), ) } fn write_session_file_with_provider( root: &Path, ts_str: &str, uuid: Uuid, num_records: usize, source: Option<SessionSource>, model_provider: Option<&str>, ) -> std::io::Result<(OffsetDateTime, Uuid)> { let format: &[FormatItem] = format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]"); let dt = PrimitiveDateTime::parse(ts_str, format) .unwrap() .assume_utc(); let dir = root .join("sessions") .join(format!("{:04}", dt.year())) .join(format!("{:02}", u8::from(dt.month()))) .join(format!("{:02}", dt.day())); 
fs::create_dir_all(&dir)?; let filename = format!("rollout-{ts_str}-{uuid}.jsonl"); let file_path = dir.join(filename); let mut file = File::create(file_path)?; let mut payload = serde_json::json!({ "id": uuid, "timestamp": ts_str, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", }); if let Some(source) = source { payload["source"] = serde_json::to_value(source).unwrap(); } if let Some(provider) = model_provider { payload["model_provider"] = serde_json::Value::String(provider.to_string()); } let meta = serde_json::json!({ "timestamp": ts_str, "type": "session_meta", "payload": payload, }); writeln!(file, "{meta}")?; // Include at least one user message event to satisfy listing filters let user_event = serde_json::json!({ "timestamp": ts_str, "type": "event_msg", "payload": { "type": "user_message", "message": "Hello from user", "kind": "plain" } }); writeln!(file, "{user_event}")?; for i in 0..num_records { let rec = serde_json::json!({ "record_type": "response", "index": i }); writeln!(file, "{rec}")?; } Ok((dt, uuid)) } #[tokio::test] async fn test_list_conversations_latest_first() { let temp = TempDir::new().unwrap(); let home = temp.path(); // Fixed UUIDs for deterministic expectations let u1 = Uuid::from_u128(1); let u2 = Uuid::from_u128(2); let u3 = Uuid::from_u128(3); // Create three sessions across three days write_session_file( home, "2025-01-01T12-00-00", u1, 3, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-01-02T12-00-00", u2, 3, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-01-03T12-00-00", u3, 3, Some(SessionSource::VSCode), ) .unwrap(); let provider_filter = provider_vec(&[TEST_PROVIDER]); let page = get_conversations( home, 10, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); // Build expected objects let p1 = home .join("sessions") .join("2025") .join("01") .join("03") 
.join(format!("rollout-2025-01-03T12-00-00-{u3}.jsonl")); let p2 = home .join("sessions") .join("2025") .join("01") .join("02") .join(format!("rollout-2025-01-02T12-00-00-{u2}.jsonl")); let p3 = home .join("sessions") .join("2025") .join("01") .join("01") .join(format!("rollout-2025-01-01T12-00-00-{u1}.jsonl")); let head_3 = vec![serde_json::json!({ "id": u3, "timestamp": "2025-01-03T12-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let head_2 = vec![serde_json::json!({ "id": u2, "timestamp": "2025-01-02T12-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let head_1 = vec![serde_json::json!({ "id": u1, "timestamp": "2025-01-01T12-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let updated_times: Vec<Option<String>> = page.items.iter().map(|i| i.updated_at.clone()).collect(); let expected = ConversationsPage { items: vec![ ConversationItem { path: p1, head: head_3, created_at: Some("2025-01-03T12-00-00".into()), updated_at: updated_times.first().cloned().flatten(), }, ConversationItem { path: p2, head: head_2, created_at: Some("2025-01-02T12-00-00".into()), updated_at: updated_times.get(1).cloned().flatten(), }, ConversationItem { path: p3, head: head_1, created_at: Some("2025-01-01T12-00-00".into()), updated_at: updated_times.get(2).cloned().flatten(), }, ], next_cursor: None, num_scanned_files: 3, reached_scan_cap: false, }; assert_eq!(page, expected); } #[tokio::test] async fn test_pagination_cursor() { let temp = TempDir::new().unwrap(); let home = temp.path(); // Fixed UUIDs for deterministic expectations let u1 = Uuid::from_u128(11); let u2 = Uuid::from_u128(22); let u3 = Uuid::from_u128(33); let u4 = 
Uuid::from_u128(44); let u5 = Uuid::from_u128(55); // Oldest to newest write_session_file( home, "2025-03-01T09-00-00", u1, 1, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-03-02T09-00-00", u2, 1, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-03-03T09-00-00", u3, 1, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-03-04T09-00-00", u4, 1, Some(SessionSource::VSCode), ) .unwrap(); write_session_file( home, "2025-03-05T09-00-00", u5, 1, Some(SessionSource::VSCode), ) .unwrap(); let provider_filter = provider_vec(&[TEST_PROVIDER]); let page1 = get_conversations( home, 2, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let p5 = home .join("sessions") .join("2025") .join("03") .join("05") .join(format!("rollout-2025-03-05T09-00-00-{u5}.jsonl")); let p4 = home .join("sessions") .join("2025") .join("03") .join("04") .join(format!("rollout-2025-03-04T09-00-00-{u4}.jsonl")); let head_5 = vec![serde_json::json!({ "id": u5, "timestamp": "2025-03-05T09-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let head_4 = vec![serde_json::json!({ "id": u4, "timestamp": "2025-03-04T09-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let updated_page1: Vec<Option<String>> = page1.items.iter().map(|i| i.updated_at.clone()).collect(); let expected_cursor1: Cursor = serde_json::from_str(&format!("\"2025-03-04T09-00-00|{u4}\"")).unwrap(); let expected_page1 = ConversationsPage { items: vec![ ConversationItem { path: p5, head: head_5, created_at: Some("2025-03-05T09-00-00".into()), updated_at: updated_page1.first().cloned().flatten(), }, ConversationItem { path: p4, head: head_4, created_at: Some("2025-03-04T09-00-00".into()), 
updated_at: updated_page1.get(1).cloned().flatten(), }, ], next_cursor: Some(expected_cursor1.clone()), num_scanned_files: 3, // scanned 05, 04, and peeked at 03 before breaking reached_scan_cap: false, }; assert_eq!(page1, expected_page1); let page2 = get_conversations( home, 2, page1.next_cursor.as_ref(), INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let p3 = home .join("sessions") .join("2025") .join("03") .join("03") .join(format!("rollout-2025-03-03T09-00-00-{u3}.jsonl")); let p2 = home .join("sessions") .join("2025") .join("03") .join("02") .join(format!("rollout-2025-03-02T09-00-00-{u2}.jsonl")); let head_3 = vec![serde_json::json!({ "id": u3, "timestamp": "2025-03-03T09-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let head_2 = vec![serde_json::json!({ "id": u2, "timestamp": "2025-03-02T09-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let updated_page2: Vec<Option<String>> = page2.items.iter().map(|i| i.updated_at.clone()).collect(); let expected_cursor2: Cursor = serde_json::from_str(&format!("\"2025-03-02T09-00-00|{u2}\"")).unwrap(); let expected_page2 = ConversationsPage { items: vec![ ConversationItem { path: p3, head: head_3, created_at: Some("2025-03-03T09-00-00".into()), updated_at: updated_page2.first().cloned().flatten(), }, ConversationItem { path: p2, head: head_2, created_at: Some("2025-03-02T09-00-00".into()), updated_at: updated_page2.get(1).cloned().flatten(), }, ], next_cursor: Some(expected_cursor2.clone()), num_scanned_files: 5, // scanned 05, 04 (anchor), 03, 02, and peeked at 01 reached_scan_cap: false, }; assert_eq!(page2, expected_page2); let page3 = get_conversations( home, 2, page2.next_cursor.as_ref(), INTERACTIVE_SESSION_SOURCES, 
Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let p1 = home .join("sessions") .join("2025") .join("03") .join("01") .join(format!("rollout-2025-03-01T09-00-00-{u1}.jsonl")); let head_1 = vec![serde_json::json!({ "id": u1, "timestamp": "2025-03-01T09-00-00", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let updated_page3: Vec<Option<String>> = page3.items.iter().map(|i| i.updated_at.clone()).collect(); let expected_page3 = ConversationsPage { items: vec![ConversationItem { path: p1, head: head_1, created_at: Some("2025-03-01T09-00-00".into()), updated_at: updated_page3.first().cloned().flatten(), }], next_cursor: None, num_scanned_files: 5, // scanned 05, 04 (anchor), 03, 02 (anchor), 01 reached_scan_cap: false, }; assert_eq!(page3, expected_page3); } #[tokio::test] async fn test_get_conversation_contents() { let temp = TempDir::new().unwrap(); let home = temp.path(); let uuid = Uuid::new_v4(); let ts = "2025-04-01T10-30-00"; write_session_file(home, ts, uuid, 2, Some(SessionSource::VSCode)).unwrap(); let provider_filter = provider_vec(&[TEST_PROVIDER]); let page = get_conversations( home, 1, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let path = &page.items[0].path; let content = tokio::fs::read_to_string(path).await.unwrap(); // Page equality (single item) let expected_path = home .join("sessions") .join("2025") .join("04") .join("01") .join(format!("rollout-2025-04-01T10-30-00-{uuid}.jsonl")); let expected_head = vec![serde_json::json!({ "id": uuid, "timestamp": ts, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })]; let expected_page = ConversationsPage { items: vec![ConversationItem { path: expected_path, head: expected_head, created_at: Some(ts.into()), updated_at: 
page.items[0].updated_at.clone(), }], next_cursor: None, num_scanned_files: 1, reached_scan_cap: false, }; assert_eq!(page, expected_page); // Entire file contents equality let meta = serde_json::json!({ "timestamp": ts, "type": "session_meta", "payload": { "id": uuid, "timestamp": ts, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", } }); let user_event = serde_json::json!({ "timestamp": ts, "type": "event_msg", "payload": {"type": "user_message", "message": "Hello from user", "kind": "plain"} }); let rec0 = serde_json::json!({"record_type": "response", "index": 0}); let rec1 = serde_json::json!({"record_type": "response", "index": 1}); let expected_content = format!("{meta}\n{user_event}\n{rec0}\n{rec1}\n"); assert_eq!(content, expected_content); } #[tokio::test] async fn test_updated_at_uses_file_mtime() -> Result<()> { let temp = TempDir::new().unwrap(); let home = temp.path(); let ts = "2025-06-01T08-00-00"; let uuid = Uuid::from_u128(42); let day_dir = home.join("sessions").join("2025").join("06").join("01"); fs::create_dir_all(&day_dir)?; let file_path = day_dir.join(format!("rollout-{ts}-{uuid}.jsonl")); let mut file = File::create(&file_path)?; let conversation_id = ConversationId::from_string(&uuid.to_string())?; let meta_line = RolloutLine { timestamp: ts.to_string(), item: RolloutItem::SessionMeta(SessionMetaLine { meta: SessionMeta { id: conversation_id, timestamp: ts.to_string(), instructions: None, cwd: ".".into(), originator: "test_originator".into(), cli_version: "test_version".into(), source: SessionSource::VSCode, model_provider: Some("test-provider".into()), }, git: None, }), }; writeln!(file, "{}", serde_json::to_string(&meta_line)?)?; let user_event_line = RolloutLine { timestamp: ts.to_string(), item: RolloutItem::EventMsg(EventMsg::UserMessage(UserMessageEvent { message: "hello".into(), images: None, })), }; writeln!(file, "{}", 
serde_json::to_string(&user_event_line)?)?; let total_messages = 12usize; for idx in 0..total_messages { let response_line = RolloutLine { timestamp: format!("{ts}-{idx:02}"), item: RolloutItem::ResponseItem(ResponseItem::Message { id: None, role: "assistant".into(), content: vec![ContentItem::OutputText { text: format!("reply-{idx}"), }], }), }; writeln!(file, "{}", serde_json::to_string(&response_line)?)?; } drop(file); let provider_filter = provider_vec(&[TEST_PROVIDER]); let page = get_conversations( home, 1, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await?; let item = page.items.first().expect("conversation item"); assert_eq!(item.created_at.as_deref(), Some(ts)); let updated = item .updated_at .as_deref() .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) .map(|dt| dt.with_timezone(&chrono::Utc)) .expect("updated_at set from file mtime"); let now = chrono::Utc::now(); let age = now - updated; assert!(age.num_seconds().abs() < 30); Ok(()) } #[tokio::test] async fn test_stable_ordering_same_second_pagination() { let temp = TempDir::new().unwrap(); let home = temp.path(); let ts = "2025-07-01T00-00-00"; let u1 = Uuid::from_u128(1); let u2 = Uuid::from_u128(2); let u3 = Uuid::from_u128(3); write_session_file(home, ts, u1, 0, Some(SessionSource::VSCode)).unwrap(); write_session_file(home, ts, u2, 0, Some(SessionSource::VSCode)).unwrap(); write_session_file(home, ts, u3, 0, Some(SessionSource::VSCode)).unwrap(); let provider_filter = provider_vec(&[TEST_PROVIDER]); let page1 = get_conversations( home, 2, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let p3 = home .join("sessions") .join("2025") .join("07") .join("01") .join(format!("rollout-2025-07-01T00-00-00-{u3}.jsonl")); let p2 = home .join("sessions") .join("2025") .join("07") .join("01") .join(format!("rollout-2025-07-01T00-00-00-{u2}.jsonl")); let head = |u: Uuid| -> Vec<serde_json::Value> { 
vec![serde_json::json!({ "id": u, "timestamp": ts, "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "source": "vscode", "model_provider": "test-provider", })] }; let updated_page1: Vec<Option<String>> = page1.items.iter().map(|i| i.updated_at.clone()).collect(); let expected_cursor1: Cursor = serde_json::from_str(&format!("\"{ts}|{u2}\"")).unwrap(); let expected_page1 = ConversationsPage { items: vec![ ConversationItem { path: p3, head: head(u3), created_at: Some(ts.to_string()), updated_at: updated_page1.first().cloned().flatten(), }, ConversationItem { path: p2, head: head(u2), created_at: Some(ts.to_string()), updated_at: updated_page1.get(1).cloned().flatten(), }, ], next_cursor: Some(expected_cursor1.clone()), num_scanned_files: 3, // scanned u3, u2, peeked u1 reached_scan_cap: false, }; assert_eq!(page1, expected_page1); let page2 = get_conversations( home, 2, page1.next_cursor.as_ref(), INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let p1 = home .join("sessions") .join("2025") .join("07") .join("01") .join(format!("rollout-2025-07-01T00-00-00-{u1}.jsonl")); let updated_page2: Vec<Option<String>> = page2.items.iter().map(|i| i.updated_at.clone()).collect(); let expected_page2 = ConversationsPage { items: vec![ConversationItem { path: p1, head: head(u1), created_at: Some(ts.to_string()), updated_at: updated_page2.first().cloned().flatten(), }], next_cursor: None, num_scanned_files: 3, // scanned u3, u2 (anchor), u1 reached_scan_cap: false, }; assert_eq!(page2, expected_page2); } #[tokio::test] async fn test_source_filter_excludes_non_matching_sessions() { let temp = TempDir::new().unwrap(); let home = temp.path(); let interactive_id = Uuid::from_u128(42); let non_interactive_id = Uuid::from_u128(77); write_session_file( home, "2025-08-02T10-00-00", interactive_id, 2, Some(SessionSource::Cli), ) .unwrap(); write_session_file( home, "2025-08-01T10-00-00", 
non_interactive_id, 2, Some(SessionSource::Exec), ) .unwrap(); let provider_filter = provider_vec(&[TEST_PROVIDER]); let interactive_only = get_conversations( home, 10, None, INTERACTIVE_SESSION_SOURCES, Some(provider_filter.as_slice()), TEST_PROVIDER, ) .await .unwrap(); let paths: Vec<_> = interactive_only .items .iter() .map(|item| item.path.as_path()) .collect(); assert_eq!(paths.len(), 1); assert!(paths.iter().all(|path| { path.ends_with("rollout-2025-08-02T10-00-00-00000000-0000-0000-0000-00000000002a.jsonl") })); let all_sessions = get_conversations(home, 10, None, NO_SOURCE_FILTER, None, TEST_PROVIDER) .await .unwrap(); let all_paths: Vec<_> = all_sessions .items .into_iter() .map(|item| item.path) .collect(); assert_eq!(all_paths.len(), 2); assert!(all_paths.iter().any(|path| { path.ends_with("rollout-2025-08-02T10-00-00-00000000-0000-0000-0000-00000000002a.jsonl") })); assert!(all_paths.iter().any(|path| { path.ends_with("rollout-2025-08-01T10-00-00-00000000-0000-0000-0000-00000000004d.jsonl") })); } #[tokio::test] async fn test_model_provider_filter_selects_only_matching_sessions() -> Result<()> { let temp = TempDir::new().unwrap(); let home = temp.path(); let openai_id = Uuid::from_u128(1); let beta_id = Uuid::from_u128(2); let none_id = Uuid::from_u128(3); write_session_file_with_provider( home, "2025-09-01T12-00-00", openai_id, 1, Some(SessionSource::VSCode), Some("openai"), )?; write_session_file_with_provider( home, "2025-09-01T11-00-00", beta_id, 1, Some(SessionSource::VSCode), Some("beta"), )?; write_session_file_with_provider( home, "2025-09-01T10-00-00", none_id, 1, Some(SessionSource::VSCode), None, )?; let openai_id_str = openai_id.to_string(); let none_id_str = none_id.to_string(); let openai_filter = provider_vec(&["openai"]); let openai_sessions = get_conversations( home, 10, None, NO_SOURCE_FILTER, Some(openai_filter.as_slice()), "openai", ) .await?; assert_eq!(openai_sessions.items.len(), 2); let openai_ids: Vec<_> = openai_sessions 
.items .iter() .filter_map(|item| { item.head .first() .and_then(|value| value.get("id")) .and_then(serde_json::Value::as_str) .map(str::to_string) }) .collect(); assert!(openai_ids.contains(&openai_id_str)); assert!(openai_ids.contains(&none_id_str)); let beta_filter = provider_vec(&["beta"]); let beta_sessions = get_conversations( home, 10, None, NO_SOURCE_FILTER, Some(beta_filter.as_slice()), "openai", ) .await?; assert_eq!(beta_sessions.items.len(), 1); let beta_id_str = beta_id.to_string(); let beta_head = beta_sessions .items .first() .and_then(|item| item.head.first()) .and_then(|value| value.get("id")) .and_then(serde_json::Value::as_str); assert_eq!(beta_head, Some(beta_id_str.as_str())); let unknown_filter = provider_vec(&["unknown"]); let unknown_sessions = get_conversations( home, 10, None, NO_SOURCE_FILTER, Some(unknown_filter.as_slice()), "openai", ) .await?; assert!(unknown_sessions.items.is_empty()); let all_sessions = get_conversations(home, 10, None, NO_SOURCE_FILTER, None, "openai").await?; assert_eq!(all_sessions.items.len(), 3); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/list.rs
codex-rs/core/src/rollout/list.rs
use std::cmp::Reverse; use std::io::{self}; use std::num::NonZero; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicBool; use time::OffsetDateTime; use time::PrimitiveDateTime; use time::format_description::FormatItem; use time::format_description::well_known::Rfc3339; use time::macros::format_description; use uuid::Uuid; use super::SESSIONS_SUBDIR; use crate::protocol::EventMsg; use codex_file_search as file_search; use codex_protocol::protocol::RolloutItem; use codex_protocol::protocol::RolloutLine; use codex_protocol::protocol::SessionSource; /// Returned page of conversation summaries. #[derive(Debug, Default, PartialEq)] pub struct ConversationsPage { /// Conversation summaries ordered newest first. pub items: Vec<ConversationItem>, /// Opaque pagination token to resume after the last item, or `None` if end. pub next_cursor: Option<Cursor>, /// Total number of files touched while scanning this request. pub num_scanned_files: usize, /// True if a hard scan cap was hit; consider resuming with `next_cursor`. pub reached_scan_cap: bool, } /// Summary information for a conversation rollout file. #[derive(Debug, PartialEq)] pub struct ConversationItem { /// Absolute path to the rollout file. pub path: PathBuf, /// First up to `HEAD_RECORD_LIMIT` JSONL records parsed as JSON (includes meta line). pub head: Vec<serde_json::Value>, /// RFC3339 timestamp string for when the session was created, if available. pub created_at: Option<String>, /// RFC3339 timestamp string for the most recent update (from file mtime). pub updated_at: Option<String>, } #[derive(Default)] struct HeadTailSummary { head: Vec<serde_json::Value>, saw_session_meta: bool, saw_user_event: bool, source: Option<SessionSource>, model_provider: Option<String>, created_at: Option<String>, updated_at: Option<String>, } /// Hard cap to bound worst‑case work per request. 
const MAX_SCAN_FILES: usize = 10000; const HEAD_RECORD_LIMIT: usize = 10; /// Pagination cursor identifying a file by timestamp and UUID. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Cursor { ts: OffsetDateTime, id: Uuid, } impl Cursor { fn new(ts: OffsetDateTime, id: Uuid) -> Self { Self { ts, id } } } impl serde::Serialize for Cursor { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { let ts_str = self .ts .format(&format_description!( "[year]-[month]-[day]T[hour]-[minute]-[second]" )) .map_err(|e| serde::ser::Error::custom(format!("format error: {e}")))?; serializer.serialize_str(&format!("{ts_str}|{}", self.id)) } } impl<'de> serde::Deserialize<'de> for Cursor { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let s = String::deserialize(deserializer)?; parse_cursor(&s).ok_or_else(|| serde::de::Error::custom("invalid cursor")) } } /// Retrieve recorded conversation file paths with token pagination. The returned `next_cursor` /// can be supplied on the next call to resume after the last returned item, resilient to /// concurrent new sessions being appended. Ordering is stable by timestamp desc, then UUID desc. 
pub(crate) async fn get_conversations( codex_home: &Path, page_size: usize, cursor: Option<&Cursor>, allowed_sources: &[SessionSource], model_providers: Option<&[String]>, default_provider: &str, ) -> io::Result<ConversationsPage> { let mut root = codex_home.to_path_buf(); root.push(SESSIONS_SUBDIR); if !root.exists() { return Ok(ConversationsPage { items: Vec::new(), next_cursor: None, num_scanned_files: 0, reached_scan_cap: false, }); } let anchor = cursor.cloned(); let provider_matcher = model_providers.and_then(|filters| ProviderMatcher::new(filters, default_provider)); let result = traverse_directories_for_paths( root.clone(), page_size, anchor, allowed_sources, provider_matcher.as_ref(), ) .await?; Ok(result) } /// Load conversation file paths from disk using directory traversal. /// /// Directory layout: `~/.codex/sessions/YYYY/MM/DD/rollout-YYYY-MM-DDThh-mm-ss-<uuid>.jsonl` /// Returned newest (latest) first. async fn traverse_directories_for_paths( root: PathBuf, page_size: usize, anchor: Option<Cursor>, allowed_sources: &[SessionSource], provider_matcher: Option<&ProviderMatcher<'_>>, ) -> io::Result<ConversationsPage> { let mut items: Vec<ConversationItem> = Vec::with_capacity(page_size); let mut scanned_files = 0usize; let mut anchor_passed = anchor.is_none(); let (anchor_ts, anchor_id) = match anchor { Some(c) => (c.ts, c.id), None => (OffsetDateTime::UNIX_EPOCH, Uuid::nil()), }; let mut more_matches_available = false; let year_dirs = collect_dirs_desc(&root, |s| s.parse::<u16>().ok()).await?; 'outer: for (_year, year_path) in year_dirs.iter() { if scanned_files >= MAX_SCAN_FILES { break; } let month_dirs = collect_dirs_desc(year_path, |s| s.parse::<u8>().ok()).await?; for (_month, month_path) in month_dirs.iter() { if scanned_files >= MAX_SCAN_FILES { break 'outer; } let day_dirs = collect_dirs_desc(month_path, |s| s.parse::<u8>().ok()).await?; for (_day, day_path) in day_dirs.iter() { if scanned_files >= MAX_SCAN_FILES { break 'outer; } let mut 
day_files = collect_files(day_path, |name_str, path| { if !name_str.starts_with("rollout-") || !name_str.ends_with(".jsonl") { return None; } parse_timestamp_uuid_from_filename(name_str) .map(|(ts, id)| (ts, id, name_str.to_string(), path.to_path_buf())) }) .await?; // Stable ordering within the same second: (timestamp desc, uuid desc) day_files.sort_by_key(|(ts, sid, _name_str, _path)| (Reverse(*ts), Reverse(*sid))); for (ts, sid, _name_str, path) in day_files.into_iter() { scanned_files += 1; if scanned_files >= MAX_SCAN_FILES && items.len() >= page_size { more_matches_available = true; break 'outer; } if !anchor_passed { if ts < anchor_ts || (ts == anchor_ts && sid < anchor_id) { anchor_passed = true; } else { continue; } } if items.len() == page_size { more_matches_available = true; break 'outer; } // Read head and detect message events; stop once meta + user are found. let summary = read_head_summary(&path, HEAD_RECORD_LIMIT) .await .unwrap_or_default(); if !allowed_sources.is_empty() && !summary .source .is_some_and(|source| allowed_sources.iter().any(|s| s == &source)) { continue; } if let Some(matcher) = provider_matcher && !matcher.matches(summary.model_provider.as_deref()) { continue; } // Apply filters: must have session meta and at least one user message event if summary.saw_session_meta && summary.saw_user_event { let HeadTailSummary { head, created_at, mut updated_at, .. 
} = summary; if updated_at.is_none() { updated_at = file_modified_rfc3339(&path) .await .unwrap_or(None) .or_else(|| created_at.clone()); } items.push(ConversationItem { path, head, created_at, updated_at, }); } } } } } let reached_scan_cap = scanned_files >= MAX_SCAN_FILES; if reached_scan_cap && !items.is_empty() { more_matches_available = true; } let next = if more_matches_available { build_next_cursor(&items) } else { None }; Ok(ConversationsPage { items, next_cursor: next, num_scanned_files: scanned_files, reached_scan_cap, }) } /// Pagination cursor token format: "<file_ts>|<uuid>" where `file_ts` matches the /// filename timestamp portion (YYYY-MM-DDThh-mm-ss) used in rollout filenames. /// The cursor orders files by timestamp desc, then UUID desc. pub fn parse_cursor(token: &str) -> Option<Cursor> { let (file_ts, uuid_str) = token.split_once('|')?; let Ok(uuid) = Uuid::parse_str(uuid_str) else { return None; }; let format: &[FormatItem] = format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]"); let ts = PrimitiveDateTime::parse(file_ts, format).ok()?.assume_utc(); Some(Cursor::new(ts, uuid)) } fn build_next_cursor(items: &[ConversationItem]) -> Option<Cursor> { let last = items.last()?; let file_name = last.path.file_name()?.to_string_lossy(); let (ts, id) = parse_timestamp_uuid_from_filename(&file_name)?; Some(Cursor::new(ts, id)) } /// Collects immediate subdirectories of `parent`, parses their (string) names with `parse`, /// and returns them sorted descending by the parsed key. async fn collect_dirs_desc<T, F>(parent: &Path, parse: F) -> io::Result<Vec<(T, PathBuf)>> where T: Ord + Copy, F: Fn(&str) -> Option<T>, { let mut dir = tokio::fs::read_dir(parent).await?; let mut vec: Vec<(T, PathBuf)> = Vec::new(); while let Some(entry) = dir.next_entry().await? 
{ if entry .file_type() .await .map(|ft| ft.is_dir()) .unwrap_or(false) && let Some(s) = entry.file_name().to_str() && let Some(v) = parse(s) { vec.push((v, entry.path())); } } vec.sort_by_key(|(v, _)| Reverse(*v)); Ok(vec) } /// Collects files in a directory and parses them with `parse`. async fn collect_files<T, F>(parent: &Path, parse: F) -> io::Result<Vec<T>> where F: Fn(&str, &Path) -> Option<T>, { let mut dir = tokio::fs::read_dir(parent).await?; let mut collected: Vec<T> = Vec::new(); while let Some(entry) = dir.next_entry().await? { if entry .file_type() .await .map(|ft| ft.is_file()) .unwrap_or(false) && let Some(s) = entry.file_name().to_str() && let Some(v) = parse(s, &entry.path()) { collected.push(v); } } Ok(collected) } fn parse_timestamp_uuid_from_filename(name: &str) -> Option<(OffsetDateTime, Uuid)> { // Expected: rollout-YYYY-MM-DDThh-mm-ss-<uuid>.jsonl let core = name.strip_prefix("rollout-")?.strip_suffix(".jsonl")?; // Scan from the right for a '-' such that the suffix parses as a UUID. 
let (sep_idx, uuid) = core .match_indices('-') .rev() .find_map(|(i, _)| Uuid::parse_str(&core[i + 1..]).ok().map(|u| (i, u)))?; let ts_str = &core[..sep_idx]; let format: &[FormatItem] = format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]"); let ts = PrimitiveDateTime::parse(ts_str, format).ok()?.assume_utc(); Some((ts, uuid)) } struct ProviderMatcher<'a> { filters: &'a [String], matches_default_provider: bool, } impl<'a> ProviderMatcher<'a> { fn new(filters: &'a [String], default_provider: &'a str) -> Option<Self> { if filters.is_empty() { return None; } let matches_default_provider = filters.iter().any(|provider| provider == default_provider); Some(Self { filters, matches_default_provider, }) } fn matches(&self, session_provider: Option<&str>) -> bool { match session_provider { Some(provider) => self.filters.iter().any(|candidate| candidate == provider), None => self.matches_default_provider, } } } async fn read_head_summary(path: &Path, head_limit: usize) -> io::Result<HeadTailSummary> { use tokio::io::AsyncBufReadExt; let file = tokio::fs::File::open(path).await?; let reader = tokio::io::BufReader::new(file); let mut lines = reader.lines(); let mut summary = HeadTailSummary::default(); while summary.head.len() < head_limit { let line_opt = lines.next_line().await?; let Some(line) = line_opt else { break }; let trimmed = line.trim(); if trimmed.is_empty() { continue; } let parsed: Result<RolloutLine, _> = serde_json::from_str(trimmed); let Ok(rollout_line) = parsed else { continue }; match rollout_line.item { RolloutItem::SessionMeta(session_meta_line) => { summary.source = Some(session_meta_line.meta.source.clone()); summary.model_provider = session_meta_line.meta.model_provider.clone(); summary.created_at = summary .created_at .clone() .or_else(|| Some(rollout_line.timestamp.clone())); if let Ok(val) = serde_json::to_value(session_meta_line) { summary.head.push(val); summary.saw_session_meta = true; } } RolloutItem::ResponseItem(item) => { 
summary.created_at = summary .created_at .clone() .or_else(|| Some(rollout_line.timestamp.clone())); if let Ok(val) = serde_json::to_value(item) { summary.head.push(val); } } RolloutItem::TurnContext(_) => { // Not included in `head`; skip. } RolloutItem::Compacted(_) => { // Not included in `head`; skip. } RolloutItem::EventMsg(ev) => { if matches!(ev, EventMsg::UserMessage(_)) { summary.saw_user_event = true; } } } if summary.saw_session_meta && summary.saw_user_event { break; } } Ok(summary) } /// Read up to `HEAD_RECORD_LIMIT` records from the start of the rollout file at `path`. /// This should be enough to produce a summary including the session meta line. pub async fn read_head_for_summary(path: &Path) -> io::Result<Vec<serde_json::Value>> { let summary = read_head_summary(path, HEAD_RECORD_LIMIT).await?; Ok(summary.head) } async fn file_modified_rfc3339(path: &Path) -> io::Result<Option<String>> { let meta = tokio::fs::metadata(path).await?; let modified = meta.modified().ok(); let Some(modified) = modified else { return Ok(None); }; let dt = OffsetDateTime::from(modified); Ok(dt.format(&Rfc3339).ok()) } /// Locate a recorded conversation rollout file by its UUID string using the existing /// paginated listing implementation. Returns `Ok(Some(path))` if found, `Ok(None)` if not present /// or the id is invalid. pub async fn find_conversation_path_by_id_str( codex_home: &Path, id_str: &str, ) -> io::Result<Option<PathBuf>> { // Validate UUID format early. if Uuid::parse_str(id_str).is_err() { return Ok(None); } let mut root = codex_home.to_path_buf(); root.push(SESSIONS_SUBDIR); if !root.exists() { return Ok(None); } // This is safe because we know the values are valid. #[allow(clippy::unwrap_used)] let limit = NonZero::new(1).unwrap(); // This is safe because we know the values are valid. 
#[allow(clippy::unwrap_used)] let threads = NonZero::new(2).unwrap(); let cancel = Arc::new(AtomicBool::new(false)); let exclude: Vec<String> = Vec::new(); let compute_indices = false; let results = file_search::run( id_str, limit, &root, exclude, threads, cancel, compute_indices, false, ) .map_err(|e| io::Error::other(format!("file search failed: {e}")))?; Ok(results .matches .into_iter() .next() .map(|m| root.join(m.path))) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/policy.rs
codex-rs/core/src/rollout/policy.rs
use crate::protocol::EventMsg; use crate::protocol::RolloutItem; use codex_protocol::models::ResponseItem; /// Whether a rollout `item` should be persisted in rollout files. #[inline] pub(crate) fn is_persisted_response_item(item: &RolloutItem) -> bool { match item { RolloutItem::ResponseItem(item) => should_persist_response_item(item), RolloutItem::EventMsg(ev) => should_persist_event_msg(ev), // Persist Codex executive markers so we can analyze flows (e.g., compaction, API turns). RolloutItem::Compacted(_) | RolloutItem::TurnContext(_) | RolloutItem::SessionMeta(_) => { true } } } /// Whether a `ResponseItem` should be persisted in rollout files. #[inline] pub(crate) fn should_persist_response_item(item: &ResponseItem) -> bool { match item { ResponseItem::Message { .. } | ResponseItem::Reasoning { .. } | ResponseItem::LocalShellCall { .. } | ResponseItem::FunctionCall { .. } | ResponseItem::FunctionCallOutput { .. } | ResponseItem::CustomToolCall { .. } | ResponseItem::CustomToolCallOutput { .. } | ResponseItem::WebSearchCall { .. } | ResponseItem::GhostSnapshot { .. } | ResponseItem::Compaction { .. } => true, ResponseItem::Other => false, } } /// Whether an `EventMsg` should be persisted in rollout files. 
#[inline] pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { match ev { EventMsg::UserMessage(_) | EventMsg::AgentMessage(_) | EventMsg::AgentReasoning(_) | EventMsg::AgentReasoningRawContent(_) | EventMsg::TokenCount(_) | EventMsg::ContextCompacted(_) | EventMsg::EnteredReviewMode(_) | EventMsg::ExitedReviewMode(_) | EventMsg::UndoCompleted(_) | EventMsg::TurnAborted(_) => true, EventMsg::Error(_) | EventMsg::Warning(_) | EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_) | EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_) | EventMsg::AgentReasoningRawContentDelta(_) | EventMsg::AgentReasoningSectionBreak(_) | EventMsg::RawResponseItem(_) | EventMsg::SessionConfigured(_) | EventMsg::McpToolCallBegin(_) | EventMsg::McpToolCallEnd(_) | EventMsg::WebSearchBegin(_) | EventMsg::WebSearchEnd(_) | EventMsg::ExecCommandBegin(_) | EventMsg::TerminalInteraction(_) | EventMsg::ExecCommandOutputDelta(_) | EventMsg::ExecCommandEnd(_) | EventMsg::ExecApprovalRequest(_) | EventMsg::ElicitationRequest(_) | EventMsg::ApplyPatchApprovalRequest(_) | EventMsg::BackgroundEvent(_) | EventMsg::StreamError(_) | EventMsg::PatchApplyBegin(_) | EventMsg::PatchApplyEnd(_) | EventMsg::TurnDiff(_) | EventMsg::GetHistoryEntryResponse(_) | EventMsg::UndoStarted(_) | EventMsg::McpListToolsResponse(_) | EventMsg::McpStartupUpdate(_) | EventMsg::McpStartupComplete(_) | EventMsg::ListCustomPromptsResponse(_) | EventMsg::ListSkillsResponse(_) | EventMsg::PlanUpdate(_) | EventMsg::ShutdownComplete | EventMsg::ViewImageToolCall(_) | EventMsg::DeprecationNotice(_) | EventMsg::ItemStarted(_) | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) | EventMsg::ReasoningContentDelta(_) | EventMsg::ReasoningRawContentDelta(_) | EventMsg::SkillsUpdateAvailable => false, } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/error.rs
codex-rs/core/src/rollout/error.rs
use std::io::ErrorKind; use std::path::Path; use crate::error::CodexErr; use crate::rollout::SESSIONS_SUBDIR; pub(crate) fn map_session_init_error(err: &anyhow::Error, codex_home: &Path) -> CodexErr { if let Some(mapped) = err .chain() .filter_map(|cause| cause.downcast_ref::<std::io::Error>()) .find_map(|io_err| map_rollout_io_error(io_err, codex_home)) { return mapped; } CodexErr::Fatal(format!("Failed to initialize session: {err:#}")) } fn map_rollout_io_error(io_err: &std::io::Error, codex_home: &Path) -> Option<CodexErr> { let sessions_dir = codex_home.join(SESSIONS_SUBDIR); let hint = match io_err.kind() { ErrorKind::PermissionDenied => format!( "Codex cannot access session files at {} (permission denied). If sessions were created using sudo, fix ownership: sudo chown -R $(whoami) {}", sessions_dir.display(), codex_home.display() ), ErrorKind::NotFound => format!( "Session storage missing at {}. Create the directory or choose a different Codex home.", sessions_dir.display() ), ErrorKind::AlreadyExists => format!( "Session storage path {} is blocked by an existing file. Remove or rename it so Codex can create sessions.", sessions_dir.display() ), ErrorKind::InvalidData | ErrorKind::InvalidInput => format!( "Session data under {} looks corrupt or unreadable. Clearing the sessions directory may help (this will remove saved conversations).", sessions_dir.display() ), ErrorKind::IsADirectory | ErrorKind::NotADirectory => format!( "Session storage path {} has an unexpected type. Ensure it is a directory Codex can use for session files.", sessions_dir.display() ), _ => return None, }; Some(CodexErr::Fatal(format!( "{hint} (underlying error: {io_err})" ))) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/recorder.rs
codex-rs/core/src/rollout/recorder.rs
//! Persist Codex session rollouts (.jsonl) so sessions can be replayed or inspected later. use std::fs::File; use std::fs::{self}; use std::io::Error as IoError; use std::path::Path; use std::path::PathBuf; use codex_protocol::ConversationId; use serde_json::Value; use time::OffsetDateTime; use time::format_description::FormatItem; use time::macros::format_description; use tokio::io::AsyncWriteExt; use tokio::sync::mpsc::Sender; use tokio::sync::mpsc::{self}; use tokio::sync::oneshot; use tracing::info; use tracing::warn; use super::SESSIONS_SUBDIR; use super::list::ConversationsPage; use super::list::Cursor; use super::list::get_conversations; use super::policy::is_persisted_response_item; use crate::config::Config; use crate::default_client::originator; use crate::git_info::collect_git_info; use codex_protocol::protocol::InitialHistory; use codex_protocol::protocol::ResumedHistory; use codex_protocol::protocol::RolloutItem; use codex_protocol::protocol::RolloutLine; use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource; /// Records all [`ResponseItem`]s for a session and flushes them to disk after /// every update. /// /// Rollouts are recorded as JSONL and can be inspected with tools such as: /// /// ```ignore /// $ jq -C . ~/.codex/sessions/rollout-2025-05-07T17-24-21-5973b6c0-94b8-487b-a530-2aeb6098ae0e.jsonl /// $ fx ~/.codex/sessions/rollout-2025-05-07T17-24-21-5973b6c0-94b8-487b-a530-2aeb6098ae0e.jsonl /// ``` #[derive(Clone)] pub struct RolloutRecorder { tx: Sender<RolloutCmd>, pub(crate) rollout_path: PathBuf, } #[derive(Clone)] pub enum RolloutRecorderParams { Create { conversation_id: ConversationId, instructions: Option<String>, source: SessionSource, }, Resume { path: PathBuf, }, } enum RolloutCmd { AddItems(Vec<RolloutItem>), /// Ensure all prior writes are processed; respond when flushed. 
Flush { ack: oneshot::Sender<()>, }, Shutdown { ack: oneshot::Sender<()>, }, } impl RolloutRecorderParams { pub fn new( conversation_id: ConversationId, instructions: Option<String>, source: SessionSource, ) -> Self { Self::Create { conversation_id, instructions, source, } } pub fn resume(path: PathBuf) -> Self { Self::Resume { path } } } impl RolloutRecorder { /// List conversations (rollout files) under the provided Codex home directory. pub async fn list_conversations( codex_home: &Path, page_size: usize, cursor: Option<&Cursor>, allowed_sources: &[SessionSource], model_providers: Option<&[String]>, default_provider: &str, ) -> std::io::Result<ConversationsPage> { get_conversations( codex_home, page_size, cursor, allowed_sources, model_providers, default_provider, ) .await } /// Attempt to create a new [`RolloutRecorder`]. If the sessions directory /// cannot be created or the rollout file cannot be opened we return the /// error so the caller can decide whether to disable persistence. 
pub async fn new(config: &Config, params: RolloutRecorderParams) -> std::io::Result<Self> { let (file, rollout_path, meta) = match params { RolloutRecorderParams::Create { conversation_id, instructions, source, } => { let LogFileInfo { file, path, conversation_id: session_id, timestamp, } = create_log_file(config, conversation_id)?; let timestamp_format: &[FormatItem] = format_description!( "[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z" ); let timestamp = timestamp .to_offset(time::UtcOffset::UTC) .format(timestamp_format) .map_err(|e| IoError::other(format!("failed to format timestamp: {e}")))?; ( tokio::fs::File::from_std(file), path, Some(SessionMeta { id: session_id, timestamp, cwd: config.cwd.clone(), originator: originator().value.clone(), cli_version: env!("CARGO_PKG_VERSION").to_string(), instructions, source, model_provider: Some(config.model_provider_id.clone()), }), ) } RolloutRecorderParams::Resume { path } => ( tokio::fs::OpenOptions::new() .append(true) .open(&path) .await?, path, None, ), }; // Clone the cwd for the spawned task to collect git info asynchronously let cwd = config.cwd.clone(); // A reasonably-sized bounded channel. If the buffer fills up the send // future will yield, which is fine – we only need to ensure we do not // perform *blocking* I/O on the caller's thread. let (tx, rx) = mpsc::channel::<RolloutCmd>(256); // Spawn a Tokio task that owns the file handle and performs async // writes. Using `tokio::fs::File` keeps everything on the async I/O // driver instead of blocking the runtime. tokio::task::spawn(rollout_writer(file, rx, meta, cwd)); Ok(Self { tx, rollout_path }) } pub(crate) async fn record_items(&self, items: &[RolloutItem]) -> std::io::Result<()> { let mut filtered = Vec::new(); for item in items { // Note that function calls may look a bit strange if they are // "fully qualified MCP tool calls," so we could consider // reformatting them in that case. 
if is_persisted_response_item(item) { filtered.push(item.clone()); } } if filtered.is_empty() { return Ok(()); } self.tx .send(RolloutCmd::AddItems(filtered)) .await .map_err(|e| IoError::other(format!("failed to queue rollout items: {e}"))) } /// Flush all queued writes and wait until they are committed by the writer task. pub async fn flush(&self) -> std::io::Result<()> { let (tx, rx) = oneshot::channel(); self.tx .send(RolloutCmd::Flush { ack: tx }) .await .map_err(|e| IoError::other(format!("failed to queue rollout flush: {e}")))?; rx.await .map_err(|e| IoError::other(format!("failed waiting for rollout flush: {e}"))) } pub async fn get_rollout_history(path: &Path) -> std::io::Result<InitialHistory> { info!("Resuming rollout from {path:?}"); let text = tokio::fs::read_to_string(path).await?; if text.trim().is_empty() { return Err(IoError::other("empty session file")); } let mut items: Vec<RolloutItem> = Vec::new(); let mut conversation_id: Option<ConversationId> = None; for line in text.lines() { if line.trim().is_empty() { continue; } let v: Value = match serde_json::from_str(line) { Ok(v) => v, Err(e) => { warn!("failed to parse line as JSON: {line:?}, error: {e}"); continue; } }; // Parse the rollout line structure match serde_json::from_value::<RolloutLine>(v.clone()) { Ok(rollout_line) => match rollout_line.item { RolloutItem::SessionMeta(session_meta_line) => { // Use the FIRST SessionMeta encountered in the file as the canonical // conversation id and main session information. Keep all items intact. 
if conversation_id.is_none() { conversation_id = Some(session_meta_line.meta.id); } items.push(RolloutItem::SessionMeta(session_meta_line)); } RolloutItem::ResponseItem(item) => { items.push(RolloutItem::ResponseItem(item)); } RolloutItem::Compacted(item) => { items.push(RolloutItem::Compacted(item)); } RolloutItem::TurnContext(item) => { items.push(RolloutItem::TurnContext(item)); } RolloutItem::EventMsg(_ev) => { items.push(RolloutItem::EventMsg(_ev)); } }, Err(e) => { warn!("failed to parse rollout line: {v:?}, error: {e}"); } } } info!( "Resumed rollout with {} items, conversation ID: {:?}", items.len(), conversation_id ); let conversation_id = conversation_id .ok_or_else(|| IoError::other("failed to parse conversation ID from rollout file"))?; if items.is_empty() { return Ok(InitialHistory::New); } info!("Resumed rollout successfully from {path:?}"); Ok(InitialHistory::Resumed(ResumedHistory { conversation_id, history: items, rollout_path: path.to_path_buf(), })) } pub async fn shutdown(&self) -> std::io::Result<()> { let (tx_done, rx_done) = oneshot::channel(); match self.tx.send(RolloutCmd::Shutdown { ack: tx_done }).await { Ok(_) => rx_done .await .map_err(|e| IoError::other(format!("failed waiting for rollout shutdown: {e}"))), Err(e) => { warn!("failed to send rollout shutdown command: {e}"); Err(IoError::other(format!( "failed to send rollout shutdown command: {e}" ))) } } } } struct LogFileInfo { /// Opened file handle to the rollout file. file: File, /// Full path to the rollout file. path: PathBuf, /// Session ID (also embedded in filename). conversation_id: ConversationId, /// Timestamp for the start of the session. timestamp: OffsetDateTime, } fn create_log_file( config: &Config, conversation_id: ConversationId, ) -> std::io::Result<LogFileInfo> { // Resolve ~/.codex/sessions/YYYY/MM/DD and create it if missing. 
let timestamp = OffsetDateTime::now_local() .map_err(|e| IoError::other(format!("failed to get local time: {e}")))?; let mut dir = config.codex_home.clone(); dir.push(SESSIONS_SUBDIR); dir.push(timestamp.year().to_string()); dir.push(format!("{:02}", u8::from(timestamp.month()))); dir.push(format!("{:02}", timestamp.day())); fs::create_dir_all(&dir)?; // Custom format for YYYY-MM-DDThh-mm-ss. Use `-` instead of `:` for // compatibility with filesystems that do not allow colons in filenames. let format: &[FormatItem] = format_description!("[year]-[month]-[day]T[hour]-[minute]-[second]"); let date_str = timestamp .format(format) .map_err(|e| IoError::other(format!("failed to format timestamp: {e}")))?; let filename = format!("rollout-{date_str}-{conversation_id}.jsonl"); let path = dir.join(filename); let file = std::fs::OpenOptions::new() .append(true) .create(true) .open(&path)?; Ok(LogFileInfo { file, path, conversation_id, timestamp, }) } async fn rollout_writer( file: tokio::fs::File, mut rx: mpsc::Receiver<RolloutCmd>, mut meta: Option<SessionMeta>, cwd: std::path::PathBuf, ) -> std::io::Result<()> { let mut writer = JsonlWriter { file }; // If we have a meta, collect git info asynchronously and write meta first if let Some(session_meta) = meta.take() { let git_info = collect_git_info(&cwd).await; let session_meta_line = SessionMetaLine { meta: session_meta, git: git_info, }; // Write the SessionMeta as the first item in the file, wrapped in a rollout line writer .write_rollout_item(RolloutItem::SessionMeta(session_meta_line)) .await?; } // Process rollout commands while let Some(cmd) = rx.recv().await { match cmd { RolloutCmd::AddItems(items) => { for item in items { if is_persisted_response_item(&item) { writer.write_rollout_item(item).await?; } } } RolloutCmd::Flush { ack } => { // Ensure underlying file is flushed and then ack. 
if let Err(e) = writer.file.flush().await { let _ = ack.send(()); return Err(e); } let _ = ack.send(()); } RolloutCmd::Shutdown { ack } => { let _ = ack.send(()); } } } Ok(()) } struct JsonlWriter { file: tokio::fs::File, } impl JsonlWriter { async fn write_rollout_item(&mut self, rollout_item: RolloutItem) -> std::io::Result<()> { let timestamp_format: &[FormatItem] = format_description!( "[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z" ); let timestamp = OffsetDateTime::now_utc() .format(timestamp_format) .map_err(|e| IoError::other(format!("failed to format timestamp: {e}")))?; let line = RolloutLine { timestamp, item: rollout_item, }; self.write_line(&line).await } async fn write_line(&mut self, item: &impl serde::Serialize) -> std::io::Result<()> { let mut json = serde_json::to_string(item)?; json.push('\n'); self.file.write_all(json.as_bytes()).await?; self.file.flush().await?; Ok(()) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/rollout/mod.rs
codex-rs/core/src/rollout/mod.rs
//! Rollout module: persistence and discovery of session rollout files. use codex_protocol::protocol::SessionSource; pub const SESSIONS_SUBDIR: &str = "sessions"; pub const ARCHIVED_SESSIONS_SUBDIR: &str = "archived_sessions"; pub const INTERACTIVE_SESSION_SOURCES: &[SessionSource] = &[SessionSource::Cli, SessionSource::VSCode]; pub(crate) mod error; pub mod list; pub(crate) mod policy; pub mod recorder; pub use codex_protocol::protocol::SessionMeta; pub(crate) use error::map_session_init_error; pub use list::find_conversation_path_by_id_str; pub use recorder::RolloutRecorder; pub use recorder::RolloutRecorderParams; #[cfg(test)] pub mod tests;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/features/legacy.rs
codex-rs/core/src/features/legacy.rs
use super::Feature; use super::Features; use tracing::info; #[derive(Clone, Copy)] struct Alias { legacy_key: &'static str, feature: Feature, } const ALIASES: &[Alias] = &[ Alias { legacy_key: "enable_experimental_windows_sandbox", feature: Feature::WindowsSandbox, }, Alias { legacy_key: "experimental_use_unified_exec_tool", feature: Feature::UnifiedExec, }, Alias { legacy_key: "experimental_use_freeform_apply_patch", feature: Feature::ApplyPatchFreeform, }, Alias { legacy_key: "include_apply_patch_tool", feature: Feature::ApplyPatchFreeform, }, Alias { legacy_key: "web_search", feature: Feature::WebSearchRequest, }, ]; pub(crate) fn feature_for_key(key: &str) -> Option<Feature> { ALIASES .iter() .find(|alias| alias.legacy_key == key) .map(|alias| { log_alias(alias.legacy_key, alias.feature); alias.feature }) } #[derive(Debug, Default)] pub struct LegacyFeatureToggles { pub include_apply_patch_tool: Option<bool>, pub experimental_use_freeform_apply_patch: Option<bool>, pub experimental_use_unified_exec_tool: Option<bool>, pub tools_web_search: Option<bool>, pub tools_view_image: Option<bool>, } impl LegacyFeatureToggles { pub fn apply(self, features: &mut Features) { set_if_some( features, Feature::ApplyPatchFreeform, self.include_apply_patch_tool, "include_apply_patch_tool", ); set_if_some( features, Feature::ApplyPatchFreeform, self.experimental_use_freeform_apply_patch, "experimental_use_freeform_apply_patch", ); set_if_some( features, Feature::UnifiedExec, self.experimental_use_unified_exec_tool, "experimental_use_unified_exec_tool", ); set_if_some( features, Feature::WebSearchRequest, self.tools_web_search, "tools.web_search", ); set_if_some( features, Feature::ViewImageTool, self.tools_view_image, "tools.view_image", ); } } fn set_if_some( features: &mut Features, feature: Feature, maybe_value: Option<bool>, alias_key: &'static str, ) { if let Some(enabled) = maybe_value { set_feature(features, feature, enabled); log_alias(alias_key, feature); 
features.record_legacy_usage(alias_key, feature); } } fn set_feature(features: &mut Features, feature: Feature, enabled: bool) { if enabled { features.enable(feature); } else { features.disable(feature); } } fn log_alias(alias: &str, feature: Feature) { let canonical = feature.key(); if alias == canonical { return; } info!( %alias, canonical, "legacy feature toggle detected; prefer `[features].{canonical}`" ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/unified_exec/errors.rs
codex-rs/core/src/unified_exec/errors.rs
use crate::exec::ExecToolCallOutput; use thiserror::Error; #[derive(Debug, Error)] pub(crate) enum UnifiedExecError { #[error("Failed to create unified exec session: {message}")] CreateSession { message: String }, // Called "session" in the model's training. #[error("Unknown session id {process_id}")] UnknownSessionId { process_id: String }, #[error("failed to write to stdin")] WriteToStdin, #[error("missing command line for unified exec request")] MissingCommandLine, #[error("Command denied by sandbox: {message}")] SandboxDenied { message: String, output: ExecToolCallOutput, }, } impl UnifiedExecError { pub(crate) fn create_session(message: String) -> Self { Self::CreateSession { message } } pub(crate) fn sandbox_denied(message: String, output: ExecToolCallOutput) -> Self { Self::SandboxDenied { message, output } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/unified_exec/session_manager.rs
codex-rs/core/src/unified_exec/session_manager.rs
use rand::Rng;
use std::cmp::Reverse;
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;

use crate::bash::extract_bash_command;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::exec_env::create_env;
use crate::protocol::BackgroundEventEvent;
use crate::protocol::EventMsg;
use crate::sandboxing::ExecEnv;
use crate::sandboxing::SandboxPermissions;
use crate::tools::orchestrator::ToolOrchestrator;
use crate::tools::runtimes::unified_exec::UnifiedExecRequest as UnifiedExecToolRequest;
use crate::tools::runtimes::unified_exec::UnifiedExecRuntime;
use crate::tools::sandboxing::ToolCtx;
use crate::truncate::TruncationPolicy;
use crate::truncate::approx_token_count;
use crate::truncate::formatted_truncate_text;

use super::CommandTranscript;
use super::ExecCommandRequest;
use super::MAX_UNIFIED_EXEC_SESSIONS;
use super::SessionEntry;
use super::SessionStore;
use super::UnifiedExecContext;
use super::UnifiedExecError;
use super::UnifiedExecResponse;
use super::UnifiedExecSessionManager;
use super::WARNING_UNIFIED_EXEC_SESSIONS;
use super::WriteStdinRequest;
use super::async_watcher::emit_exec_end_for_unified_exec;
use super::async_watcher::spawn_exit_watcher;
use super::async_watcher::start_streaming_output;
use super::clamp_yield_time;
use super::generate_chunk_id;
use super::resolve_max_tokens;
use super::session::OutputBuffer;
use super::session::OutputHandles;
use super::session::UnifiedExecSession;

// Environment overrides injected into every unified exec PTY so interactive
// programs behave like non-interactive ones (no colors, no pagers).
const UNIFIED_EXEC_ENV: [(&str, &str); 8] = [
    ("NO_COLOR", "1"),
    ("TERM", "dumb"),
    ("LANG", "C.UTF-8"),
    ("LC_CTYPE", "C.UTF-8"),
    ("LC_ALL", "C.UTF-8"),
    ("COLORTERM", ""),
    ("PAGER", "cat"),
    ("GIT_PAGER", "cat"),
];

/// Overlays `UNIFIED_EXEC_ENV` onto `env`, replacing any values already set.
fn apply_unified_exec_env(mut env: HashMap<String, String>) -> HashMap<String, String> {
    for (key, value) in UNIFIED_EXEC_ENV {
        env.insert(key.to_string(), value.to_string());
    }
    env
}

/// Handles copied out of a stored `SessionEntry` while the store lock is
/// held, so `write_stdin` can poll/write without keeping the lock.
struct PreparedSessionHandles {
    writer_tx: mpsc::Sender<Vec<u8>>,
    output_buffer: OutputBuffer,
    output_notify: Arc<Notify>,
    cancellation_token: CancellationToken,
    session_ref: Arc<Session>,
    turn_ref: Arc<TurnContext>,
    command: Vec<String>,
    process_id: String,
}

impl UnifiedExecSessionManager {
    /// Reserves and returns a process id that is unique within the store.
    /// Random (1000..100000) in production; monotonically increasing from
    /// 1000 under test or the `deterministic_process_ids` feature.
    pub(crate) async fn allocate_process_id(&self) -> String {
        loop {
            let mut store = self.session_store.lock().await;
            let process_id = if !cfg!(test) && !cfg!(feature = "deterministic_process_ids") {
                // production mode → random
                rand::rng().random_range(1_000..100_000).to_string()
            } else {
                // test or deterministic mode
                let next = store
                    .reserved_sessions_id
                    .iter()
                    .filter_map(|s| s.parse::<i32>().ok())
                    .max()
                    .map(|m| std::cmp::max(m, 999) + 1)
                    .unwrap_or(1000);
                next.to_string()
            };
            // Retry (loop) on collision; otherwise reserve atomically under
            // the store lock.
            if store.reserved_sessions_id.contains(&process_id) {
                continue;
            }
            store.reserved_sessions_id.insert(process_id.clone());
            return process_id;
        }
    }

    /// Frees a reserved id and drops any stored session under it.
    pub(crate) async fn release_process_id(&self, process_id: &str) {
        let mut store = self.session_store.lock().await;
        store.remove(process_id);
    }

    /// Spawns a command in a PTY, streams its output, and collects a bounded
    /// snapshot of output for the tool response. Short-lived commands are
    /// finalized immediately; long-lived ones are persisted for `write_stdin`.
    pub(crate) async fn exec_command(
        &self,
        request: ExecCommandRequest,
        context: &UnifiedExecContext,
    ) -> Result<UnifiedExecResponse, UnifiedExecError> {
        let cwd = request
            .workdir
            .clone()
            .unwrap_or_else(|| context.turn.cwd.clone());
        let session = self
            .open_session_with_sandbox(
                &request.command,
                cwd.clone(),
                request.sandbox_permissions,
                request.justification,
                context,
            )
            .await;
        let session = match session {
            Ok(session) => Arc::new(session),
            Err(err) => {
                // Spawn failed: give back the reserved id before bailing.
                self.release_process_id(&request.process_id).await;
                return Err(err);
            }
        };
        let transcript = Arc::new(tokio::sync::Mutex::new(CommandTranscript::default()));
        start_streaming_output(&session, context, Arc::clone(&transcript));

        let max_tokens = resolve_max_tokens(request.max_output_tokens);
        let yield_time_ms = clamp_yield_time(request.yield_time_ms);
        let start = Instant::now();
        // For the initial exec_command call, we both stream output to events
        // (via start_streaming_output above) and collect a snapshot here for
        // the tool response body.
        let OutputHandles {
            output_buffer,
            output_notify,
            cancellation_token,
        } = session.output_handles();
        let deadline = start + Duration::from_millis(yield_time_ms);
        let collected = Self::collect_output_until_deadline(
            &output_buffer,
            &output_notify,
            &cancellation_token,
            deadline,
        )
        .await;
        let wall_time = Instant::now().saturating_duration_since(start);
        let text = String::from_utf8_lossy(&collected).to_string();
        let output = formatted_truncate_text(&text, TruncationPolicy::Tokens(max_tokens));
        let exit_code = session.exit_code();
        let has_exited = session.has_exited() || exit_code.is_some();
        let chunk_id = generate_chunk_id();
        let process_id = request.process_id.clone();
        if has_exited {
            // Short‑lived command: emit ExecCommandEnd immediately using the
            // same helper as the background watcher, so all end events share
            // one implementation.
            let exit = exit_code.unwrap_or(-1);
            emit_exec_end_for_unified_exec(
                Arc::clone(&context.session),
                Arc::clone(&context.turn),
                context.call_id.clone(),
                request.command.clone(),
                cwd,
                Some(process_id),
                Arc::clone(&transcript),
                output.clone(),
                exit,
                wall_time,
            )
            .await;
            self.release_process_id(&request.process_id).await;
            session.check_for_sandbox_denial_with_text(&text).await?;
        } else {
            // Long‑lived command: persist the session so write_stdin can reuse
            // it, and register a background watcher that will emit
            // ExecCommandEnd when the PTY eventually exits (even if no further
            // tool calls are made).
            self.store_session(
                Arc::clone(&session),
                context,
                &request.command,
                cwd.clone(),
                start,
                process_id,
                Arc::clone(&transcript),
            )
            .await;
            Self::emit_waiting_status(&context.session, &context.turn, &request.command).await;
        };
        let original_token_count = approx_token_count(&text);
        let response = UnifiedExecResponse {
            event_call_id: context.call_id.clone(),
            chunk_id,
            wall_time,
            output,
            raw_output: collected,
            // process_id is only surfaced while the session is still alive so
            // the model knows it can keep interacting with it.
            process_id: if has_exited {
                None
            } else {
                Some(request.process_id.clone())
            },
            exit_code,
            original_token_count: Some(original_token_count),
            session_command: Some(request.command.clone()),
        };
        Ok(response)
    }

    /// Writes `input` to an existing session's stdin, then polls its output
    /// until the (clamped) yield deadline and reports the session's status.
    pub(crate) async fn write_stdin(
        &self,
        request: WriteStdinRequest<'_>,
    ) -> Result<UnifiedExecResponse, UnifiedExecError> {
        let process_id = request.process_id.to_string();
        let PreparedSessionHandles {
            writer_tx,
            output_buffer,
            output_notify,
            cancellation_token,
            session_ref,
            turn_ref,
            command: session_command,
            process_id,
            ..
        } = self.prepare_session_handles(process_id.as_str()).await?;
        if !request.input.is_empty() {
            Self::send_input(&writer_tx, request.input.as_bytes()).await?;
            // Give the remote process a brief window to react so that we are
            // more likely to capture its output in the poll below.
            tokio::time::sleep(Duration::from_millis(100)).await;
        }

        let max_tokens = resolve_max_tokens(request.max_output_tokens);
        let yield_time_ms = clamp_yield_time(request.yield_time_ms);
        let start = Instant::now();
        let deadline = start + Duration::from_millis(yield_time_ms);
        let collected = Self::collect_output_until_deadline(
            &output_buffer,
            &output_notify,
            &cancellation_token,
            deadline,
        )
        .await;
        let wall_time = Instant::now().saturating_duration_since(start);
        let text = String::from_utf8_lossy(&collected).to_string();
        let output = formatted_truncate_text(&text, TruncationPolicy::Tokens(max_tokens));
        let original_token_count = approx_token_count(&text);
        let chunk_id = generate_chunk_id();

        // After polling, refresh_session_state tells us whether the PTY is
        // still alive or has exited and been removed from the store; we thread
        // that through so the handler can tag TerminalInteraction with an
        // appropriate process_id and exit_code.
        let status = self.refresh_session_state(process_id.as_str()).await;
        let (process_id, exit_code, event_call_id) = match status {
            SessionStatus::Alive {
                exit_code,
                call_id,
                process_id,
            } => (Some(process_id), exit_code, call_id),
            SessionStatus::Exited { exit_code, entry } => {
                let call_id = entry.call_id.clone();
                (None, exit_code, call_id)
            }
            SessionStatus::Unknown => {
                return Err(UnifiedExecError::UnknownSessionId {
                    process_id: request.process_id.to_string(),
                });
            }
        };
        let response = UnifiedExecResponse {
            event_call_id,
            chunk_id,
            wall_time,
            output,
            raw_output: collected,
            process_id,
            exit_code,
            original_token_count: Some(original_token_count),
            session_command: Some(session_command.clone()),
        };
        if response.process_id.is_some() {
            Self::emit_waiting_status(&session_ref, &turn_ref, &session_command).await;
        }
        Ok(response)
    }

    /// Checks whether the stored session is still running; exited sessions
    /// are removed from the store and their entry returned to the caller.
    async fn refresh_session_state(&self, process_id: &str) -> SessionStatus {
        let mut store = self.session_store.lock().await;
        let Some(entry) = store.sessions.get(process_id) else {
            return SessionStatus::Unknown;
        };
        let exit_code = entry.session.exit_code();
        let process_id = entry.process_id.clone();
        if entry.session.has_exited() {
            let Some(entry) = store.remove(&process_id) else {
                return SessionStatus::Unknown;
            };
            SessionStatus::Exited {
                exit_code,
                entry: Box::new(entry),
            }
        } else {
            SessionStatus::Alive {
                exit_code,
                call_id: entry.call_id.clone(),
                process_id,
            }
        }
    }

    /// Looks up a session by id, bumps its `last_used` timestamp, and clones
    /// out the handles needed to interact with it after the lock is released.
    async fn prepare_session_handles(
        &self,
        process_id: &str,
    ) -> Result<PreparedSessionHandles, UnifiedExecError> {
        let mut store = self.session_store.lock().await;
        let entry = store
            .sessions
            .get_mut(process_id)
            .ok_or(UnifiedExecError::UnknownSessionId {
                process_id: process_id.to_string(),
            })?;
        entry.last_used = Instant::now();
        let OutputHandles {
            output_buffer,
            output_notify,
            cancellation_token,
        } = entry.session.output_handles();
        Ok(PreparedSessionHandles {
            writer_tx: entry.session.writer_sender(),
            output_buffer,
            output_notify,
            cancellation_token,
            session_ref: Arc::clone(&entry.session_ref),
            turn_ref: Arc::clone(&entry.turn_ref),
            command: entry.command.clone(),
            process_id: entry.process_id.clone(),
        })
    }

    /// Forwards bytes to the session's stdin writer task.
    async fn send_input(
        writer_tx: &mpsc::Sender<Vec<u8>>,
        data: &[u8],
    ) -> Result<(), UnifiedExecError> {
        writer_tx
            .send(data.to_vec())
            .await
            .map_err(|_| UnifiedExecError::WriteToStdin)
    }

    /// Persists a long-lived session in the store (pruning first if at
    /// capacity), warns the model when nearing the session cap, and spawns
    /// the background exit watcher.
    #[allow(clippy::too_many_arguments)]
    async fn store_session(
        &self,
        session: Arc<UnifiedExecSession>,
        context: &UnifiedExecContext,
        command: &[String],
        cwd: PathBuf,
        started_at: Instant,
        process_id: String,
        transcript: Arc<tokio::sync::Mutex<CommandTranscript>>,
    ) {
        let entry = SessionEntry {
            session: Arc::clone(&session),
            session_ref: Arc::clone(&context.session),
            turn_ref: Arc::clone(&context.turn),
            call_id: context.call_id.clone(),
            process_id: process_id.clone(),
            command: command.to_vec(),
            last_used: started_at,
        };
        let number_sessions = {
            let mut store = self.session_store.lock().await;
            Self::prune_sessions_if_needed(&mut store);
            store.sessions.insert(process_id.clone(), entry);
            store.sessions.len()
        };
        if number_sessions >= WARNING_UNIFIED_EXEC_SESSIONS {
            context
                .session
                .record_model_warning(
                    format!("The maximum number of unified exec sessions you can keep open is {WARNING_UNIFIED_EXEC_SESSIONS} and you currently have {number_sessions} sessions open. Reuse older sessions or close them to prevent automatic pruning of old session"),
                    &context.turn
                )
                .await;
        };
        spawn_exit_watcher(
            Arc::clone(&session),
            Arc::clone(&context.session),
            Arc::clone(&context.turn),
            context.call_id.clone(),
            command.to_vec(),
            cwd,
            process_id,
            transcript,
            started_at,
        );
    }

    /// Emits a background event telling the user which command we are waiting
    /// on; for `bash -c`/`-lc` invocations only the script text is shown.
    async fn emit_waiting_status(
        session: &Arc<Session>,
        turn: &Arc<TurnContext>,
        command: &[String],
    ) {
        let command_display = if let Some((_, script)) = extract_bash_command(command) {
            script.to_string()
        } else {
            command.join(" ")
        };
        let message = format!("Waiting for `{command_display}`");
        session
            .send_event(
                turn.as_ref(),
                EventMsg::BackgroundEvent(BackgroundEventEvent { message }),
            )
            .await;
    }

    /// Spawns the PTY process described by an already-sandboxed `ExecEnv`.
    pub(crate) async fn open_session_with_exec_env(
        &self,
        env: &ExecEnv,
    ) -> Result<UnifiedExecSession, UnifiedExecError> {
        let (program, args) = env
            .command
            .split_first()
            .ok_or(UnifiedExecError::MissingCommandLine)?;
        let spawned = codex_utils_pty::spawn_pty_process(
            program,
            args,
            env.cwd.as_path(),
            &env.env,
            &env.arg0,
        )
        .await
        .map_err(|err| UnifiedExecError::create_session(err.to_string()))?;
        UnifiedExecSession::from_spawned(spawned, env.sandbox).await
    }

    /// Opens a session through the shared tool orchestrator, which handles
    /// approval, sandbox selection, and retry-on-denial semantics.
    pub(super) async fn open_session_with_sandbox(
        &self,
        command: &[String],
        cwd: PathBuf,
        sandbox_permissions: SandboxPermissions,
        justification: Option<String>,
        context: &UnifiedExecContext,
    ) -> Result<UnifiedExecSession, UnifiedExecError> {
        let env = apply_unified_exec_env(create_env(&context.turn.shell_environment_policy));
        let features = context.session.features();
        let mut orchestrator = ToolOrchestrator::new();
        let mut runtime = UnifiedExecRuntime::new(self);
        let exec_approval_requirement = context
            .session
            .services
            .exec_policy
            .create_exec_approval_requirement_for_command(
                &features,
                command,
                context.turn.approval_policy,
                &context.turn.sandbox_policy,
                sandbox_permissions,
            )
            .await;
        let req = UnifiedExecToolRequest::new(
            command.to_vec(),
            cwd,
            env,
            sandbox_permissions,
            justification,
            exec_approval_requirement,
        );
        let tool_ctx = ToolCtx {
            session: context.session.as_ref(),
            turn: context.turn.as_ref(),
            call_id: context.call_id.clone(),
            tool_name: "exec_command".to_string(),
        };
        orchestrator
            .run(
                &mut runtime,
                &req,
                &tool_ctx,
                context.turn.as_ref(),
                context.turn.approval_policy,
            )
            .await
            .map_err(|e| UnifiedExecError::create_session(format!("{e:?}")))
    }

    /// Drains the session's output buffer until `deadline`, waking on new
    /// output or cancellation. After an exit signal, allows a short grace
    /// period for trailing output before returning what was collected.
    pub(super) async fn collect_output_until_deadline(
        output_buffer: &OutputBuffer,
        output_notify: &Arc<Notify>,
        cancellation_token: &CancellationToken,
        deadline: Instant,
    ) -> Vec<u8> {
        const POST_EXIT_OUTPUT_GRACE: Duration = Duration::from_millis(50);
        let mut collected: Vec<u8> = Vec::with_capacity(4096);
        let mut exit_signal_received = cancellation_token.is_cancelled();
        loop {
            let drained_chunks;
            let mut wait_for_output = None;
            {
                // Register the `notified()` future while still holding the
                // buffer lock so a chunk pushed between drain and wait cannot
                // be missed.
                let mut guard = output_buffer.lock().await;
                drained_chunks = guard.drain();
                if drained_chunks.is_empty() {
                    wait_for_output = Some(output_notify.notified());
                }
            }
            if drained_chunks.is_empty() {
                exit_signal_received |= cancellation_token.is_cancelled();
                let remaining = deadline.saturating_duration_since(Instant::now());
                if remaining == Duration::ZERO {
                    break;
                }
                let notified = wait_for_output.unwrap_or_else(|| output_notify.notified());
                if exit_signal_received {
                    // Process already exited: wait only a short grace window
                    // for any last buffered output.
                    let grace = remaining.min(POST_EXIT_OUTPUT_GRACE);
                    if tokio::time::timeout(grace, notified).await.is_err() {
                        break;
                    }
                    continue;
                }
                tokio::pin!(notified);
                let exit_notified = cancellation_token.cancelled();
                tokio::pin!(exit_notified);
                tokio::select! {
                    _ = &mut notified => {}
                    _ = &mut exit_notified => exit_signal_received = true,
                    _ = tokio::time::sleep(remaining) => break,
                }
                continue;
            }
            for chunk in drained_chunks {
                collected.extend_from_slice(&chunk);
            }
            exit_signal_received |= cancellation_token.is_cancelled();
            if Instant::now() >= deadline {
                break;
            }
        }
        collected
    }

    /// Evicts at most one session when the store is at capacity; returns
    /// whether an eviction happened.
    fn prune_sessions_if_needed(store: &mut SessionStore) -> bool {
        if store.sessions.len() < MAX_UNIFIED_EXEC_SESSIONS {
            return false;
        }
        let meta: Vec<(String, Instant, bool)> = store
            .sessions
            .iter()
            .map(|(id, entry)| (id.clone(), entry.last_used, entry.session.has_exited()))
            .collect();
        if let Some(session_id) = Self::session_id_to_prune_from_meta(&meta) {
            if let Some(entry) = store.remove(&session_id) {
                entry.session.terminate();
            }
            return true;
        }
        false
    }

    // Centralized pruning policy so we can easily swap strategies later.
    // Current policy: never touch the 8 most recently used sessions; among
    // the rest, prefer an already-exited session, else the least recently
    // used one. Input tuples are (process_id, last_used, has_exited).
    fn session_id_to_prune_from_meta(meta: &[(String, Instant, bool)]) -> Option<String> {
        if meta.is_empty() {
            return None;
        }
        let mut by_recency = meta.to_vec();
        by_recency.sort_by_key(|(_, last_used, _)| Reverse(*last_used));
        let protected: HashSet<String> = by_recency
            .iter()
            .take(8)
            .map(|(process_id, _, _)| process_id.clone())
            .collect();
        let mut lru = meta.to_vec();
        lru.sort_by_key(|(_, last_used, _)| *last_used);
        if let Some((process_id, _, _)) = lru
            .iter()
            .find(|(process_id, _, exited)| !protected.contains(process_id) && *exited)
        {
            return Some(process_id.clone());
        }
        lru.into_iter()
            .find(|(process_id, _, _)| !protected.contains(process_id))
            .map(|(process_id, _, _)| process_id)
    }

    /// Drains the whole store and terminates every session (used on
    /// shutdown). Termination happens after the lock is released.
    pub(crate) async fn terminate_all_sessions(&self) {
        let entries: Vec<SessionEntry> = {
            let mut sessions = self.session_store.lock().await;
            let entries: Vec<SessionEntry> =
                sessions.sessions.drain().map(|(_, entry)| entry).collect();
            sessions.reserved_sessions_id.clear();
            entries
        };
        for entry in entries {
            entry.session.terminate();
        }
    }
}

/// Outcome of `refresh_session_state` for a given process id.
enum SessionStatus {
    /// Session is still in the store and running.
    Alive {
        exit_code: Option<i32>,
        call_id: String,
        process_id: String,
    },
    /// Session exited; its entry has been removed from the store.
    Exited {
        exit_code: Option<i32>,
        entry: Box<SessionEntry>,
    },
    /// No session with that id exists.
    Unknown,
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use tokio::time::Duration;
    use tokio::time::Instant;

    #[test]
    fn unified_exec_env_injects_defaults() {
        let env = apply_unified_exec_env(HashMap::new());
        let expected = HashMap::from([
            ("NO_COLOR".to_string(), "1".to_string()),
            ("TERM".to_string(), "dumb".to_string()),
            ("LANG".to_string(), "C.UTF-8".to_string()),
            ("LC_CTYPE".to_string(), "C.UTF-8".to_string()),
            ("LC_ALL".to_string(), "C.UTF-8".to_string()),
            ("COLORTERM".to_string(), String::new()),
            ("PAGER".to_string(), "cat".to_string()),
            ("GIT_PAGER".to_string(), "cat".to_string()),
        ]);
        assert_eq!(env, expected);
    }

    #[test]
    fn unified_exec_env_overrides_existing_values() {
        let mut base = HashMap::new();
        base.insert("NO_COLOR".to_string(), "0".to_string());
        base.insert("PATH".to_string(), "/usr/bin".to_string());
        let env = apply_unified_exec_env(base);
        assert_eq!(env.get("NO_COLOR"), Some(&"1".to_string()));
        assert_eq!(env.get("PATH"), Some(&"/usr/bin".to_string()));
    }

    #[test]
    fn pruning_prefers_exited_sessions_outside_recently_used() {
        let now = Instant::now();
        let id = |n: i32| n.to_string();
        let meta = vec![
            (id(1), now - Duration::from_secs(40), false),
            (id(2), now - Duration::from_secs(30), true),
            (id(3), now - Duration::from_secs(20), false),
            (id(4), now - Duration::from_secs(19), false),
            (id(5), now - Duration::from_secs(18), false),
            (id(6), now - Duration::from_secs(17), false),
            (id(7), now - Duration::from_secs(16), false),
            (id(8), now - Duration::from_secs(15), false),
            (id(9), now - Duration::from_secs(14), false),
            (id(10), now - Duration::from_secs(13), false),
        ];
        let candidate = UnifiedExecSessionManager::session_id_to_prune_from_meta(&meta);
        assert_eq!(candidate, Some(id(2)));
    }

    #[test]
    fn pruning_falls_back_to_lru_when_no_exited() {
        let now = Instant::now();
        let id = |n: i32| n.to_string();
        let meta = vec![
            (id(1), now - Duration::from_secs(40), false),
            (id(2), now - Duration::from_secs(30), false),
            (id(3), now - Duration::from_secs(20), false),
            (id(4), now - Duration::from_secs(19), false),
            (id(5), now - Duration::from_secs(18), false),
            (id(6), now - Duration::from_secs(17), false),
            (id(7), now - Duration::from_secs(16), false),
            (id(8), now - Duration::from_secs(15), false),
            (id(9), now - Duration::from_secs(14), false),
            (id(10), now - Duration::from_secs(13), false),
        ];
        let candidate = UnifiedExecSessionManager::session_id_to_prune_from_meta(&meta);
        assert_eq!(candidate, Some(id(1)));
    }

    #[test]
    fn pruning_protects_recent_sessions_even_if_exited() {
        let now = Instant::now();
        let id = |n: i32| n.to_string();
        let meta = vec![
            (id(1), now - Duration::from_secs(40), false),
            (id(2), now - Duration::from_secs(30), false),
            (id(3), now - Duration::from_secs(20), true),
            (id(4), now - Duration::from_secs(19), false),
            (id(5), now - Duration::from_secs(18), false),
            (id(6), now - Duration::from_secs(17), false),
            (id(7), now - Duration::from_secs(16), false),
            (id(8), now - Duration::from_secs(15), false),
            (id(9), now - Duration::from_secs(14), false),
            (id(10), now - Duration::from_secs(13), true),
        ];
        let candidate = UnifiedExecSessionManager::session_id_to_prune_from_meta(&meta);
        // (10) is exited but among the last 8; we should drop the LRU outside that set.
        assert_eq!(candidate, Some(id(1)));
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/unified_exec/session.rs
codex-rs/core/src/unified_exec/session.rs
#![allow(clippy::module_inception)]

use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio::sync::mpsc;
use tokio::sync::oneshot::error::TryRecvError;
use tokio::task::JoinHandle;
use tokio::time::Duration;
use tokio_util::sync::CancellationToken;

use crate::exec::ExecToolCallOutput;
use crate::exec::SandboxType;
use crate::exec::StreamOutput;
use crate::exec::is_likely_sandbox_denied;
use crate::truncate::TruncationPolicy;
use crate::truncate::formatted_truncate_text;
use codex_utils_pty::ExecCommandSession;
use codex_utils_pty::SpawnedPty;

use super::UNIFIED_EXEC_OUTPUT_MAX_BYTES;
use super::UNIFIED_EXEC_OUTPUT_MAX_TOKENS;
use super::UnifiedExecError;

/// Bounded FIFO of raw output chunks; total size is capped at
/// `UNIFIED_EXEC_OUTPUT_MAX_BYTES`, with the oldest bytes dropped first.
#[derive(Debug, Default)]
pub(crate) struct OutputBufferState {
    chunks: VecDeque<Vec<u8>>,
    // Sum of the lengths of all chunks currently held.
    pub(crate) total_bytes: usize,
}

impl OutputBufferState {
    /// Appends a chunk and trims from the front until the cap is respected.
    /// Only the excess bytes are removed, so a partially-offending oldest
    /// chunk is truncated rather than dropped whole.
    pub(super) fn push_chunk(&mut self, chunk: Vec<u8>) {
        self.total_bytes = self.total_bytes.saturating_add(chunk.len());
        self.chunks.push_back(chunk);
        let mut excess = self
            .total_bytes
            .saturating_sub(UNIFIED_EXEC_OUTPUT_MAX_BYTES);
        while excess > 0 {
            match self.chunks.front_mut() {
                Some(front) if excess >= front.len() => {
                    excess -= front.len();
                    self.total_bytes = self.total_bytes.saturating_sub(front.len());
                    self.chunks.pop_front();
                }
                Some(front) => {
                    front.drain(..excess);
                    self.total_bytes = self.total_bytes.saturating_sub(excess);
                    break;
                }
                None => break,
            }
        }
    }

    /// Removes and returns every buffered chunk, resetting the byte count.
    pub(super) fn drain(&mut self) -> Vec<Vec<u8>> {
        let drained: Vec<Vec<u8>> = self.chunks.drain(..).collect();
        self.total_bytes = 0;
        drained
    }

    /// Returns a copy of the buffered chunks without consuming them.
    pub(super) fn snapshot(&self) -> Vec<Vec<u8>> {
        self.chunks.iter().cloned().collect()
    }
}

pub(crate) type OutputBuffer = Arc<Mutex<OutputBufferState>>;

/// Cloneable handles for observing a session's output from outside the
/// session itself (buffer, wake-up notifier, exit/cancel token).
pub(crate) struct OutputHandles {
    pub(crate) output_buffer: OutputBuffer,
    pub(crate) output_notify: Arc<Notify>,
    pub(crate) cancellation_token: CancellationToken,
}

/// A PTY-backed exec session plus the background task that copies its
/// broadcast output into the bounded buffer. The cancellation token is
/// cancelled when the process exits or the session is terminated.
#[derive(Debug)]
pub(crate) struct UnifiedExecSession {
    session: ExecCommandSession,
    output_buffer: OutputBuffer,
    output_notify: Arc<Notify>,
    cancellation_token: CancellationToken,
    output_drained: Arc<Notify>,
    output_task: JoinHandle<()>,
    sandbox_type: SandboxType,
}

impl UnifiedExecSession {
    /// Wraps a spawned PTY session and starts the task that drains its
    /// broadcast receiver into the bounded output buffer, notifying waiters
    /// after each chunk.
    pub(super) fn new(
        session: ExecCommandSession,
        initial_output_rx: tokio::sync::broadcast::Receiver<Vec<u8>>,
        sandbox_type: SandboxType,
    ) -> Self {
        let output_buffer = Arc::new(Mutex::new(OutputBufferState::default()));
        let output_notify = Arc::new(Notify::new());
        let cancellation_token = CancellationToken::new();
        let output_drained = Arc::new(Notify::new());
        let mut receiver = initial_output_rx;
        let buffer_clone = Arc::clone(&output_buffer);
        let notify_clone = Arc::clone(&output_notify);
        let output_task = tokio::spawn(async move {
            loop {
                match receiver.recv().await {
                    Ok(chunk) => {
                        let mut guard = buffer_clone.lock().await;
                        guard.push_chunk(chunk);
                        // Release the lock before waking waiters so they can
                        // immediately drain.
                        drop(guard);
                        notify_clone.notify_waiters();
                    }
                    // Lagged: some chunks were dropped by the broadcast
                    // channel; keep receiving what remains.
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
                };
            }
        });
        Self {
            session,
            output_buffer,
            output_notify,
            cancellation_token,
            output_drained,
            output_task,
            sandbox_type,
        }
    }

    /// Channel used to feed bytes to the process's stdin.
    pub(super) fn writer_sender(&self) -> mpsc::Sender<Vec<u8>> {
        self.session.writer_sender()
    }

    /// Clones out the buffer/notify/cancellation handles for external polling.
    pub(super) fn output_handles(&self) -> OutputHandles {
        OutputHandles {
            output_buffer: Arc::clone(&self.output_buffer),
            output_notify: Arc::clone(&self.output_notify),
            cancellation_token: self.cancellation_token.clone(),
        }
    }

    /// Fresh subscription to the raw PTY output broadcast.
    pub(super) fn output_receiver(&self) -> tokio::sync::broadcast::Receiver<Vec<u8>> {
        self.session.output_receiver()
    }

    /// Token cancelled on process exit or termination.
    pub(super) fn cancellation_token(&self) -> CancellationToken {
        self.cancellation_token.clone()
    }

    /// Notifier associated with buffer draining (shared with watchers).
    pub(super) fn output_drained_notify(&self) -> Arc<Notify> {
        Arc::clone(&self.output_drained)
    }

    pub(super) fn has_exited(&self) -> bool {
        self.session.has_exited()
    }

    pub(super) fn exit_code(&self) -> Option<i32> {
        self.session.exit_code()
    }

    /// Kills the process, cancels waiters, and aborts the output-copy task.
    pub(super) fn terminate(&self) {
        self.session.terminate();
        self.cancellation_token.cancel();
        self.output_task.abort();
    }

    /// Copies the current buffer contents without draining them.
    async fn snapshot_output(&self) -> Vec<Vec<u8>> {
        let guard = self.output_buffer.lock().await;
        guard.snapshot()
    }

    pub(crate) fn sandbox_type(&self) -> SandboxType {
        self.sandbox_type
    }

    /// Waits briefly for output to arrive, then runs the sandbox-denial
    /// check against the aggregated buffered output.
    pub(super) async fn check_for_sandbox_denial(&self) -> Result<(), UnifiedExecError> {
        let _ =
            tokio::time::timeout(Duration::from_millis(20), self.output_notify.notified()).await;
        let collected_chunks = self.snapshot_output().await;
        let mut aggregated: Vec<u8> = Vec::new();
        for chunk in collected_chunks {
            aggregated.extend_from_slice(&chunk);
        }
        let aggregated_text = String::from_utf8_lossy(&aggregated).to_string();
        self.check_for_sandbox_denial_with_text(&aggregated_text)
            .await?;
        Ok(())
    }

    /// Returns `SandboxDenied` when a sandboxed session has exited and its
    /// output matches the shared sandbox-denial heuristic; no-op for
    /// unsandboxed or still-running sessions.
    pub(super) async fn check_for_sandbox_denial_with_text(
        &self,
        text: &str,
    ) -> Result<(), UnifiedExecError> {
        let sandbox_type = self.sandbox_type();
        if sandbox_type == SandboxType::None || !self.has_exited() {
            return Ok(());
        }
        let exit_code = self.exit_code().unwrap_or(-1);
        let exec_output = ExecToolCallOutput {
            exit_code,
            stderr: StreamOutput::new(text.to_string()),
            aggregated_output: StreamOutput::new(text.to_string()),
            ..Default::default()
        };
        if is_likely_sandbox_denied(sandbox_type, &exec_output) {
            let snippet = formatted_truncate_text(
                text,
                TruncationPolicy::Tokens(UNIFIED_EXEC_OUTPUT_MAX_TOKENS),
            );
            let message = if snippet.is_empty() {
                format!("Session exited with code {exit_code}")
            } else {
                snippet
            };
            return Err(UnifiedExecError::sandbox_denied(message, exec_output));
        }
        Ok(())
    }

    /// Builds a managed session from a freshly spawned PTY. If the process
    /// exits immediately (or within 150ms), the sandbox-denial check runs
    /// right away; otherwise a task is spawned that cancels the session
    /// token once the exit channel fires.
    pub(super) async fn from_spawned(
        spawned: SpawnedPty,
        sandbox_type: SandboxType,
    ) -> Result<Self, UnifiedExecError> {
        let SpawnedPty {
            session,
            output_rx,
            mut exit_rx,
        } = spawned;
        let managed = Self::new(session, output_rx, sandbox_type);
        // A closed exit channel is treated the same as an already-delivered
        // exit notification.
        let exit_ready = matches!(exit_rx.try_recv(), Ok(_) | Err(TryRecvError::Closed));
        if exit_ready {
            managed.signal_exit();
            managed.check_for_sandbox_denial().await?;
            return Ok(managed);
        }
        if tokio::time::timeout(Duration::from_millis(150), &mut exit_rx)
            .await
            .is_ok()
        {
            managed.signal_exit();
            managed.check_for_sandbox_denial().await?;
            return Ok(managed);
        }
        tokio::spawn({
            let cancellation_token = managed.cancellation_token.clone();
            async move {
                let _ = exit_rx.await;
                cancellation_token.cancel();
            }
        });
        Ok(managed)
    }

    /// Marks the session as exited by cancelling its token.
    fn signal_exit(&self) {
        self.cancellation_token.cancel();
    }
}

impl Drop for UnifiedExecSession {
    // Ensure the child process and background task are cleaned up even if
    // the session is dropped without an explicit terminate().
    fn drop(&mut self) {
        self.terminate();
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/unified_exec/mod.rs
codex-rs/core/src/unified_exec/mod.rs
//! Unified Exec: interactive PTY execution orchestrated with approvals + sandboxing. //! //! Responsibilities //! - Manages interactive PTY sessions (create, reuse, buffer output with caps). //! - Uses the shared ToolOrchestrator to handle approval, sandbox selection, and //! retry semantics in a single, descriptive flow. //! - Spawns the PTY from a sandbox‑transformed `ExecEnv`; on sandbox denial, //! retries without sandbox when policy allows (no re‑prompt thanks to caching). //! - Uses the shared `is_likely_sandbox_denied` heuristic to keep denial messages //! consistent with other exec paths. //! //! Flow at a glance (open session) //! 1) Build a small request `{ command, cwd }`. //! 2) Orchestrator: approval (bypass/cache/prompt) → select sandbox → run. //! 3) Runtime: transform `CommandSpec` → `ExecEnv` → spawn PTY. //! 4) If denial, orchestrator retries with `SandboxType::None`. //! 5) Session is returned with streaming output + metadata. //! //! This keeps policy logic and user interaction centralized while the PTY/session //! concerns remain isolated here. The implementation is split between: //! - `session.rs`: PTY session lifecycle + output buffering. //! - `session_manager.rs`: orchestration (approvals, sandboxing, reuse) and request handling. 
use std::collections::HashMap; use std::collections::HashSet; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use rand::Rng; use rand::rng; use tokio::sync::Mutex; use crate::codex::Session; use crate::codex::TurnContext; use crate::sandboxing::SandboxPermissions; mod async_watcher; mod errors; mod session; mod session_manager; pub(crate) use errors::UnifiedExecError; pub(crate) use session::UnifiedExecSession; pub(crate) const MIN_YIELD_TIME_MS: u64 = 250; pub(crate) const MAX_YIELD_TIME_MS: u64 = 30_000; pub(crate) const DEFAULT_MAX_OUTPUT_TOKENS: usize = 10_000; pub(crate) const UNIFIED_EXEC_OUTPUT_MAX_BYTES: usize = 1024 * 1024; // 1 MiB pub(crate) const UNIFIED_EXEC_OUTPUT_MAX_TOKENS: usize = UNIFIED_EXEC_OUTPUT_MAX_BYTES / 4; pub(crate) const MAX_UNIFIED_EXEC_SESSIONS: usize = 64; // Send a warning message to the models when it reaches this number of sessions. pub(crate) const WARNING_UNIFIED_EXEC_SESSIONS: usize = 60; #[derive(Debug, Default)] pub(crate) struct CommandTranscript { pub data: Vec<u8>, } impl CommandTranscript { pub fn append(&mut self, bytes: &[u8]) { self.data.extend_from_slice(bytes); if self.data.len() > UNIFIED_EXEC_OUTPUT_MAX_BYTES { let excess = self .data .len() .saturating_sub(UNIFIED_EXEC_OUTPUT_MAX_BYTES); self.data.drain(..excess); } } } pub(crate) struct UnifiedExecContext { pub session: Arc<Session>, pub turn: Arc<TurnContext>, pub call_id: String, } impl UnifiedExecContext { pub fn new(session: Arc<Session>, turn: Arc<TurnContext>, call_id: String) -> Self { Self { session, turn, call_id, } } } #[derive(Debug)] pub(crate) struct ExecCommandRequest { pub command: Vec<String>, pub process_id: String, pub yield_time_ms: u64, pub max_output_tokens: Option<usize>, pub workdir: Option<PathBuf>, pub sandbox_permissions: SandboxPermissions, pub justification: Option<String>, } #[derive(Debug)] pub(crate) struct WriteStdinRequest<'a> { pub process_id: &'a str, pub input: &'a str, pub yield_time_ms: u64, pub 
// NOTE(review): the next two tokens close a request struct whose opening lines
// precede this chunk; they are kept verbatim.
max_output_tokens: Option<usize>,
}

/// Result of one unified exec call: either a freshly spawned command or a
/// stdin write to an already-running interactive session.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct UnifiedExecResponse {
    pub event_call_id: String,
    pub chunk_id: String,
    // Wall-clock time the call spent waiting on the command.
    pub wall_time: Duration,
    // Output after any truncation/token capping.
    pub output: String,
    /// Raw bytes returned for this unified exec call before any truncation.
    pub raw_output: Vec<u8>,
    // Set only while the underlying process is still alive and addressable.
    pub process_id: Option<String>,
    // `None` while the process has not yet exited.
    pub exit_code: Option<i32>,
    // Token count of the untruncated output, when truncation occurred.
    pub original_token_count: Option<usize>,
    pub session_command: Option<Vec<String>>,
}

/// In-memory registry of live unified exec sessions, keyed by process id.
/// `reserved_sessions_id` holds ids handed out before the session exists.
#[derive(Default)]
pub(crate) struct SessionStore {
    sessions: HashMap<String, SessionEntry>,
    reserved_sessions_id: HashSet<String>,
}

impl SessionStore {
    /// Drops both the reservation and the live entry for `session_id`,
    /// returning the entry if one was registered.
    fn remove(&mut self, session_id: &str) -> Option<SessionEntry> {
        self.reserved_sessions_id.remove(session_id);
        self.sessions.remove(session_id)
    }
}

/// Owner of the session store; all access goes through the async mutex.
pub(crate) struct UnifiedExecSessionManager {
    session_store: Mutex<SessionStore>,
}

impl Default for UnifiedExecSessionManager {
    fn default() -> Self {
        Self {
            session_store: Mutex::new(SessionStore::default()),
        }
    }
}

/// Everything needed to route follow-up writes/events to a live session.
struct SessionEntry {
    session: Arc<UnifiedExecSession>,
    session_ref: Arc<Session>,
    turn_ref: Arc<TurnContext>,
    call_id: String,
    process_id: String,
    // Original command line, kept for event reporting.
    command: Vec<String>,
    // Used for idle-session housekeeping.
    last_used: tokio::time::Instant,
}

/// Clamp a caller-supplied yield time into the supported range
/// (`MIN_YIELD_TIME_MS`..=`MAX_YIELD_TIME_MS`, defined elsewhere in this module).
pub(crate) fn clamp_yield_time(yield_time_ms: u64) -> u64 {
    yield_time_ms.clamp(MIN_YIELD_TIME_MS, MAX_YIELD_TIME_MS)
}

/// Resolve the per-call output token budget, falling back to the module default.
pub(crate) fn resolve_max_tokens(max_tokens: Option<usize>) -> usize {
    max_tokens.unwrap_or(DEFAULT_MAX_OUTPUT_TOKENS)
}

/// Generate a short random id: six lowercase hex characters.
pub(crate) fn generate_chunk_id() -> String {
    let mut rng = rng();
    (0..6)
        .map(|_| format!("{:x}", rng.random_range(0..16)))
        .collect()
}

// These tests spawn real `bash` processes through the PTY-backed session
// manager, so they are Unix-only and skipped when running inside a sandbox.
#[cfg(test)]
#[cfg(unix)]
mod tests {
    use super::*;
    use crate::codex::Session;
    use crate::codex::TurnContext;
    use crate::codex::make_session_and_context;
    use crate::protocol::AskForApproval;
    use crate::protocol::SandboxPolicy;
    use crate::unified_exec::ExecCommandRequest;
    use crate::unified_exec::WriteStdinRequest;
    use core_test_support::skip_if_sandbox;
    use std::sync::Arc;
    use tokio::time::Duration;

    use super::session::OutputBufferState;

    /// Build a session/turn pair with approvals and sandboxing disabled so
    /// commands run unattended.
    async fn test_session_and_turn() -> (Arc<Session>, Arc<TurnContext>) {
        let (session, mut turn) = make_session_and_context().await;
        turn.approval_policy = AskForApproval::Never;
        turn.sandbox_policy = SandboxPolicy::DangerFullAccess;
        (Arc::new(session), Arc::new(turn))
    }

    /// Run `cmd` under `bash -lc` via the unified exec manager.
    async fn exec_command(
        session: &Arc<Session>,
        turn: &Arc<TurnContext>,
        cmd: &str,
        yield_time_ms: u64,
    ) -> Result<UnifiedExecResponse, UnifiedExecError> {
        let context =
            UnifiedExecContext::new(Arc::clone(session), Arc::clone(turn), "call".to_string());
        let process_id = session
            .services
            .unified_exec_manager
            .allocate_process_id()
            .await;
        session
            .services
            .unified_exec_manager
            .exec_command(
                ExecCommandRequest {
                    command: vec!["bash".to_string(), "-lc".to_string(), cmd.to_string()],
                    process_id,
                    yield_time_ms,
                    max_output_tokens: None,
                    workdir: None,
                    sandbox_permissions: SandboxPermissions::UseDefault,
                    justification: None,
                },
                &context,
            )
            .await
    }

    /// Write `input` to an existing session's stdin and wait up to
    /// `yield_time_ms` for output.
    async fn write_stdin(
        session: &Arc<Session>,
        process_id: &str,
        input: &str,
        yield_time_ms: u64,
    ) -> Result<UnifiedExecResponse, UnifiedExecError> {
        session
            .services
            .unified_exec_manager
            .write_stdin(WriteStdinRequest {
                process_id,
                input,
                yield_time_ms,
                max_output_tokens: None,
            })
            .await
    }

    // Overflow should evict only the excess bytes from the oldest chunk,
    // never whole newer chunks.
    #[test]
    fn push_chunk_trims_only_excess_bytes() {
        let mut buffer = OutputBufferState::default();
        buffer.push_chunk(vec![b'a'; UNIFIED_EXEC_OUTPUT_MAX_BYTES]);
        buffer.push_chunk(vec![b'b']);
        buffer.push_chunk(vec![b'c']);
        assert_eq!(buffer.total_bytes, UNIFIED_EXEC_OUTPUT_MAX_BYTES);
        let snapshot = buffer.snapshot();
        assert_eq!(snapshot.len(), 3);
        assert_eq!(
            snapshot.first().unwrap().len(),
            UNIFIED_EXEC_OUTPUT_MAX_BYTES - 2
        );
        assert_eq!(snapshot.get(2).unwrap(), &vec![b'c']);
        assert_eq!(snapshot.get(1).unwrap(), &vec![b'b']);
    }

    // Shell state (an exported variable) must survive across separate
    // write_stdin calls to the same session.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn unified_exec_persists_across_requests() -> anyhow::Result<()> {
        skip_if_sandbox!(Ok(()));
        let (session, turn) = test_session_and_turn().await;

        let open_shell = exec_command(&session, &turn, "bash -i", 2_500).await?;
        let process_id = open_shell
            .process_id
            .as_ref()
            .expect("expected process_id")
            .as_str();

        write_stdin(
            &session,
            process_id,
            "export CODEX_INTERACTIVE_SHELL_VAR=codex\n",
            2_500,
        )
        .await?;

        let out_2 = write_stdin(
            &session,
            process_id,
            "echo $CODEX_INTERACTIVE_SHELL_VAR\n",
            2_500,
        )
        .await?;
        assert!(
            out_2.output.contains("codex"),
            "expected environment variable output"
        );
        Ok(())
    }

    // A second, short-lived exec must not see state from a concurrently open
    // interactive session; the original session keeps its state.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn multi_unified_exec_sessions() -> anyhow::Result<()> {
        skip_if_sandbox!(Ok(()));
        let (session, turn) = test_session_and_turn().await;

        let shell_a = exec_command(&session, &turn, "bash -i", 2_500).await?;
        let session_a = shell_a
            .process_id
            .as_ref()
            .expect("expected process id")
            .clone();
        write_stdin(
            &session,
            session_a.as_str(),
            "export CODEX_INTERACTIVE_SHELL_VAR=codex\n",
            2_500,
        )
        .await?;

        let out_2 =
            exec_command(&session, &turn, "echo $CODEX_INTERACTIVE_SHELL_VAR", 2_500).await?;
        tokio::time::sleep(Duration::from_secs(2)).await;
        assert!(
            out_2.process_id.is_none(),
            "short command should not report a process id if it exits quickly"
        );
        assert!(
            !out_2.output.contains("codex"),
            "short command should run in a fresh shell"
        );

        let out_3 = write_stdin(
            &session,
            shell_a
                .process_id
                .as_ref()
                .expect("expected process id")
                .as_str(),
            "echo $CODEX_INTERACTIVE_SHELL_VAR\n",
            2_500,
        )
        .await?;
        assert!(
            out_3.output.contains("codex"),
            "session should preserve state"
        );
        Ok(())
    }

    // A too-short yield time returns before the command finishes; a later
    // empty write polls the buffered output.
    #[tokio::test]
    async fn unified_exec_timeouts() -> anyhow::Result<()> {
        skip_if_sandbox!(Ok(()));
        let (session, turn) = test_session_and_turn().await;

        let open_shell = exec_command(&session, &turn, "bash -i", 2_500).await?;
        let process_id = open_shell
            .process_id
            .as_ref()
            .expect("expected process id")
            .as_str();

        write_stdin(
            &session,
            process_id,
            "export CODEX_INTERACTIVE_SHELL_VAR=codex\n",
            2_500,
        )
        .await?;

        let out_2 = write_stdin(
            &session,
            process_id,
            "sleep 5 && echo $CODEX_INTERACTIVE_SHELL_VAR\n",
            10,
        )
        .await?;
        assert!(
            !out_2.output.contains("codex"),
            "timeout too short should yield incomplete output"
        );

        tokio::time::sleep(Duration::from_secs(7)).await;
        let out_3 = write_stdin(&session, process_id, "", 100).await?;
        assert!(
            out_3.output.contains("codex"),
            "subsequent poll should retrieve output"
        );
        Ok(())
    }

    #[tokio::test]
    #[ignore] // Ignored while we have a better way to test this.
    async fn requests_with_large_timeout_are_capped() -> anyhow::Result<()> {
        let (session, turn) = test_session_and_turn().await;
        let result = exec_command(&session, &turn, "echo codex", 120_000).await?;
        assert!(result.process_id.is_some());
        assert!(result.output.contains("codex"));
        Ok(())
    }

    #[tokio::test]
    #[ignore] // Ignored while we have a better way to test this.
    async fn completed_commands_do_not_persist_sessions() -> anyhow::Result<()> {
        let (session, turn) = test_session_and_turn().await;
        let result = exec_command(&session, &turn, "echo codex", 2_500).await?;
        assert!(
            result.process_id.is_some(),
            "completed command should report a process id"
        );
        assert!(result.output.contains("codex"));
        // The store must not retain entries for exited commands.
        assert!(
            session
                .services
                .unified_exec_manager
                .session_store
                .lock()
                .await
                .sessions
                .is_empty()
        );
        Ok(())
    }

    // Writing to a session that already exited must fail with
    // UnknownSessionId and leave the store empty.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn reusing_completed_session_returns_unknown_session() -> anyhow::Result<()> {
        skip_if_sandbox!(Ok(()));
        let (session, turn) = test_session_and_turn().await;

        let open_shell = exec_command(&session, &turn, "bash -i", 2_500).await?;
        let process_id = open_shell
            .process_id
            .as_ref()
            .expect("expected process id")
            .as_str();

        write_stdin(&session, process_id, "exit\n", 2_500).await?;
        // Give the exit watcher time to reap the session before reuse.
        tokio::time::sleep(Duration::from_millis(200)).await;

        let err = write_stdin(&session, process_id, "", 100)
            .await
            .expect_err("expected unknown session error");
        match err {
            UnifiedExecError::UnknownSessionId { process_id: err_id } => {
                assert_eq!(err_id, process_id, "process id should match request");
            }
            other => panic!("expected UnknownSessionId, got {other:?}"),
        }
        assert!(
            session
                .services
                .unified_exec_manager
                .session_store
                .lock()
                .await
                .sessions
                .is_empty()
        );
        Ok(())
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/unified_exec/async_watcher.rs
codex-rs/core/src/unified_exec/async_watcher.rs
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;

use tokio::sync::Mutex;
use tokio::time::Duration;
use tokio::time::Instant;
use tokio::time::Sleep;

use crate::codex::Session;
use crate::codex::TurnContext;
use crate::exec::ExecToolCallOutput;
use crate::exec::MAX_EXEC_OUTPUT_DELTAS_PER_CALL;
use crate::exec::StreamOutput;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandOutputDeltaEvent;
use crate::protocol::ExecCommandSource;
use crate::protocol::ExecOutputStream;
use crate::tools::events::ToolEmitter;
use crate::tools::events::ToolEventCtx;
use crate::tools::events::ToolEventStage;

use super::CommandTranscript;
use super::UnifiedExecContext;
use super::session::UnifiedExecSession;

/// How long to keep draining PTY output after the process has exited, so
/// trailing bytes still in flight make it into the transcript.
pub(crate) const TRAILING_OUTPUT_GRACE: Duration = Duration::from_millis(100);

/// Upper bound for a single ExecCommandOutputDelta chunk emitted by unified exec.
///
/// The unified exec output buffer already caps *retained* output (see
/// `UNIFIED_EXEC_OUTPUT_MAX_BYTES`), but we also cap per-event payload size so
/// downstream event consumers (especially app-server JSON-RPC) don't have to
/// process arbitrarily large delta payloads.
const UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES: usize = 8192;

/// Spawn a background task that continuously reads from the PTY, appends to the
/// shared transcript, and emits ExecCommandOutputDelta events on UTF‑8
/// boundaries.
pub(crate) fn start_streaming_output(
    session: &UnifiedExecSession,
    context: &UnifiedExecContext,
    transcript: Arc<Mutex<CommandTranscript>>,
) {
    let mut receiver = session.output_receiver();
    let output_drained = session.output_drained_notify();
    let exit_token = session.cancellation_token();
    let session_ref = Arc::clone(&context.session);
    let turn_ref = Arc::clone(&context.turn);
    let call_id = context.call_id.clone();
    tokio::spawn(async move {
        use tokio::sync::broadcast::error::RecvError;
        // Bytes received but not yet emitted (may end mid UTF-8 code point).
        let mut pending = Vec::<u8>::new();
        let mut emitted_deltas: usize = 0;
        // Set once cancellation fires; while armed we keep draining output
        // until the grace deadline elapses.
        let mut grace_sleep: Option<Pin<Box<Sleep>>> = None;
        loop {
            tokio::select! {
                // Process exited: arm the grace timer instead of stopping
                // immediately, so late output is still captured.
                _ = exit_token.cancelled(), if grace_sleep.is_none() => {
                    let deadline = Instant::now() + TRAILING_OUTPUT_GRACE;
                    grace_sleep.replace(Box::pin(tokio::time::sleep_until(deadline)));
                }
                // Grace period over: tell the exit watcher output is drained.
                _ = async {
                    if let Some(sleep) = grace_sleep.as_mut() {
                        sleep.as_mut().await;
                    }
                }, if grace_sleep.is_some() => {
                    output_drained.notify_one();
                    break;
                }
                received = receiver.recv() => {
                    let chunk = match received {
                        Ok(chunk) => chunk,
                        // Lagging only drops already-buffered history; keep
                        // reading newer chunks.
                        Err(RecvError::Lagged(_)) => {
                            continue;
                        },
                        Err(RecvError::Closed) => {
                            output_drained.notify_one();
                            break;
                        }
                    };
                    process_chunk(
                        &mut pending,
                        &transcript,
                        &call_id,
                        &session_ref,
                        &turn_ref,
                        &mut emitted_deltas,
                        chunk,
                    ).await;
                }
            }
        }
    });
}

/// Spawn a background watcher that waits for the PTY to exit and then emits a
/// single ExecCommandEnd event with the aggregated transcript.
#[allow(clippy::too_many_arguments)]
pub(crate) fn spawn_exit_watcher(
    session: Arc<UnifiedExecSession>,
    session_ref: Arc<Session>,
    turn_ref: Arc<TurnContext>,
    call_id: String,
    command: Vec<String>,
    cwd: PathBuf,
    process_id: String,
    transcript: Arc<Mutex<CommandTranscript>>,
    started_at: Instant,
) {
    let exit_token = session.cancellation_token();
    let output_drained = session.output_drained_notify();
    tokio::spawn(async move {
        // Wait for exit first, then for the streaming task to confirm the
        // remaining output has been flushed into the transcript.
        exit_token.cancelled().await;
        output_drained.notified().await;
        // -1 is the sentinel when no exit code is available.
        let exit_code = session.exit_code().unwrap_or(-1);
        let duration = Instant::now().saturating_duration_since(started_at);
        emit_exec_end_for_unified_exec(
            session_ref,
            turn_ref,
            call_id,
            command,
            cwd,
            Some(process_id),
            transcript,
            String::new(),
            exit_code,
            duration,
        )
        .await;
    });
}

/// Fold one raw PTY chunk into `pending`, then flush every complete UTF-8
/// prefix: each prefix is appended to the transcript and (while under the
/// per-call delta cap) emitted as an ExecCommandOutputDelta event.
async fn process_chunk(
    pending: &mut Vec<u8>,
    transcript: &Arc<Mutex<CommandTranscript>>,
    call_id: &str,
    session_ref: &Arc<Session>,
    turn_ref: &Arc<TurnContext>,
    emitted_deltas: &mut usize,
    chunk: Vec<u8>,
) {
    pending.extend_from_slice(&chunk);
    while let Some(prefix) = split_valid_utf8_prefix(pending) {
        {
            // Scope the lock so it is not held across send_event below.
            let mut guard = transcript.lock().await;
            guard.append(&prefix);
        }
        // Past the cap we still record to the transcript (above) but stop
        // emitting delta events.
        if *emitted_deltas >= MAX_EXEC_OUTPUT_DELTAS_PER_CALL {
            continue;
        }
        let event = ExecCommandOutputDeltaEvent {
            call_id: call_id.to_string(),
            stream: ExecOutputStream::Stdout,
            chunk: prefix,
        };
        session_ref
            .send_event(turn_ref.as_ref(), EventMsg::ExecCommandOutputDelta(event))
            .await;
        *emitted_deltas += 1;
    }
}

/// Emit an ExecCommandEnd event for a unified exec session, using the transcript
/// as the primary source of aggregated_output and falling back to the provided
/// text when the transcript is empty.
#[allow(clippy::too_many_arguments)]
pub(crate) async fn emit_exec_end_for_unified_exec(
    session_ref: Arc<Session>,
    turn_ref: Arc<TurnContext>,
    call_id: String,
    command: Vec<String>,
    cwd: PathBuf,
    process_id: Option<String>,
    transcript: Arc<Mutex<CommandTranscript>>,
    fallback_output: String,
    exit_code: i32,
    duration: Duration,
) {
    let aggregated_output = resolve_aggregated_output(&transcript, fallback_output).await;
    // All output arrives via the PTY as a single stream, so stdout carries
    // everything and stderr is left empty.
    let output = ExecToolCallOutput {
        exit_code,
        stdout: StreamOutput::new(aggregated_output.clone()),
        stderr: StreamOutput::new(String::new()),
        aggregated_output: StreamOutput::new(aggregated_output),
        duration,
        timed_out: false,
    };
    let event_ctx = ToolEventCtx::new(session_ref.as_ref(), turn_ref.as_ref(), &call_id, None);
    let emitter = ToolEmitter::unified_exec(
        &command,
        cwd,
        ExecCommandSource::UnifiedExecStartup,
        process_id,
    );
    emitter
        .emit(event_ctx, ToolEventStage::Success(output))
        .await;
}

/// Split off the largest valid-UTF-8 prefix, capped at the per-event byte limit.
fn split_valid_utf8_prefix(buffer: &mut Vec<u8>) -> Option<Vec<u8>> {
    split_valid_utf8_prefix_with_max(buffer, UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES)
}

/// Remove and return the longest valid-UTF-8 prefix of `buffer` no larger than
/// `max_bytes`. Backs off from `max_bytes` a few bytes at a time looking for a
/// code-point boundary (a UTF-8 scalar is at most 4 bytes, so backing off more
/// than that means the data is genuinely invalid, not merely truncated).
/// Returns `None` only when the buffer is empty.
fn split_valid_utf8_prefix_with_max(buffer: &mut Vec<u8>, max_bytes: usize) -> Option<Vec<u8>> {
    if buffer.is_empty() {
        return None;
    }
    let max_len = buffer.len().min(max_bytes);
    let mut split = max_len;
    while split > 0 {
        if std::str::from_utf8(&buffer[..split]).is_ok() {
            let prefix = buffer[..split].to_vec();
            buffer.drain(..split);
            return Some(prefix);
        }
        // Backed off past any possible truncated code point: give up on
        // finding a boundary.
        if max_len - split > 4 {
            break;
        }
        split -= 1;
    }
    // If no valid UTF-8 prefix was found, emit the first byte so the stream
    // keeps making progress and the transcript reflects all bytes.
    let byte = buffer.drain(..1).collect();
    Some(byte)
}

/// Render the transcript as lossy UTF-8, or return `fallback` when the
/// transcript captured nothing.
async fn resolve_aggregated_output(
    transcript: &Arc<Mutex<CommandTranscript>>,
    fallback: String,
) -> String {
    let guard = transcript.lock().await;
    if guard.data.is_empty() {
        return fallback;
    }
    String::from_utf8_lossy(&guard.data).to_string()
}

#[cfg(test)]
mod tests {
    use super::split_valid_utf8_prefix_with_max;
    use pretty_assertions::assert_eq;

    #[test]
    fn split_valid_utf8_prefix_respects_max_bytes_for_ascii() {
        let mut buf = b"hello word!".to_vec();
        let first = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix");
        assert_eq!(first, b"hello".to_vec());
        assert_eq!(buf, b" word!".to_vec());
        let second = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix");
        assert_eq!(second, b" word".to_vec());
        assert_eq!(buf, b"!".to_vec());
    }

    #[test]
    fn split_valid_utf8_prefix_avoids_splitting_utf8_codepoints() {
        // "é" is 2 bytes in UTF-8. With a max of 3 bytes, we should only emit 1 char (2 bytes).
        let mut buf = "ééé".as_bytes().to_vec();
        let first = split_valid_utf8_prefix_with_max(&mut buf, 3).expect("expected prefix");
        assert_eq!(std::str::from_utf8(&first).unwrap(), "é");
        assert_eq!(buf, "éé".as_bytes().to_vec());
    }

    #[test]
    fn split_valid_utf8_prefix_makes_progress_on_invalid_utf8() {
        // 0xff can never start a valid sequence; exactly one byte is emitted.
        let mut buf = vec![0xff, b'a', b'b'];
        let first = split_valid_utf8_prefix_with_max(&mut buf, 2).expect("expected prefix");
        assert_eq!(first, vec![0xff]);
        assert_eq!(buf, b"ab".to_vec());
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/service.rs
codex-rs/core/src/config/service.rs
use super::CONFIG_TOML_FILE; use super::ConfigToml; use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; use crate::config_loader::ConfigLayerEntry; use crate::config_loader::ConfigLayerStack; use crate::config_loader::LoaderOverrides; use crate::config_loader::load_config_layers_state; use crate::config_loader::merge_toml_values; use crate::path_utils; use codex_app_server_protocol::Config as ApiConfig; use codex_app_server_protocol::ConfigBatchWriteParams; use codex_app_server_protocol::ConfigLayerMetadata; use codex_app_server_protocol::ConfigLayerSource; use codex_app_server_protocol::ConfigReadParams; use codex_app_server_protocol::ConfigReadResponse; use codex_app_server_protocol::ConfigValueWriteParams; use codex_app_server_protocol::ConfigWriteErrorCode; use codex_app_server_protocol::ConfigWriteResponse; use codex_app_server_protocol::MergeStrategy; use codex_app_server_protocol::OverriddenMetadata; use codex_app_server_protocol::WriteStatus; use codex_utils_absolute_path::AbsolutePathBuf; use serde_json::Value as JsonValue; use std::borrow::Cow; use std::path::Path; use std::path::PathBuf; use thiserror::Error; use toml::Value as TomlValue; use toml_edit::Item as TomlItem; #[derive(Debug, Error)] pub enum ConfigServiceError { #[error("{message}")] Write { code: ConfigWriteErrorCode, message: String, }, #[error("{context}: {source}")] Io { context: &'static str, #[source] source: std::io::Error, }, #[error("{context}: {source}")] Json { context: &'static str, #[source] source: serde_json::Error, }, #[error("{context}: {source}")] Toml { context: &'static str, #[source] source: toml::de::Error, }, #[error("{context}: {source}")] Anyhow { context: &'static str, #[source] source: anyhow::Error, }, } impl ConfigServiceError { fn write(code: ConfigWriteErrorCode, message: impl Into<String>) -> Self { Self::Write { code, message: message.into(), } } fn io(context: &'static str, source: std::io::Error) -> Self { Self::Io { context, 
source } } fn json(context: &'static str, source: serde_json::Error) -> Self { Self::Json { context, source } } fn toml(context: &'static str, source: toml::de::Error) -> Self { Self::Toml { context, source } } fn anyhow(context: &'static str, source: anyhow::Error) -> Self { Self::Anyhow { context, source } } pub fn write_error_code(&self) -> Option<ConfigWriteErrorCode> { match self { Self::Write { code, .. } => Some(code.clone()), _ => None, } } } #[derive(Clone)] pub struct ConfigService { codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>, loader_overrides: LoaderOverrides, } impl ConfigService { pub fn new(codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>) -> Self { Self { codex_home, cli_overrides, loader_overrides: LoaderOverrides::default(), } } #[cfg(test)] fn with_overrides( codex_home: PathBuf, cli_overrides: Vec<(String, TomlValue)>, loader_overrides: LoaderOverrides, ) -> Self { Self { codex_home, cli_overrides, loader_overrides, } } pub async fn read( &self, params: ConfigReadParams, ) -> Result<ConfigReadResponse, ConfigServiceError> { let layers = self .load_thread_agnostic_config() .await .map_err(|err| ConfigServiceError::io("failed to read configuration layers", err))?; let effective = layers.effective_config(); validate_config(&effective) .map_err(|err| ConfigServiceError::toml("invalid configuration", err))?; let json_value = serde_json::to_value(&effective) .map_err(|err| ConfigServiceError::json("failed to serialize configuration", err))?; let config: ApiConfig = serde_json::from_value(json_value) .map_err(|err| ConfigServiceError::json("failed to deserialize configuration", err))?; Ok(ConfigReadResponse { config, origins: layers.origins(), layers: params.include_layers.then(|| { layers .layers_high_to_low() .iter() .map(|layer| layer.as_layer()) .collect() }), }) } pub async fn write_value( &self, params: ConfigValueWriteParams, ) -> Result<ConfigWriteResponse, ConfigServiceError> { let edits = vec![(params.key_path, 
params.value, params.merge_strategy)]; self.apply_edits(params.file_path, params.expected_version, edits) .await } pub async fn batch_write( &self, params: ConfigBatchWriteParams, ) -> Result<ConfigWriteResponse, ConfigServiceError> { let edits = params .edits .into_iter() .map(|edit| (edit.key_path, edit.value, edit.merge_strategy)) .collect(); self.apply_edits(params.file_path, params.expected_version, edits) .await } pub async fn load_user_saved_config( &self, ) -> Result<codex_app_server_protocol::UserSavedConfig, ConfigServiceError> { let layers = self .load_thread_agnostic_config() .await .map_err(|err| ConfigServiceError::io("failed to load configuration", err))?; let toml_value = layers.effective_config(); let cfg: ConfigToml = toml_value .try_into() .map_err(|err| ConfigServiceError::toml("failed to parse config.toml", err))?; Ok(cfg.into()) } async fn apply_edits( &self, file_path: Option<String>, expected_version: Option<String>, edits: Vec<(String, JsonValue, MergeStrategy)>, ) -> Result<ConfigWriteResponse, ConfigServiceError> { let allowed_path = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, &self.codex_home) .map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?; let provided_path = match file_path { Some(path) => AbsolutePathBuf::from_absolute_path(PathBuf::from(path)) .map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?, None => allowed_path.clone(), }; if !paths_match(&allowed_path, &provided_path) { return Err(ConfigServiceError::write( ConfigWriteErrorCode::ConfigLayerReadonly, "Only writes to the user config are allowed", )); } let layers = self .load_thread_agnostic_config() .await .map_err(|err| ConfigServiceError::io("failed to load configuration", err))?; let user_layer = match layers.get_user_layer() { Some(layer) => Cow::Borrowed(layer), None => Cow::Owned(create_empty_user_layer(&allowed_path).await?), }; if let Some(expected) = expected_version.as_deref() && 
expected != user_layer.version { return Err(ConfigServiceError::write( ConfigWriteErrorCode::ConfigVersionConflict, "Configuration was modified since last read. Fetch latest version and retry.", )); } let mut user_config = user_layer.config.clone(); let mut parsed_segments = Vec::new(); let mut config_edits = Vec::new(); for (key_path, value, strategy) in edits.into_iter() { let segments = parse_key_path(&key_path).map_err(|message| { ConfigServiceError::write(ConfigWriteErrorCode::ConfigValidationError, message) })?; let original_value = value_at_path(&user_config, &segments).cloned(); let parsed_value = parse_value(value).map_err(|message| { ConfigServiceError::write(ConfigWriteErrorCode::ConfigValidationError, message) })?; apply_merge(&mut user_config, &segments, parsed_value.as_ref(), strategy).map_err( |err| match err { MergeError::PathNotFound => ConfigServiceError::write( ConfigWriteErrorCode::ConfigPathNotFound, "Path not found", ), MergeError::Validation(message) => ConfigServiceError::write( ConfigWriteErrorCode::ConfigValidationError, message, ), }, )?; let updated_value = value_at_path(&user_config, &segments).cloned(); if original_value != updated_value { let edit = match updated_value { Some(value) => ConfigEdit::SetPath { segments: segments.clone(), value: toml_value_to_item(&value).map_err(|err| { ConfigServiceError::anyhow("failed to build config edits", err) })?, }, None => ConfigEdit::ClearPath { segments: segments.clone(), }, }; config_edits.push(edit); } parsed_segments.push(segments); } validate_config(&user_config).map_err(|err| { ConfigServiceError::write( ConfigWriteErrorCode::ConfigValidationError, format!("Invalid configuration: {err}"), ) })?; let updated_layers = layers.with_user_config(&provided_path, user_config.clone()); let effective = updated_layers.effective_config(); validate_config(&effective).map_err(|err| { ConfigServiceError::write( ConfigWriteErrorCode::ConfigValidationError, format!("Invalid configuration: {err}"), ) })?; 
if !config_edits.is_empty() { ConfigEditsBuilder::new(&self.codex_home) .with_edits(config_edits) .apply() .await .map_err(|err| ConfigServiceError::anyhow("failed to persist config.toml", err))?; } let overridden = first_overridden_edit(&updated_layers, &effective, &parsed_segments); let status = overridden .as_ref() .map(|_| WriteStatus::OkOverridden) .unwrap_or(WriteStatus::Ok); Ok(ConfigWriteResponse { status, version: updated_layers .get_user_layer() .ok_or_else(|| { ConfigServiceError::write( ConfigWriteErrorCode::UserLayerNotFound, "user layer not found in updated layers", ) })? .version .clone(), file_path: provided_path, overridden_metadata: overridden, }) } /// Loads a "thread-agnostic" config, which means the config layers do not /// include any in-repo .codex/ folders because there is no cwd/project root /// associated with this query. async fn load_thread_agnostic_config(&self) -> std::io::Result<ConfigLayerStack> { let cwd: Option<AbsolutePathBuf> = None; load_config_layers_state( &self.codex_home, cwd, &self.cli_overrides, self.loader_overrides.clone(), ) .await } } async fn create_empty_user_layer( config_toml: &AbsolutePathBuf, ) -> Result<ConfigLayerEntry, ConfigServiceError> { let toml_value = match tokio::fs::read_to_string(config_toml).await { Ok(contents) => toml::from_str(&contents).map_err(|e| { ConfigServiceError::toml("failed to parse existing user config.toml", e) })?, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { tokio::fs::write(config_toml, "").await.map_err(|e| { ConfigServiceError::io("failed to create empty user config.toml", e) })?; TomlValue::Table(toml::map::Map::new()) } else { return Err(ConfigServiceError::io("failed to read user config.toml", e)); } } }; Ok(ConfigLayerEntry::new( ConfigLayerSource::User { file: config_toml.clone(), }, toml_value, )) } fn parse_value(value: JsonValue) -> Result<Option<TomlValue>, String> { if value.is_null() { return Ok(None); } serde_json::from_value::<TomlValue>(value) .map(Some) 
.map_err(|err| format!("invalid value: {err}")) } fn parse_key_path(path: &str) -> Result<Vec<String>, String> { if path.trim().is_empty() { return Err("keyPath must not be empty".to_string()); } Ok(path .split('.') .map(std::string::ToString::to_string) .collect()) } #[derive(Debug)] enum MergeError { PathNotFound, Validation(String), } fn apply_merge( root: &mut TomlValue, segments: &[String], value: Option<&TomlValue>, strategy: MergeStrategy, ) -> Result<bool, MergeError> { let Some(value) = value else { return clear_path(root, segments); }; let Some((last, parents)) = segments.split_last() else { return Err(MergeError::Validation( "keyPath must not be empty".to_string(), )); }; let mut current = root; for segment in parents { match current { TomlValue::Table(table) => { current = table .entry(segment.clone()) .or_insert_with(|| TomlValue::Table(toml::map::Map::new())); } _ => { *current = TomlValue::Table(toml::map::Map::new()); if let TomlValue::Table(table) = current { current = table .entry(segment.clone()) .or_insert_with(|| TomlValue::Table(toml::map::Map::new())); } } } } let table = current.as_table_mut().ok_or_else(|| { MergeError::Validation("cannot set value on non-table parent".to_string()) })?; if matches!(strategy, MergeStrategy::Upsert) && let Some(existing) = table.get_mut(last) && matches!(existing, TomlValue::Table(_)) && matches!(value, TomlValue::Table(_)) { merge_toml_values(existing, value); return Ok(true); } let changed = table .get(last) .map(|existing| Some(existing) != Some(value)) .unwrap_or(true); table.insert(last.clone(), value.clone()); Ok(changed) } fn clear_path(root: &mut TomlValue, segments: &[String]) -> Result<bool, MergeError> { let Some((last, parents)) = segments.split_last() else { return Err(MergeError::Validation( "keyPath must not be empty".to_string(), )); }; let mut current = root; for segment in parents { match current { TomlValue::Table(table) => { current = 
table.get_mut(segment).ok_or(MergeError::PathNotFound)?; } _ => return Err(MergeError::PathNotFound), } } let Some(parent) = current.as_table_mut() else { return Err(MergeError::PathNotFound); }; Ok(parent.remove(last).is_some()) } fn toml_value_to_item(value: &TomlValue) -> anyhow::Result<TomlItem> { match value { TomlValue::Table(table) => { let mut table_item = toml_edit::Table::new(); table_item.set_implicit(false); for (key, val) in table { table_item.insert(key, toml_value_to_item(val)?); } Ok(TomlItem::Table(table_item)) } other => Ok(TomlItem::Value(toml_value_to_value(other)?)), } } fn toml_value_to_value(value: &TomlValue) -> anyhow::Result<toml_edit::Value> { match value { TomlValue::String(val) => Ok(toml_edit::Value::from(val.clone())), TomlValue::Integer(val) => Ok(toml_edit::Value::from(*val)), TomlValue::Float(val) => Ok(toml_edit::Value::from(*val)), TomlValue::Boolean(val) => Ok(toml_edit::Value::from(*val)), TomlValue::Datetime(val) => Ok(toml_edit::Value::from(*val)), TomlValue::Array(items) => { let mut array = toml_edit::Array::new(); for item in items { array.push(toml_value_to_value(item)?); } Ok(toml_edit::Value::Array(array)) } TomlValue::Table(table) => { let mut inline = toml_edit::InlineTable::new(); for (key, val) in table { inline.insert(key, toml_value_to_value(val)?); } Ok(toml_edit::Value::InlineTable(inline)) } } } fn validate_config(value: &TomlValue) -> Result<(), toml::de::Error> { let _: ConfigToml = value.clone().try_into()?; Ok(()) } fn paths_match(expected: impl AsRef<Path>, provided: impl AsRef<Path>) -> bool { if let (Ok(expanded_expected), Ok(expanded_provided)) = ( path_utils::normalize_for_path_comparison(&expected), path_utils::normalize_for_path_comparison(&provided), ) { expanded_expected == expanded_provided } else { expected.as_ref() == provided.as_ref() } } fn value_at_path<'a>(root: &'a TomlValue, segments: &[String]) -> Option<&'a TomlValue> { let mut current = root; for segment in segments { match current { 
TomlValue::Table(table) => { current = table.get(segment)?; } TomlValue::Array(items) => { let idx = segment.parse::<i64>().ok()?; let idx = usize::try_from(idx).ok()?; current = items.get(idx)?; } _ => return None, } } Some(current) } fn override_message(layer: &ConfigLayerSource) -> String { match layer { ConfigLayerSource::Mdm { domain, key: _ } => { format!("Overridden by managed policy (MDM): {domain}") } ConfigLayerSource::System { file } => { format!("Overridden by managed config (system): {}", file.display()) } ConfigLayerSource::Project { dot_codex_folder } => format!( "Overridden by project config: {}/{CONFIG_TOML_FILE}", dot_codex_folder.display(), ), ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(), ConfigLayerSource::User { file } => { format!("Overridden by user config: {}", file.display()) } ConfigLayerSource::LegacyManagedConfigTomlFromFile { file } => { format!( "Overridden by legacy managed_config.toml: {}", file.display() ) } ConfigLayerSource::LegacyManagedConfigTomlFromMdm => { "Overridden by legacy managed configuration from MDM".to_string() } } } fn compute_override_metadata( layers: &ConfigLayerStack, effective: &TomlValue, segments: &[String], ) -> Option<OverriddenMetadata> { let user_value = match layers.get_user_layer() { Some(user_layer) => value_at_path(&user_layer.config, segments), None => return None, }; let effective_value = value_at_path(effective, segments); if user_value.is_some() && user_value == effective_value { return None; } if user_value.is_none() && effective_value.is_none() { return None; } let overriding_layer = find_effective_layer(layers, segments)?; let message = override_message(&overriding_layer.name); Some(OverriddenMetadata { message, overriding_layer, effective_value: effective_value .and_then(|value| serde_json::to_value(value).ok()) .unwrap_or(JsonValue::Null), }) } fn first_overridden_edit( layers: &ConfigLayerStack, effective: &TomlValue, edits: &[Vec<String>], ) -> 
Option<OverriddenMetadata> { for segments in edits { if let Some(meta) = compute_override_metadata(layers, effective, segments) { return Some(meta); } } None } fn find_effective_layer( layers: &ConfigLayerStack, segments: &[String], ) -> Option<ConfigLayerMetadata> { for layer in layers.layers_high_to_low() { if let Some(meta) = value_at_path(&layer.config, segments).map(|_| layer.metadata()) { return Some(meta); } } None } #[cfg(test)] mod tests { use super::*; use anyhow::Result; use codex_app_server_protocol::AskForApproval; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use tempfile::tempdir; #[test] fn toml_value_to_item_handles_nested_config_tables() { let config = r#" [mcp_servers.docs] command = "docs-server" [mcp_servers.docs.http_headers] X-Doc = "42" "#; let value: TomlValue = toml::from_str(config).expect("parse config example"); let item = toml_value_to_item(&value).expect("convert to toml_edit item"); let root = item.as_table().expect("root table"); assert!(!root.is_implicit(), "root table should be explicit"); let mcp_servers = root .get("mcp_servers") .and_then(TomlItem::as_table) .expect("mcp_servers table"); assert!( !mcp_servers.is_implicit(), "mcp_servers table should be explicit" ); let docs = mcp_servers .get("docs") .and_then(TomlItem::as_table) .expect("docs table"); assert_eq!( docs.get("command") .and_then(TomlItem::as_value) .and_then(toml_edit::Value::as_str), Some("docs-server") ); let http_headers = docs .get("http_headers") .and_then(TomlItem::as_table) .expect("http_headers table"); assert_eq!( http_headers .get("X-Doc") .and_then(TomlItem::as_value) .and_then(toml_edit::Value::as_str), Some("42") ); } #[tokio::test] async fn write_value_preserves_comments_and_order() -> Result<()> { let tmp = tempdir().expect("tempdir"); let original = r#"# Codex user configuration model = "gpt-5" approval_policy = "on-request" [notice] # Preserve this comment hide_full_access_warning = true [features] unified_exec 
= true "#; std::fs::write(tmp.path().join(CONFIG_TOML_FILE), original)?; let service = ConfigService::new(tmp.path().to_path_buf(), vec![]); service .write_value(ConfigValueWriteParams { file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()), key_path: "features.remote_compaction".to_string(), value: serde_json::json!(true), merge_strategy: MergeStrategy::Replace, expected_version: None, }) .await .expect("write succeeds"); let updated = std::fs::read_to_string(tmp.path().join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"# Codex user configuration model = "gpt-5" approval_policy = "on-request" [notice] # Preserve this comment hide_full_access_warning = true [features] unified_exec = true remote_compaction = true "#; assert_eq!(updated, expected); Ok(()) } #[tokio::test] async fn read_includes_origins_and_layers() { let tmp = tempdir().expect("tempdir"); let user_path = tmp.path().join(CONFIG_TOML_FILE); std::fs::write(&user_path, "model = \"user\"").unwrap(); let user_file = AbsolutePathBuf::try_from(user_path.clone()).expect("user file"); let managed_path = tmp.path().join("managed_config.toml"); std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap(); let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file"); let service = ConfigService::with_overrides( tmp.path().to_path_buf(), vec![], LoaderOverrides { managed_config_path: Some(managed_path.clone()), #[cfg(target_os = "macos")] managed_preferences_base64: None, }, ); let response = service .read(ConfigReadParams { include_layers: true, }) .await .expect("response"); assert_eq!(response.config.approval_policy, Some(AskForApproval::Never)); assert_eq!( response .origins .get("approval_policy") .expect("origin") .name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone() }, ); let layers = response.layers.expect("layers present"); if cfg!(unix) { let system_file = AbsolutePathBuf::from_absolute_path( 
crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX, ) .expect("system file"); assert_eq!(layers.len(), 3, "expected three layers on unix"); assert_eq!( layers.first().unwrap().name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone() } ); assert_eq!( layers.get(1).unwrap().name, ConfigLayerSource::User { file: user_file.clone() } ); assert_eq!( layers.get(2).unwrap().name, ConfigLayerSource::System { file: system_file } ); } else { assert_eq!(layers.len(), 2, "expected two layers"); assert_eq!( layers.first().unwrap().name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone() } ); assert_eq!( layers.get(1).unwrap().name, ConfigLayerSource::User { file: user_file } ); } } #[tokio::test] async fn write_value_reports_override() { let tmp = tempdir().expect("tempdir"); std::fs::write( tmp.path().join(CONFIG_TOML_FILE), "approval_policy = \"on-request\"", ) .unwrap(); let managed_path = tmp.path().join("managed_config.toml"); std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap(); let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file"); let service = ConfigService::with_overrides( tmp.path().to_path_buf(), vec![], LoaderOverrides { managed_config_path: Some(managed_path.clone()), #[cfg(target_os = "macos")] managed_preferences_base64: None, }, ); let result = service .write_value(ConfigValueWriteParams { file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()), key_path: "approval_policy".to_string(), value: serde_json::json!("never"), merge_strategy: MergeStrategy::Replace, expected_version: None, }) .await .expect("result"); let read_after = service .read(ConfigReadParams { include_layers: true, }) .await .expect("read"); assert_eq!( read_after.config.approval_policy, Some(AskForApproval::Never) ); assert_eq!( read_after .origins .get("approval_policy") .expect("origin") .name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: 
managed_file.clone() } ); assert_eq!(result.status, WriteStatus::Ok); assert!(result.overridden_metadata.is_none()); } #[tokio::test] async fn version_conflict_rejected() { let tmp = tempdir().expect("tempdir"); let user_path = tmp.path().join(CONFIG_TOML_FILE); std::fs::write(&user_path, "model = \"user\"").unwrap(); let service = ConfigService::new(tmp.path().to_path_buf(), vec![]); let error = service .write_value(ConfigValueWriteParams { file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()), key_path: "model".to_string(), value: serde_json::json!("gpt-5"), merge_strategy: MergeStrategy::Replace, expected_version: Some("sha256:bogus".to_string()), }) .await .expect_err("should fail"); assert_eq!( error.write_error_code(), Some(ConfigWriteErrorCode::ConfigVersionConflict) ); } #[tokio::test] async fn write_value_defaults_to_user_config_path() { let tmp = tempdir().expect("tempdir"); std::fs::write(tmp.path().join(CONFIG_TOML_FILE), "").unwrap(); let service = ConfigService::new(tmp.path().to_path_buf(), vec![]); service .write_value(ConfigValueWriteParams { file_path: None, key_path: "model".to_string(), value: serde_json::json!("gpt-new"), merge_strategy: MergeStrategy::Replace, expected_version: None, }) .await .expect("write succeeds"); let contents = std::fs::read_to_string(tmp.path().join(CONFIG_TOML_FILE)).expect("read config"); assert!( contents.contains("model = \"gpt-new\""), "config.toml should be updated even when file_path is omitted" ); } #[tokio::test] async fn invalid_user_value_rejected_even_if_overridden_by_managed() { let tmp = tempdir().expect("tempdir"); std::fs::write(tmp.path().join(CONFIG_TOML_FILE), "model = \"user\"").unwrap(); let managed_path = tmp.path().join("managed_config.toml"); std::fs::write(&managed_path, "approval_policy = \"never\"").unwrap(); let service = ConfigService::with_overrides( tmp.path().to_path_buf(), vec![], LoaderOverrides { managed_config_path: Some(managed_path.clone()), #[cfg(target_os = 
"macos")] managed_preferences_base64: None, }, ); let error = service .write_value(ConfigValueWriteParams { file_path: Some(tmp.path().join(CONFIG_TOML_FILE).display().to_string()), key_path: "approval_policy".to_string(), value: serde_json::json!("bogus"), merge_strategy: MergeStrategy::Replace, expected_version: None, }) .await .expect_err("should fail validation"); assert_eq!( error.write_error_code(), Some(ConfigWriteErrorCode::ConfigValidationError) ); let contents = std::fs::read_to_string(tmp.path().join(CONFIG_TOML_FILE)).expect("read config"); assert_eq!(contents.trim(), "model = \"user\""); } #[tokio::test] async fn read_reports_managed_overrides_user_and_session_flags() { let tmp = tempdir().expect("tempdir"); let user_path = tmp.path().join(CONFIG_TOML_FILE); std::fs::write(&user_path, "model = \"user\"").unwrap(); let user_file = AbsolutePathBuf::try_from(user_path.clone()).expect("user file"); let managed_path = tmp.path().join("managed_config.toml"); std::fs::write(&managed_path, "model = \"system\"").unwrap(); let managed_file = AbsolutePathBuf::try_from(managed_path.clone()).expect("managed file");
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/types.rs
codex-rs/core/src/config/types.rs
//! Types used to define the fields of [`crate::config::Config`]. // Note this file should generally be restricted to simple struct/enum // definitions that do not contain business logic. use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::BTreeMap; use std::collections::HashMap; use std::path::PathBuf; use std::time::Duration; use wildmatch::WildMatchPattern; use serde::Deserialize; use serde::Deserializer; use serde::Serialize; use serde::de::Error as SerdeError; pub const DEFAULT_OTEL_ENVIRONMENT: &str = "dev"; #[derive(Serialize, Debug, Clone, PartialEq)] pub struct McpServerConfig { #[serde(flatten)] pub transport: McpServerTransportConfig, /// When `false`, Codex skips initializing this MCP server. #[serde(default = "default_enabled")] pub enabled: bool, /// Startup timeout in seconds for initializing MCP server & initially listing tools. #[serde( default, with = "option_duration_secs", skip_serializing_if = "Option::is_none" )] pub startup_timeout_sec: Option<Duration>, /// Default timeout for MCP tool calls initiated via this server. #[serde(default, with = "option_duration_secs")] pub tool_timeout_sec: Option<Duration>, /// Explicit allow-list of tools exposed from this server. When set, only these tools will be registered. #[serde(default, skip_serializing_if = "Option::is_none")] pub enabled_tools: Option<Vec<String>>, /// Explicit deny-list of tools. These tools will be removed after applying `enabled_tools`. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub disabled_tools: Option<Vec<String>>, } impl<'de> Deserialize<'de> for McpServerConfig { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize, Clone)] struct RawMcpServerConfig { // stdio command: Option<String>, #[serde(default)] args: Option<Vec<String>>, #[serde(default)] env: Option<HashMap<String, String>>, #[serde(default)] env_vars: Option<Vec<String>>, #[serde(default)] cwd: Option<PathBuf>, http_headers: Option<HashMap<String, String>>, #[serde(default)] env_http_headers: Option<HashMap<String, String>>, // streamable_http url: Option<String>, bearer_token: Option<String>, bearer_token_env_var: Option<String>, // shared #[serde(default)] startup_timeout_sec: Option<f64>, #[serde(default)] startup_timeout_ms: Option<u64>, #[serde(default, with = "option_duration_secs")] tool_timeout_sec: Option<Duration>, #[serde(default)] enabled: Option<bool>, #[serde(default)] enabled_tools: Option<Vec<String>>, #[serde(default)] disabled_tools: Option<Vec<String>>, } let mut raw = RawMcpServerConfig::deserialize(deserializer)?; let startup_timeout_sec = match (raw.startup_timeout_sec, raw.startup_timeout_ms) { (Some(sec), _) => { let duration = Duration::try_from_secs_f64(sec).map_err(SerdeError::custom)?; Some(duration) } (None, Some(ms)) => Some(Duration::from_millis(ms)), (None, None) => None, }; let tool_timeout_sec = raw.tool_timeout_sec; let enabled = raw.enabled.unwrap_or_else(default_enabled); let enabled_tools = raw.enabled_tools.clone(); let disabled_tools = raw.disabled_tools.clone(); fn throw_if_set<E, T>(transport: &str, field: &str, value: Option<&T>) -> Result<(), E> where E: SerdeError, { if value.is_none() { return Ok(()); } Err(E::custom(format!( "{field} is not supported for {transport}", ))) } let transport = if let Some(command) = raw.command.clone() { throw_if_set("stdio", "url", raw.url.as_ref())?; throw_if_set( "stdio", 
"bearer_token_env_var", raw.bearer_token_env_var.as_ref(), )?; throw_if_set("stdio", "bearer_token", raw.bearer_token.as_ref())?; throw_if_set("stdio", "http_headers", raw.http_headers.as_ref())?; throw_if_set("stdio", "env_http_headers", raw.env_http_headers.as_ref())?; McpServerTransportConfig::Stdio { command, args: raw.args.clone().unwrap_or_default(), env: raw.env.clone(), env_vars: raw.env_vars.clone().unwrap_or_default(), cwd: raw.cwd.take(), } } else if let Some(url) = raw.url.clone() { throw_if_set("streamable_http", "args", raw.args.as_ref())?; throw_if_set("streamable_http", "env", raw.env.as_ref())?; throw_if_set("streamable_http", "env_vars", raw.env_vars.as_ref())?; throw_if_set("streamable_http", "cwd", raw.cwd.as_ref())?; throw_if_set("streamable_http", "bearer_token", raw.bearer_token.as_ref())?; McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var: raw.bearer_token_env_var.clone(), http_headers: raw.http_headers.clone(), env_http_headers: raw.env_http_headers.take(), } } else { return Err(SerdeError::custom("invalid transport")); }; Ok(Self { transport, startup_timeout_sec, tool_timeout_sec, enabled, enabled_tools, disabled_tools, }) } } const fn default_enabled() -> bool { true } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[serde(untagged, deny_unknown_fields, rename_all = "snake_case")] pub enum McpServerTransportConfig { /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#stdio Stdio { command: String, #[serde(default)] args: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] env: Option<HashMap<String, String>>, #[serde(default, skip_serializing_if = "Vec::is_empty")] env_vars: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] cwd: Option<PathBuf>, }, /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#streamable-http StreamableHttp { url: String, /// Name of the environment variable to read for an HTTP bearer token. 
/// When set, requests will include the token via `Authorization: Bearer <token>`. /// The actual secret value must be provided via the environment. #[serde(default, skip_serializing_if = "Option::is_none")] bearer_token_env_var: Option<String>, /// Additional HTTP headers to include in requests to this server. #[serde(default, skip_serializing_if = "Option::is_none")] http_headers: Option<HashMap<String, String>>, /// HTTP headers where the value is sourced from an environment variable. #[serde(default, skip_serializing_if = "Option::is_none")] env_http_headers: Option<HashMap<String, String>>, }, } mod option_duration_secs { use serde::Deserialize; use serde::Deserializer; use serde::Serializer; use std::time::Duration; pub fn serialize<S>(value: &Option<Duration>, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match value { Some(duration) => serializer.serialize_some(&duration.as_secs_f64()), None => serializer.serialize_none(), } } pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error> where D: Deserializer<'de>, { let secs = Option::<f64>::deserialize(deserializer)?; secs.map(|secs| Duration::try_from_secs_f64(secs).map_err(serde::de::Error::custom)) .transpose() } } #[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)] pub enum UriBasedFileOpener { #[serde(rename = "vscode")] VsCode, #[serde(rename = "vscode-insiders")] VsCodeInsiders, #[serde(rename = "windsurf")] Windsurf, #[serde(rename = "cursor")] Cursor, /// Option to disable the URI-based file opener. #[serde(rename = "none")] None, } impl UriBasedFileOpener { pub fn get_scheme(&self) -> Option<&str> { match self { UriBasedFileOpener::VsCode => Some("vscode"), UriBasedFileOpener::VsCodeInsiders => Some("vscode-insiders"), UriBasedFileOpener::Windsurf => Some("windsurf"), UriBasedFileOpener::Cursor => Some("cursor"), UriBasedFileOpener::None => None, } } } /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. 
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct History { /// If true, history entries will not be written to disk. pub persistence: HistoryPersistence, /// If set, the maximum size of the history file in bytes. The oldest entries /// are dropped once the file exceeds this limit. pub max_bytes: Option<usize>, } #[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub enum HistoryPersistence { /// Save all history entries to disk. #[default] SaveAll, /// Do not write history to disk. None, } // ===== OTEL configuration ===== #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum OtelHttpProtocol { /// Binary payload Binary, /// JSON payload Json, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub struct OtelTlsConfig { pub ca_certificate: Option<AbsolutePathBuf>, pub client_certificate: Option<AbsolutePathBuf>, pub client_private_key: Option<AbsolutePathBuf>, } /// Which OTEL exporter to use. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum OtelExporterKind { None, OtlpHttp { endpoint: String, #[serde(default)] headers: HashMap<String, String>, protocol: OtelHttpProtocol, #[serde(default)] tls: Option<OtelTlsConfig>, }, OtlpGrpc { endpoint: String, #[serde(default)] headers: HashMap<String, String>, #[serde(default)] tls: Option<OtelTlsConfig>, }, } /// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct OtelConfigToml { /// Log user prompt in traces pub log_user_prompt: Option<bool>, /// Mark traces with environment (dev, staging, prod, test). Defaults to dev. 
pub environment: Option<String>, /// Optional log exporter pub exporter: Option<OtelExporterKind>, /// Optional trace exporter pub trace_exporter: Option<OtelExporterKind>, } /// Effective OTEL settings after defaults are applied. #[derive(Debug, Clone, PartialEq)] pub struct OtelConfig { pub log_user_prompt: bool, pub environment: String, pub exporter: OtelExporterKind, pub trace_exporter: OtelExporterKind, } impl Default for OtelConfig { fn default() -> Self { OtelConfig { log_user_prompt: false, environment: DEFAULT_OTEL_ENVIRONMENT.to_owned(), exporter: OtelExporterKind::None, trace_exporter: OtelExporterKind::None, } } } #[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(untagged)] pub enum Notifications { Enabled(bool), Custom(Vec<String>), } impl Default for Notifications { fn default() -> Self { Self::Enabled(true) } } /// How TUI2 should interpret mouse scroll events. /// /// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse /// button events, without a magnitude. This setting controls whether Codex uses a heuristic to /// infer wheel vs trackpad per stream, or forces a specific behavior. #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum ScrollInputMode { /// Infer wheel vs trackpad behavior per scroll stream. Auto, /// Always treat scroll events as mouse-wheel input (fixed lines per tick). Wheel, /// Always treat scroll events as trackpad input (fractional accumulation). Trackpad, } impl Default for ScrollInputMode { fn default() -> Self { Self::Auto } } /// Collection of settings that are specific to the TUI. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct Tui { /// Enable desktop notifications from the TUI when the terminal is unfocused. /// Defaults to `true`. #[serde(default)] pub notifications: Notifications, /// Enable animations (welcome screen, shimmer effects, spinners). 
/// Defaults to `true`. #[serde(default = "default_true")] pub animations: bool, /// Show startup tooltips in the TUI welcome screen. /// Defaults to `true`. #[serde(default = "default_true")] pub show_tooltips: bool, /// Override the *wheel* event density used to normalize TUI2 scrolling. /// /// Terminals generally deliver both mouse wheels and trackpads as discrete `scroll up/down` /// mouse events with direction but no magnitude. Unfortunately, the *number* of raw events /// per physical wheel notch varies by terminal (commonly 1, 3, or 9+). TUI2 uses this value /// to normalize that raw event density into consistent "wheel tick" behavior. /// /// Wheel math (conceptually): /// /// - A single event contributes `1 / scroll_events_per_tick` tick-equivalents. /// - Wheel-like streams then scale that by `scroll_wheel_lines` so one physical notch scrolls /// a fixed number of lines. /// /// Trackpad math is intentionally *not* fully tied to this value: in trackpad-like mode, TUI2 /// uses `min(scroll_events_per_tick, 3)` as the divisor so terminals with dense wheel ticks /// (e.g. 9 events per notch) do not make trackpads feel artificially slow. /// /// Defaults are derived per terminal from [`crate::terminal::TerminalInfo`] when TUI2 starts. /// See `codex-rs/tui2/docs/scroll_input_model.md` for the probe data and rationale. pub scroll_events_per_tick: Option<u16>, /// Override how many transcript lines one physical *wheel notch* should scroll in TUI2. /// /// This is the "classic feel" knob. Defaults to 3. /// /// Wheel-like per-event contribution is `scroll_wheel_lines / scroll_events_per_tick`. For /// example, in a terminal that emits 9 events per notch, the default `3 / 9` yields 1/3 of a /// line per event and totals 3 lines once the full notch burst arrives. /// /// See `codex-rs/tui2/docs/scroll_input_model.md` for details on the stream model and the /// wheel/trackpad heuristic. 
pub scroll_wheel_lines: Option<u16>, /// Override baseline trackpad scroll sensitivity in TUI2. /// /// Trackpads do not have discrete notches, but terminals still emit discrete `scroll up/down` /// events. In trackpad-like mode, TUI2 accumulates fractional scroll and only applies whole /// lines to the viewport. /// /// Trackpad per-event contribution is: /// /// - `scroll_trackpad_lines / min(scroll_events_per_tick, 3)` /// /// (plus optional bounded acceleration; see `scroll_trackpad_accel_*`). The `min(..., 3)` /// divisor is deliberate: `scroll_events_per_tick` is calibrated from *wheel* behavior and /// can be much larger than trackpad event density, which would otherwise make trackpads feel /// too slow in dense-wheel terminals. /// /// Defaults to 1, meaning one tick-equivalent maps to one transcript line. pub scroll_trackpad_lines: Option<u16>, /// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2. /// /// This keeps small swipes precise while allowing large/faster swipes to cover more content. /// Defaults are chosen to address terminals where trackpad event density is comparatively low. /// /// Concretely, TUI2 computes an acceleration multiplier for trackpad-like streams: /// /// - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)` /// /// The multiplier is applied to the stream’s computed line delta (including any carried /// fractional remainder). pub scroll_trackpad_accel_events: Option<u16>, /// Trackpad acceleration: maximum multiplier applied to trackpad-like streams. /// /// Set to 1 to effectively disable trackpad acceleration. /// /// See [`Tui::scroll_trackpad_accel_events`] for the exact multiplier formula. pub scroll_trackpad_accel_max: Option<u16>, /// Select how TUI2 interprets mouse scroll input. /// /// - `auto` (default): infer wheel vs trackpad per scroll stream. /// - `wheel`: always use wheel behavior (fixed lines per wheel notch). 
/// - `trackpad`: always use trackpad behavior (fractional accumulation; wheel may feel slow). #[serde(default)] pub scroll_mode: ScrollInputMode, /// Auto-mode threshold: maximum time (ms) for the first tick-worth of events to arrive. /// /// In `scroll_mode = "auto"`, TUI2 starts a stream as trackpad-like (to avoid overshoot) and /// promotes it to wheel-like if `scroll_events_per_tick` events arrive "quickly enough". This /// threshold controls what "quickly enough" means. /// /// Most users should leave this unset; it is primarily for terminals that emit wheel ticks /// batched over longer time spans. pub scroll_wheel_tick_detect_max_ms: Option<u64>, /// Auto-mode fallback: maximum duration (ms) that a very small stream is still treated as wheel-like. /// /// This is only used when `scroll_events_per_tick` is effectively 1 (one event per wheel /// notch). In that case, we cannot observe a "tick completion time", so TUI2 treats a /// short-lived, small stream (<= 2 events) as wheel-like to preserve classic wheel behavior. pub scroll_wheel_like_max_duration_ms: Option<u64>, /// Invert mouse scroll direction in TUI2. /// /// This flips the scroll sign after terminal detection. It is applied consistently to both /// wheel and trackpad input. #[serde(default)] pub scroll_invert: bool, } const fn default_true() -> bool { true } /// Settings for notices we display to users via the tui and app-server clients /// (primarily the Codex IDE extension). NOTE: these are different from /// notifications - notices are warnings, NUX screens, acknowledgements, etc. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct Notice { /// Tracks whether the user has acknowledged the full access warning prompt. pub hide_full_access_warning: Option<bool>, /// Tracks whether the user has acknowledged the Windows world-writable directories warning. 
pub hide_world_writable_warning: Option<bool>, /// Tracks whether the user opted out of the rate limit model switch reminder. pub hide_rate_limit_model_nudge: Option<bool>, /// Tracks whether the user has seen the model migration prompt pub hide_gpt5_1_migration_prompt: Option<bool>, /// Tracks whether the user has seen the gpt-5.1-codex-max migration prompt #[serde(rename = "hide_gpt-5.1-codex-max_migration_prompt")] pub hide_gpt_5_1_codex_max_migration_prompt: Option<bool>, /// Tracks acknowledged model migrations as old->new model slug mappings. #[serde(default)] pub model_migrations: BTreeMap<String, String>, } impl Notice { /// referenced by config_edit helpers when writing notice flags pub(crate) const TABLE_KEY: &'static str = "notice"; } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct SandboxWorkspaceWrite { #[serde(default)] pub writable_roots: Vec<AbsolutePathBuf>, #[serde(default)] pub network_access: bool, #[serde(default)] pub exclude_tmpdir_env_var: bool, #[serde(default)] pub exclude_slash_tmp: bool, } impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings { fn from(sandbox_workspace_write: SandboxWorkspaceWrite) -> Self { Self { writable_roots: sandbox_workspace_write.writable_roots, network_access: Some(sandbox_workspace_write.network_access), exclude_tmpdir_env_var: Some(sandbox_workspace_write.exclude_tmpdir_env_var), exclude_slash_tmp: Some(sandbox_workspace_write.exclude_slash_tmp), } } } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub enum ShellEnvironmentPolicyInherit { /// "Core" environment variables for the platform. On UNIX, this would /// include HOME, LOGNAME, PATH, SHELL, and USER, among others. Core, /// Inherits the full environment from the parent process. #[default] All, /// Do not inherit any environment variables from the parent process. 
None, } /// Policy for building the `env` when spawning a process via either the /// `shell` or `local_shell` tool. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct ShellEnvironmentPolicyToml { pub inherit: Option<ShellEnvironmentPolicyInherit>, pub ignore_default_excludes: Option<bool>, /// List of regular expressions. pub exclude: Option<Vec<String>>, pub r#set: Option<HashMap<String, String>>, /// List of regular expressions. pub include_only: Option<Vec<String>>, pub experimental_use_profile: Option<bool>, } pub type EnvironmentVariablePattern = WildMatchPattern<'*', '?'>; /// Deriving the `env` based on this policy works as follows: /// 1. Create an initial map based on the `inherit` policy. /// 2. If `ignore_default_excludes` is false, filter the map using the default /// exclude pattern(s), which are: `"*KEY*"`, `"*SECRET*"`, and `"*TOKEN*"`. /// 3. If `exclude` is not empty, filter the map using the provided patterns. /// 4. Insert any entries from `r#set` into the map. /// 5. If non-empty, filter the map using the `include_only` patterns. #[derive(Debug, Clone, PartialEq)] pub struct ShellEnvironmentPolicy { /// Starting point when building the environment. pub inherit: ShellEnvironmentPolicyInherit, /// True to skip the check to exclude default environment variables that /// contain "KEY", "SECRET", or "TOKEN" in their name. Defaults to true. pub ignore_default_excludes: bool, /// Environment variable names to exclude from the environment. pub exclude: Vec<EnvironmentVariablePattern>, /// (key, value) pairs to insert in the environment. pub r#set: HashMap<String, String>, /// Environment variable names to retain in the environment. pub include_only: Vec<EnvironmentVariablePattern>, /// If true, the shell profile will be used to run the command. 
pub use_profile: bool, } impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy { fn from(toml: ShellEnvironmentPolicyToml) -> Self { // Default to inheriting the full environment when not specified. let inherit = toml.inherit.unwrap_or(ShellEnvironmentPolicyInherit::All); let ignore_default_excludes = toml.ignore_default_excludes.unwrap_or(true); let exclude = toml .exclude .unwrap_or_default() .into_iter() .map(|s| EnvironmentVariablePattern::new_case_insensitive(&s)) .collect(); let r#set = toml.r#set.unwrap_or_default(); let include_only = toml .include_only .unwrap_or_default() .into_iter() .map(|s| EnvironmentVariablePattern::new_case_insensitive(&s)) .collect(); let use_profile = toml.experimental_use_profile.unwrap_or(false); Self { inherit, ignore_default_excludes, exclude, r#set, include_only, use_profile, } } } impl Default for ShellEnvironmentPolicy { fn default() -> Self { Self { inherit: ShellEnvironmentPolicyInherit::All, ignore_default_excludes: true, exclude: Vec::new(), r#set: HashMap::new(), include_only: Vec::new(), use_profile: false, } } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; #[test] fn deserialize_stdio_command_server_config() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" "#, ) .expect("should deserialize command config"); assert_eq!( cfg.transport, McpServerTransportConfig::Stdio { command: "echo".to_string(), args: vec![], env: None, env_vars: Vec::new(), cwd: None, } ); assert!(cfg.enabled); assert!(cfg.enabled_tools.is_none()); assert!(cfg.disabled_tools.is_none()); } #[test] fn deserialize_stdio_command_server_config_with_args() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" args = ["hello", "world"] "#, ) .expect("should deserialize command config"); assert_eq!( cfg.transport, McpServerTransportConfig::Stdio { command: "echo".to_string(), args: vec!["hello".to_string(), "world".to_string()], env: None, env_vars: Vec::new(), cwd: None, } ); 
assert!(cfg.enabled); } #[test] fn deserialize_stdio_command_server_config_with_arg_with_args_and_env() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" args = ["hello", "world"] env = { "FOO" = "BAR" } "#, ) .expect("should deserialize command config"); assert_eq!( cfg.transport, McpServerTransportConfig::Stdio { command: "echo".to_string(), args: vec!["hello".to_string(), "world".to_string()], env: Some(HashMap::from([("FOO".to_string(), "BAR".to_string())])), env_vars: Vec::new(), cwd: None, } ); assert!(cfg.enabled); } #[test] fn deserialize_stdio_command_server_config_with_env_vars() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" env_vars = ["FOO", "BAR"] "#, ) .expect("should deserialize command config with env_vars"); assert_eq!( cfg.transport, McpServerTransportConfig::Stdio { command: "echo".to_string(), args: vec![], env: None, env_vars: vec!["FOO".to_string(), "BAR".to_string()], cwd: None, } ); } #[test] fn deserialize_stdio_command_server_config_with_cwd() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" cwd = "/tmp" "#, ) .expect("should deserialize command config with cwd"); assert_eq!( cfg.transport, McpServerTransportConfig::Stdio { command: "echo".to_string(), args: vec![], env: None, env_vars: Vec::new(), cwd: Some(PathBuf::from("/tmp")), } ); } #[test] fn deserialize_disabled_server_config() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" enabled = false "#, ) .expect("should deserialize disabled server config"); assert!(!cfg.enabled); } #[test] fn deserialize_streamable_http_server_config() { let cfg: McpServerConfig = toml::from_str( r#" url = "https://example.com/mcp" "#, ) .expect("should deserialize http config"); assert_eq!( cfg.transport, McpServerTransportConfig::StreamableHttp { url: "https://example.com/mcp".to_string(), bearer_token_env_var: None, http_headers: None, env_http_headers: None, } ); assert!(cfg.enabled); } #[test] fn 
deserialize_streamable_http_server_config_with_env_var() { let cfg: McpServerConfig = toml::from_str( r#" url = "https://example.com/mcp" bearer_token_env_var = "GITHUB_TOKEN" "#, ) .expect("should deserialize http config"); assert_eq!( cfg.transport, McpServerTransportConfig::StreamableHttp { url: "https://example.com/mcp".to_string(), bearer_token_env_var: Some("GITHUB_TOKEN".to_string()), http_headers: None, env_http_headers: None, } ); assert!(cfg.enabled); } #[test] fn deserialize_streamable_http_server_config_with_headers() { let cfg: McpServerConfig = toml::from_str( r#" url = "https://example.com/mcp" http_headers = { "X-Foo" = "bar" } env_http_headers = { "X-Token" = "TOKEN_ENV" } "#, ) .expect("should deserialize http config with headers"); assert_eq!( cfg.transport, McpServerTransportConfig::StreamableHttp { url: "https://example.com/mcp".to_string(), bearer_token_env_var: None, http_headers: Some(HashMap::from([("X-Foo".to_string(), "bar".to_string())])), env_http_headers: Some(HashMap::from([( "X-Token".to_string(), "TOKEN_ENV".to_string() )])), } ); } #[test] fn deserialize_server_config_with_tool_filters() { let cfg: McpServerConfig = toml::from_str( r#" command = "echo" enabled_tools = ["allowed"] disabled_tools = ["blocked"] "#, ) .expect("should deserialize tool filters"); assert_eq!(cfg.enabled_tools, Some(vec!["allowed".to_string()])); assert_eq!(cfg.disabled_tools, Some(vec!["blocked".to_string()])); } #[test] fn deserialize_rejects_command_and_url() { toml::from_str::<McpServerConfig>( r#" command = "echo" url = "https://example.com" "#, ) .expect_err("should reject command+url"); } #[test] fn deserialize_rejects_env_for_http_transport() { toml::from_str::<McpServerConfig>( r#" url = "https://example.com" env = { "FOO" = "BAR" } "#, ) .expect_err("should reject env for http transport"); } #[test] fn deserialize_rejects_headers_for_stdio() { toml::from_str::<McpServerConfig>( r#" command = "echo" http_headers = { "X-Foo" = "bar" } "#, ) 
.expect_err("should reject http_headers for stdio transport"); toml::from_str::<McpServerConfig>( r#" command = "echo" env_http_headers = { "X-Foo" = "BAR_ENV" } "#, ) .expect_err("should reject env_http_headers for stdio transport"); } #[test] fn deserialize_rejects_inline_bearer_token_field() { let err = toml::from_str::<McpServerConfig>( r#" url = "https://example.com" bearer_token = "secret" "#, )
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/mod.rs
codex-rs/core/src/config/mod.rs
use crate::auth::AuthCredentialsStoreMode; use crate::config::types::DEFAULT_OTEL_ENVIRONMENT; use crate::config::types::History; use crate::config::types::McpServerConfig; use crate::config::types::Notice; use crate::config::types::Notifications; use crate::config::types::OtelConfig; use crate::config::types::OtelConfigToml; use crate::config::types::OtelExporterKind; use crate::config::types::SandboxWorkspaceWrite; use crate::config::types::ScrollInputMode; use crate::config::types::ShellEnvironmentPolicy; use crate::config::types::ShellEnvironmentPolicyToml; use crate::config::types::Tui; use crate::config::types::UriBasedFileOpener; use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigRequirements; use crate::config_loader::LoaderOverrides; use crate::config_loader::load_config_layers_state; use crate::features::Feature; use crate::features::FeatureOverrides; use crate::features::Features; use crate::features::FeaturesToml; use crate::git_info::resolve_root_git_project_for_trust; use crate::model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; use crate::model_provider_info::ModelProviderInfo; use crate::model_provider_info::OLLAMA_OSS_PROVIDER_ID; use crate::model_provider_info::built_in_model_providers; use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME; use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME; use crate::protocol::AskForApproval; use crate::protocol::SandboxPolicy; use codex_app_server_protocol::Tools; use codex_app_server_protocol::UserSavedConfig; use codex_protocol::config_types::ForcedLoginMethod; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::SandboxMode; use codex_protocol::config_types::TrustLevel; use codex_protocol::config_types::Verbosity; use codex_protocol::openai_models::ReasoningEffort; use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use dirs::home_dir; use serde::Deserialize; 
use serde::Serialize; use similar::DiffableStr; use std::collections::BTreeMap; use std::collections::HashMap; use std::io::ErrorKind; use std::path::Path; use std::path::PathBuf; #[cfg(test)] use tempfile::tempdir; use crate::config::profile::ConfigProfile; use toml::Value as TomlValue; use toml_edit::DocumentMut; mod constraint; pub mod edit; pub mod profile; pub mod service; pub mod types; pub use constraint::Constrained; pub use constraint::ConstraintError; pub use constraint::ConstraintResult; pub use service::ConfigService; pub use service::ConfigServiceError; const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5.1-codex-max"; pub use codex_git::GhostSnapshotConfig; /// Maximum number of bytes of the documentation that will be embedded. Larger /// files are *silently truncated* to this size so we do not take up too much of /// the context window. pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB pub const CONFIG_TOML_FILE: &str = "config.toml"; #[cfg(test)] pub(crate) fn test_config() -> Config { let codex_home = tempdir().expect("create temp dir"); Config::load_from_base_config_with_overrides( ConfigToml::default(), ConfigOverrides::default(), codex_home.path().to_path_buf(), ) .expect("load default test config") } /// Application configuration loaded from disk and merged with overrides. #[derive(Debug, Clone, PartialEq)] pub struct Config { /// Provenance for how this [`Config`] was derived (merged layers + enforced /// requirements). pub config_layer_stack: ConfigLayerStack, /// Optional override of model selection. pub model: Option<String>, /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max". pub review_model: String, /// Size of the context window for the model, in tokens. pub model_context_window: Option<i64>, /// Token usage threshold triggering auto-compaction of conversation history. pub model_auto_compact_token_limit: Option<i64>, /// Key into the model_providers map that specifies which provider to use. 
pub model_provider_id: String, /// Info needed to make an API request to the model. pub model_provider: ModelProviderInfo, /// Approval policy for executing commands. pub approval_policy: Constrained<AskForApproval>, pub sandbox_policy: Constrained<SandboxPolicy>, /// True if the user passed in an override or set a value in config.toml /// for either of approval_policy or sandbox_mode. pub did_user_set_custom_approval_policy_or_sandbox_mode: bool, /// On Windows, indicates that a previously configured workspace-write sandbox /// was coerced to read-only because native auto mode is unsupported. pub forced_auto_mode_downgraded_on_windows: bool, pub shell_environment_policy: ShellEnvironmentPolicy, /// When `true`, `AgentReasoning` events emitted by the backend will be /// suppressed from the frontend output. This can reduce visual noise when /// users are only interested in the final agent responses. pub hide_agent_reasoning: bool, /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. /// Defaults to `false`. pub show_raw_agent_reasoning: bool, /// User-provided instructions from AGENTS.md. pub user_instructions: Option<String>, /// Base instructions override. pub base_instructions: Option<String>, /// Developer instructions override injected as a separate message. pub developer_instructions: Option<String>, /// Compact prompt override. pub compact_prompt: Option<String>, /// Optional external notifier command. When set, Codex will spawn this /// program after each completed *turn* (i.e. when the agent finishes /// processing a user submission). The value must be the full command /// broken into argv tokens **without** the trailing JSON argument - Codex /// appends one extra argument containing a JSON payload describing the /// event. 
/// /// Example `~/.codex/config.toml` snippet: /// /// ```toml /// notify = ["notify-send", "Codex"] /// ``` /// /// which will be invoked as: /// /// ```shell /// notify-send Codex '{"type":"agent-turn-complete","turn-id":"12345"}' /// ``` /// /// If unset the feature is disabled. pub notify: Option<Vec<String>>, /// TUI notifications preference. When set, the TUI will send OSC 9 notifications on approvals /// and turn completions when not focused. pub tui_notifications: Notifications, /// Enable ASCII animations and shimmer effects in the TUI. pub animations: bool, /// Show startup tooltips in the TUI welcome screen. pub show_tooltips: bool, /// Override the events-per-wheel-tick factor for TUI2 scroll normalization. /// /// This is the same `tui.scroll_events_per_tick` value from `config.toml`, plumbed through the /// merged [`Config`] object (see [`Tui`]) so TUI2 can normalize scroll event density per /// terminal. pub tui_scroll_events_per_tick: Option<u16>, /// Override the number of lines applied per wheel tick in TUI2. /// /// This is the same `tui.scroll_wheel_lines` value from `config.toml` (see [`Tui`]). TUI2 /// applies it to wheel-like scroll streams. Trackpad-like scrolling uses a separate /// `tui.scroll_trackpad_lines` setting. pub tui_scroll_wheel_lines: Option<u16>, /// Override the number of lines per tick-equivalent used for trackpad scrolling in TUI2. /// /// This is the same `tui.scroll_trackpad_lines` value from `config.toml` (see [`Tui`]). pub tui_scroll_trackpad_lines: Option<u16>, /// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2. /// /// This is the same `tui.scroll_trackpad_accel_events` value from `config.toml` (see [`Tui`]). pub tui_scroll_trackpad_accel_events: Option<u16>, /// Trackpad acceleration: maximum multiplier applied to trackpad-like streams in TUI2. /// /// This is the same `tui.scroll_trackpad_accel_max` value from `config.toml` (see [`Tui`]). 
pub tui_scroll_trackpad_accel_max: Option<u16>, /// Control how TUI2 interprets mouse scroll input (wheel vs trackpad). /// /// This is the same `tui.scroll_mode` value from `config.toml` (see [`Tui`]). pub tui_scroll_mode: ScrollInputMode, /// Override the wheel tick detection threshold (ms) for TUI2 auto scroll mode. /// /// This is the same `tui.scroll_wheel_tick_detect_max_ms` value from `config.toml` (see /// [`Tui`]). pub tui_scroll_wheel_tick_detect_max_ms: Option<u64>, /// Override the wheel-like end-of-stream threshold (ms) for TUI2 auto scroll mode. /// /// This is the same `tui.scroll_wheel_like_max_duration_ms` value from `config.toml` (see /// [`Tui`]). pub tui_scroll_wheel_like_max_duration_ms: Option<u64>, /// Invert mouse scroll direction for TUI2. /// /// This is the same `tui.scroll_invert` value from `config.toml` (see [`Tui`]) and is applied /// consistently to both mouse wheels and trackpads. pub tui_scroll_invert: bool, /// The directory that should be treated as the current working directory /// for the session. All relative paths inside the business-logic layer are /// resolved against this path. pub cwd: PathBuf, /// Preferred store for CLI auth credentials. /// file (default): Use a file in the Codex home directory. /// keyring: Use an OS-specific keyring service. /// auto: Use the OS-specific keyring service if available, otherwise use a file. pub cli_auth_credentials_store_mode: AuthCredentialsStoreMode, /// Definition for MCP servers that Codex can reach out to for tool calls. pub mcp_servers: HashMap<String, McpServerConfig>, /// Preferred store for MCP OAuth credentials. /// keyring: Use an OS-specific keyring service. /// Credentials stored in the keyring will only be readable by Codex unless the user explicitly grants access via OS-level keyring access. 
/// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 /// file: CODEX_HOME/.credentials.json /// This file will be readable to Codex and other applications running as the same user. /// auto (default): keyring if available, otherwise file. pub mcp_oauth_credentials_store_mode: OAuthCredentialsStoreMode, /// Combined provider map (defaults merged with user-defined overrides). pub model_providers: HashMap<String, ModelProviderInfo>, /// Maximum number of bytes to include from an AGENTS.md project doc file. pub project_doc_max_bytes: usize, /// Additional filenames to try when looking for project-level docs. pub project_doc_fallback_filenames: Vec<String>, // todo(aibrahim): this should be used in the override model family /// Token budget applied when storing tool/function outputs in the context manager. pub tool_output_token_limit: Option<usize>, /// Directory containing all Codex state (defaults to `~/.codex` but can be /// overridden by the `CODEX_HOME` environment variable). pub codex_home: PathBuf, /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. pub history: History, /// Optional URI-based file opener. If set, citations to files in the model /// output will be hyperlinked using the specified URI scheme. pub file_opener: UriBasedFileOpener, /// Path to the `codex-linux-sandbox` executable. This must be set if /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this /// cannot be set in the config file: it must be set in code via /// [`ConfigOverrides`]. /// /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`. pub codex_linux_sandbox_exe: Option<PathBuf>, /// Value to use for `reasoning.effort` when making a request using the /// Responses API. pub model_reasoning_effort: Option<ReasoningEffort>, /// If not "none", the value to use for `reasoning.summary` when making a /// request using the Responses API. 
pub model_reasoning_summary: ReasoningSummary, /// Optional override to force-enable reasoning summaries for the configured model. pub model_supports_reasoning_summaries: Option<bool>, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option<Verbosity>, /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). pub chatgpt_base_url: String, /// When set, restricts ChatGPT login to a specific workspace identifier. pub forced_chatgpt_workspace_id: Option<String>, /// When set, restricts the login mechanism users may use. pub forced_login_method: Option<ForcedLoginMethod>, /// Include the `apply_patch` tool for models that benefit from invoking /// file edits as a structured tool call. When unset, this falls back to the /// model family's default preference. pub include_apply_patch_tool: bool, pub tools_web_search_request: bool, /// If set to `true`, used only the experimental unified exec tool. pub use_experimental_unified_exec_tool: bool, /// Settings for ghost snapshots (used for undo). pub ghost_snapshot: GhostSnapshotConfig, /// Centralized feature flags; source of truth for feature gating. pub features: Features, /// The active profile name used to derive this `Config` (if any). pub active_profile: Option<String>, /// The currently active project config, resolved by checking if cwd: /// is (1) part of a git repo, (2) a git worktree, or (3) just using the cwd pub active_project: ProjectConfig, /// Tracks whether the Windows onboarding screen has been acknowledged. pub windows_wsl_setup_acknowledged: bool, /// Collection of various notices we show the user pub notices: Notice, /// When `true`, checks for Codex updates on startup and surfaces update prompts. /// Set to `false` only if your Codex updates are centrally managed. /// Defaults to `true`. pub check_for_update_on_startup: bool, /// When true, disables burst-paste detection for typed input entirely. 
/// All characters are inserted as they are received, and no buffering /// or placeholder replacement will occur for fast keypress bursts. pub disable_paste_burst: bool, /// OTEL configuration (exporter type, endpoint, headers, etc.). pub otel: crate::config::types::OtelConfig, } #[derive(Debug, Clone, Default)] pub struct ConfigBuilder { codex_home: Option<PathBuf>, cli_overrides: Option<Vec<(String, TomlValue)>>, harness_overrides: Option<ConfigOverrides>, loader_overrides: Option<LoaderOverrides>, } impl ConfigBuilder { pub fn codex_home(mut self, codex_home: PathBuf) -> Self { self.codex_home = Some(codex_home); self } pub fn cli_overrides(mut self, cli_overrides: Vec<(String, TomlValue)>) -> Self { self.cli_overrides = Some(cli_overrides); self } pub fn harness_overrides(mut self, harness_overrides: ConfigOverrides) -> Self { self.harness_overrides = Some(harness_overrides); self } pub fn loader_overrides(mut self, loader_overrides: LoaderOverrides) -> Self { self.loader_overrides = Some(loader_overrides); self } pub async fn build(self) -> std::io::Result<Config> { let Self { codex_home, cli_overrides, harness_overrides, loader_overrides, } = self; let codex_home = codex_home.map_or_else(find_codex_home, std::io::Result::Ok)?; let cli_overrides = cli_overrides.unwrap_or_default(); let harness_overrides = harness_overrides.unwrap_or_default(); let loader_overrides = loader_overrides.unwrap_or_default(); let cwd = match harness_overrides.cwd.as_deref() { Some(path) => AbsolutePathBuf::try_from(path)?, None => AbsolutePathBuf::current_dir()?, }; let config_layer_stack = load_config_layers_state(&codex_home, Some(cwd), &cli_overrides, loader_overrides) .await?; let merged_toml = config_layer_stack.effective_config(); // Note that each layer in ConfigLayerStack should have resolved // relative paths to absolute paths based on the parent folder of the // respective config file, so we should be safe to deserialize without // AbsolutePathBufGuard here. 
let config_toml: ConfigToml = merged_toml .try_into() .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; Config::load_config_with_layer_stack( config_toml, harness_overrides, codex_home, config_layer_stack, ) } } impl Config { /// This is the preferred way to create an instance of [Config]. pub async fn load_with_cli_overrides( cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result<Self> { ConfigBuilder::default() .cli_overrides(cli_overrides) .build() .await } /// This is a secondary way of creating [Config], which is appropriate when /// the harness is meant to be used with a specific configuration that /// ignores user settings. For example, the `codex exec` subcommand is /// designed to use [AskForApproval::Never] exclusively. /// /// Further, [ConfigOverrides] contains some options that are not supported /// in [ConfigToml], such as `cwd` and `codex_linux_sandbox_exe`. pub async fn load_with_cli_overrides_and_harness_overrides( cli_overrides: Vec<(String, TomlValue)>, harness_overrides: ConfigOverrides, ) -> std::io::Result<Self> { ConfigBuilder::default() .cli_overrides(cli_overrides) .harness_overrides(harness_overrides) .build() .await } } /// DEPRECATED: Use [Config::load_with_cli_overrides()] instead because working /// with [ConfigToml] directly means that [ConfigRequirements] have not been /// applied yet, which risks failing to enforce required constraints. 
pub async fn load_config_as_toml_with_cli_overrides( codex_home: &Path, cwd: &AbsolutePathBuf, cli_overrides: Vec<(String, TomlValue)>, ) -> std::io::Result<ConfigToml> { let config_layer_stack = load_config_layers_state( codex_home, Some(cwd.clone()), &cli_overrides, LoaderOverrides::default(), ) .await?; let merged_toml = config_layer_stack.effective_config(); let cfg = deserialize_config_toml_with_base(merged_toml, codex_home).map_err(|e| { tracing::error!("Failed to deserialize overridden config: {e}"); e })?; Ok(cfg) } fn deserialize_config_toml_with_base( root_value: TomlValue, config_base_dir: &Path, ) -> std::io::Result<ConfigToml> { // This guard ensures that any relative paths that is deserialized into an // [AbsolutePathBuf] is resolved against `config_base_dir`. let _guard = AbsolutePathBufGuard::new(config_base_dir); root_value .try_into() .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) } pub async fn load_global_mcp_servers( codex_home: &Path, ) -> std::io::Result<BTreeMap<String, McpServerConfig>> { // In general, Config::load_with_cli_overrides() should be used to load the // full config with requirements.toml applied, but in this case, we need // access to the raw TOML in order to warn the user about deprecated fields. // // Note that a more precise way to do this would be to audit the individual // config layers for deprecated fields rather than reporting on the merged // result. let cli_overrides = Vec::<(String, TomlValue)>::new(); // There is no cwd/project context for this query, so this will not include // MCP servers defined in in-repo .codex/ folders. 
let cwd: Option<AbsolutePathBuf> = None; let config_layer_stack = load_config_layers_state(codex_home, cwd, &cli_overrides, LoaderOverrides::default()) .await?; let merged_toml = config_layer_stack.effective_config(); let Some(servers_value) = merged_toml.get("mcp_servers") else { return Ok(BTreeMap::new()); }; ensure_no_inline_bearer_tokens(servers_value)?; servers_value .clone() .try_into() .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) } /// We briefly allowed plain text bearer_token fields in MCP server configs. /// We want to warn people who recently added these fields but can remove this after a few months. fn ensure_no_inline_bearer_tokens(value: &TomlValue) -> std::io::Result<()> { let Some(servers_table) = value.as_table() else { return Ok(()); }; for (server_name, server_value) in servers_table { if let Some(server_table) = server_value.as_table() && server_table.contains_key("bearer_token") { let message = format!( "mcp_servers.{server_name} uses unsupported `bearer_token`; set `bearer_token_env_var`." ); return Err(std::io::Error::new(ErrorKind::InvalidData, message)); } } Ok(()) } pub(crate) fn set_project_trust_level_inner( doc: &mut DocumentMut, project_path: &Path, trust_level: TrustLevel, ) -> anyhow::Result<()> { // Ensure we render a human-friendly structure: // // [projects] // [projects."/path/to/project"] // trust_level = "trusted" or "untrusted" // // rather than inline tables like: // // [projects] // "/path/to/project" = { trust_level = "trusted" } let project_key = project_path.to_string_lossy().to_string(); // Ensure top-level `projects` exists as a non-inline, explicit table. If it // exists but was previously represented as a non-table (e.g., inline), // replace it with an explicit table. { let root = doc.as_table_mut(); // If `projects` exists but isn't a standard table (e.g., it's an inline table), // convert it to an explicit table while preserving existing entries. 
let existing_projects = root.get("projects").cloned(); if existing_projects.as_ref().is_none_or(|i| !i.is_table()) { let mut projects_tbl = toml_edit::Table::new(); projects_tbl.set_implicit(true); // If there was an existing inline table, migrate its entries to explicit tables. if let Some(inline_tbl) = existing_projects.as_ref().and_then(|i| i.as_inline_table()) { for (k, v) in inline_tbl.iter() { if let Some(inner_tbl) = v.as_inline_table() { let new_tbl = inner_tbl.clone().into_table(); projects_tbl.insert(k, toml_edit::Item::Table(new_tbl)); } } } root.insert("projects", toml_edit::Item::Table(projects_tbl)); } } let Some(projects_tbl) = doc["projects"].as_table_mut() else { return Err(anyhow::anyhow!( "projects table missing after initialization" )); }; // Ensure the per-project entry is its own explicit table. If it exists but // is not a table (e.g., an inline table), replace it with an explicit table. let needs_proj_table = !projects_tbl.contains_key(project_key.as_str()) || projects_tbl .get(project_key.as_str()) .and_then(|i| i.as_table()) .is_none(); if needs_proj_table { projects_tbl.insert(project_key.as_str(), toml_edit::table()); } let Some(proj_tbl) = projects_tbl .get_mut(project_key.as_str()) .and_then(|i| i.as_table_mut()) else { return Err(anyhow::anyhow!("project table missing for {project_key}")); }; proj_tbl.set_implicit(false); proj_tbl["trust_level"] = toml_edit::value(trust_level.to_string()); Ok(()) } /// Patch `CODEX_HOME/config.toml` project state to set trust level. /// Use with caution. 
pub fn set_project_trust_level( codex_home: &Path, project_path: &Path, trust_level: TrustLevel, ) -> anyhow::Result<()> { use crate::config::edit::ConfigEditsBuilder; ConfigEditsBuilder::new(codex_home) .set_project_trust_level(project_path, trust_level) .apply_blocking() } /// Save the default OSS provider preference to config.toml pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::Result<()> { // Validate that the provider is one of the known OSS providers match provider { LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => { // Valid provider, continue } _ => { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, format!( "Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}" ), )); } } let config_path = codex_home.join(CONFIG_TOML_FILE); // Read existing config or create empty string if file doesn't exist let content = match std::fs::read_to_string(&config_path) { Ok(content) => content, Err(e) if e.kind() == std::io::ErrorKind::NotFound => String::new(), Err(e) => return Err(e), }; // Parse as DocumentMut for editing while preserving structure let mut doc = content.parse::<DocumentMut>().map_err(|e| { std::io::Error::new( std::io::ErrorKind::InvalidData, format!("failed to parse config.toml: {e}"), ) })?; // Set the default_oss_provider at root level use toml_edit::value; doc["oss_provider"] = value(provider); // Write the modified document back std::fs::write(&config_path, doc.to_string())?; Ok(()) } /// Base config deserialized from ~/.codex/config.toml. #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct ConfigToml { /// Optional override of model selection. pub model: Option<String>, /// Review model override used by the `/review` feature. pub review_model: Option<String>, /// Provider to use from the model_providers map. pub model_provider: Option<String>, /// Size of the context window for the model, in tokens. 
pub model_context_window: Option<i64>, /// Token usage threshold triggering auto-compaction of conversation history. pub model_auto_compact_token_limit: Option<i64>, /// Default approval policy for executing commands. pub approval_policy: Option<AskForApproval>, #[serde(default)] pub shell_environment_policy: ShellEnvironmentPolicyToml, /// Sandbox mode to use. pub sandbox_mode: Option<SandboxMode>, /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`. pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>, /// Optional external command to spawn for end-user notifications. #[serde(default)] pub notify: Option<Vec<String>>, /// System instructions. pub instructions: Option<String>, /// Developer instructions inserted as a `developer` role message. #[serde(default)] pub developer_instructions: Option<String>, /// Compact prompt used for history compaction. pub compact_prompt: Option<String>, /// When set, restricts ChatGPT login to a specific workspace identifier. #[serde(default)] pub forced_chatgpt_workspace_id: Option<String>, /// When set, restricts the login mechanism users may use. #[serde(default)] pub forced_login_method: Option<ForcedLoginMethod>, /// Preferred backend for storing CLI auth credentials. /// file (default): Use a file in the Codex home directory. /// keyring: Use an OS-specific keyring service. /// auto: Use the keyring if available, otherwise use a file. #[serde(default)] pub cli_auth_credentials_store: Option<AuthCredentialsStoreMode>, /// Definition for MCP servers that Codex can reach out to for tool calls. #[serde(default)] pub mcp_servers: HashMap<String, McpServerConfig>, /// Preferred backend for storing MCP OAuth credentials. /// keyring: Use an OS-specific keyring service. /// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 /// file: Use a file in the Codex home directory. /// auto (default): Use the OS-specific keyring service if available, otherwise use a file. 
#[serde(default)] pub mcp_oauth_credentials_store: Option<OAuthCredentialsStoreMode>, /// User-defined provider entries that extend/override the built-in list. #[serde(default)] pub model_providers: HashMap<String, ModelProviderInfo>, /// Maximum number of bytes to include from an AGENTS.md project doc file. pub project_doc_max_bytes: Option<usize>, /// Ordered list of fallback filenames to look for when AGENTS.md is missing. pub project_doc_fallback_filenames: Option<Vec<String>>, /// Token budget applied when storing tool/function outputs in the context manager. pub tool_output_token_limit: Option<usize>, /// Profile to use from the `profiles` map. pub profile: Option<String>, /// Named profiles to facilitate switching between different configurations. #[serde(default)] pub profiles: HashMap<String, ConfigProfile>, /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. #[serde(default)] pub history: Option<History>, /// Optional URI-based file opener. If set, citations to files in the model /// output will be hyperlinked using the specified URI scheme. pub file_opener: Option<UriBasedFileOpener>, /// Collection of settings that are specific to the TUI. pub tui: Option<Tui>, /// When set to `true`, `AgentReasoning` events will be hidden from the /// UI/output. Defaults to `false`. pub hide_agent_reasoning: Option<bool>, /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. /// Defaults to `false`. pub show_raw_agent_reasoning: Option<bool>, pub model_reasoning_effort: Option<ReasoningEffort>, pub model_reasoning_summary: Option<ReasoningSummary>, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option<Verbosity>, /// Override to force-enable reasoning summaries for the configured model. pub model_supports_reasoning_summaries: Option<bool>, /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). 
pub chatgpt_base_url: Option<String>, pub projects: Option<HashMap<String, ProjectConfig>>, /// Nested tools section for feature toggles pub tools: Option<ToolsToml>, /// Centralized feature flags (new). Prefer this over individual toggles. #[serde(default)] pub features: Option<FeaturesToml>, /// Settings for ghost snapshots (used for undo). #[serde(default)] pub ghost_snapshot: Option<GhostSnapshotToml>, /// Markers used to detect the project root when searching parent /// directories for `.codex` folders. Defaults to [".git"] when unset. #[serde(default)] pub project_root_markers: Option<Vec<String>>, /// When `true`, checks for Codex updates on startup and surfaces update prompts. /// Set to `false` only if your Codex updates are centrally managed. /// Defaults to `true`. pub check_for_update_on_startup: Option<bool>, /// When true, disables burst-paste detection for typed input entirely. /// All characters are inserted as they are received, and no buffering /// or placeholder replacement will occur for fast keypress bursts. pub disable_paste_burst: Option<bool>, /// OTEL configuration. pub otel: Option<crate::config::types::OtelConfigToml>, /// Tracks whether the Windows onboarding screen has been acknowledged. pub windows_wsl_setup_acknowledged: Option<bool>, /// Collection of in-product notices (different from notifications) /// See [`crate::config::types::Notices`] for more details pub notice: Option<Notice>, /// Legacy, now use features pub experimental_instructions_file: Option<AbsolutePathBuf>, pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/edit.rs
codex-rs/core/src/config/edit.rs
use crate::config::CONFIG_TOML_FILE; use crate::config::types::McpServerConfig; use crate::config::types::Notice; use anyhow::Context; use codex_protocol::config_types::TrustLevel; use codex_protocol::openai_models::ReasoningEffort; use std::collections::BTreeMap; use std::path::Path; use std::path::PathBuf; use tempfile::NamedTempFile; use tokio::task; use toml_edit::DocumentMut; use toml_edit::Item as TomlItem; use toml_edit::Table as TomlTable; use toml_edit::value; /// Discrete config mutations supported by the persistence engine. #[derive(Clone, Debug)] pub enum ConfigEdit { /// Update the active (or default) model selection and optional reasoning effort. SetModel { model: Option<String>, effort: Option<ReasoningEffort>, }, /// Toggle the acknowledgement flag under `[notice]`. SetNoticeHideFullAccessWarning(bool), /// Toggle the Windows world-writable directories warning acknowledgement flag. SetNoticeHideWorldWritableWarning(bool), /// Toggle the rate limit model nudge acknowledgement flag. SetNoticeHideRateLimitModelNudge(bool), /// Toggle the Windows onboarding acknowledgement flag. SetWindowsWslSetupAcknowledged(bool), /// Toggle the model migration prompt acknowledgement flag. SetNoticeHideModelMigrationPrompt(String, bool), /// Record that a migration prompt was shown for an old->new model mapping. RecordModelMigrationSeen { from: String, to: String }, /// Replace the entire `[mcp_servers]` table. ReplaceMcpServers(BTreeMap<String, McpServerConfig>), /// Set trust_level under `[projects."<path>"]`, /// migrating inline tables to explicit tables. SetProjectTrustLevel { path: PathBuf, level: TrustLevel }, /// Set the value stored at the exact dotted path. SetPath { segments: Vec<String>, value: TomlItem, }, /// Remove the value stored at the exact dotted path. 
ClearPath { segments: Vec<String> }, } // TODO(jif) move to a dedicated file mod document_helpers { use crate::config::types::McpServerConfig; use crate::config::types::McpServerTransportConfig; use toml_edit::Array as TomlArray; use toml_edit::InlineTable; use toml_edit::Item as TomlItem; use toml_edit::Table as TomlTable; use toml_edit::value; pub(super) fn ensure_table_for_write(item: &mut TomlItem) -> Option<&mut TomlTable> { match item { TomlItem::Table(table) => Some(table), TomlItem::Value(value) => { if let Some(inline) = value.as_inline_table() { *item = TomlItem::Table(table_from_inline(inline)); item.as_table_mut() } else { *item = TomlItem::Table(new_implicit_table()); item.as_table_mut() } } TomlItem::None => { *item = TomlItem::Table(new_implicit_table()); item.as_table_mut() } _ => None, } } pub(super) fn ensure_table_for_read(item: &mut TomlItem) -> Option<&mut TomlTable> { match item { TomlItem::Table(table) => Some(table), TomlItem::Value(value) => { let inline = value.as_inline_table()?; *item = TomlItem::Table(table_from_inline(inline)); item.as_table_mut() } _ => None, } } fn serialize_mcp_server_table(config: &McpServerConfig) -> TomlTable { let mut entry = TomlTable::new(); entry.set_implicit(false); match &config.transport { McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => { entry["command"] = value(command.clone()); if !args.is_empty() { entry["args"] = array_from_iter(args.iter().cloned()); } if let Some(env) = env && !env.is_empty() { entry["env"] = table_from_pairs(env.iter()); } if !env_vars.is_empty() { entry["env_vars"] = array_from_iter(env_vars.iter().cloned()); } if let Some(cwd) = cwd { entry["cwd"] = value(cwd.to_string_lossy().to_string()); } } McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, env_http_headers, } => { entry["url"] = value(url.clone()); if let Some(env_var) = bearer_token_env_var { entry["bearer_token_env_var"] = value(env_var.clone()); } if let 
Some(headers) = http_headers && !headers.is_empty() { entry["http_headers"] = table_from_pairs(headers.iter()); } if let Some(headers) = env_http_headers && !headers.is_empty() { entry["env_http_headers"] = table_from_pairs(headers.iter()); } } } if !config.enabled { entry["enabled"] = value(false); } if let Some(timeout) = config.startup_timeout_sec { entry["startup_timeout_sec"] = value(timeout.as_secs_f64()); } if let Some(timeout) = config.tool_timeout_sec { entry["tool_timeout_sec"] = value(timeout.as_secs_f64()); } if let Some(enabled_tools) = &config.enabled_tools && !enabled_tools.is_empty() { entry["enabled_tools"] = array_from_iter(enabled_tools.iter().cloned()); } if let Some(disabled_tools) = &config.disabled_tools && !disabled_tools.is_empty() { entry["disabled_tools"] = array_from_iter(disabled_tools.iter().cloned()); } entry } pub(super) fn serialize_mcp_server(config: &McpServerConfig) -> TomlItem { TomlItem::Table(serialize_mcp_server_table(config)) } pub(super) fn serialize_mcp_server_inline(config: &McpServerConfig) -> InlineTable { serialize_mcp_server_table(config).into_inline_table() } pub(super) fn merge_inline_table(existing: &mut InlineTable, replacement: InlineTable) { existing.retain(|key, _| replacement.get(key).is_some()); for (key, value) in replacement.iter() { if let Some(existing_value) = existing.get_mut(key) { let mut updated_value = value.clone(); *updated_value.decor_mut() = existing_value.decor().clone(); *existing_value = updated_value; } else { existing.insert(key.to_string(), value.clone()); } } } fn table_from_inline(inline: &InlineTable) -> TomlTable { let mut table = new_implicit_table(); for (key, value) in inline.iter() { let mut value = value.clone(); let decor = value.decor_mut(); decor.set_suffix(""); table.insert(key, TomlItem::Value(value)); } table } pub(super) fn new_implicit_table() -> TomlTable { let mut table = TomlTable::new(); table.set_implicit(true); table } fn array_from_iter<I>(iter: I) -> TomlItem where 
I: Iterator<Item = String>, { let mut array = TomlArray::new(); for value in iter { array.push(value); } TomlItem::Value(array.into()) } fn table_from_pairs<'a, I>(pairs: I) -> TomlItem where I: IntoIterator<Item = (&'a String, &'a String)>, { let mut entries: Vec<_> = pairs.into_iter().collect(); entries.sort_by(|(a, _), (b, _)| a.cmp(b)); let mut table = TomlTable::new(); table.set_implicit(false); for (key, val) in entries { table.insert(key, value(val.clone())); } TomlItem::Table(table) } } struct ConfigDocument { doc: DocumentMut, profile: Option<String>, } #[derive(Copy, Clone)] enum Scope { Global, Profile, } #[derive(Copy, Clone)] enum TraversalMode { Create, Existing, } impl ConfigDocument { fn new(doc: DocumentMut, profile: Option<String>) -> Self { Self { doc, profile } } fn apply(&mut self, edit: &ConfigEdit) -> anyhow::Result<bool> { match edit { ConfigEdit::SetModel { model, effort } => Ok({ let mut mutated = false; mutated |= self.write_profile_value( &["model"], model.as_ref().map(|model_value| value(model_value.clone())), ); mutated |= self.write_profile_value( &["model_reasoning_effort"], effort.map(|effort| value(effort.to_string())), ); mutated }), ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, "hide_full_access_warning"], value(*acknowledged), )), ConfigEdit::SetNoticeHideWorldWritableWarning(acknowledged) => Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, "hide_world_writable_warning"], value(*acknowledged), )), ConfigEdit::SetNoticeHideRateLimitModelNudge(acknowledged) => Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, "hide_rate_limit_model_nudge"], value(*acknowledged), )), ConfigEdit::SetNoticeHideModelMigrationPrompt(migration_config, acknowledged) => { Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, migration_config.as_str()], value(*acknowledged), )) } ConfigEdit::RecordModelMigrationSeen { from, to } => Ok(self.write_value( Scope::Global, 
&[Notice::TABLE_KEY, "model_migrations", from.as_str()], value(to.clone()), )), ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged) => Ok(self.write_value( Scope::Global, &["windows_wsl_setup_acknowledged"], value(*acknowledged), )), ConfigEdit::ReplaceMcpServers(servers) => Ok(self.replace_mcp_servers(servers)), ConfigEdit::SetPath { segments, value } => Ok(self.insert(segments, value.clone())), ConfigEdit::ClearPath { segments } => Ok(self.clear_owned(segments)), ConfigEdit::SetProjectTrustLevel { path, level } => { // Delegate to the existing, tested logic in config.rs to // ensure tables are explicit and migration is preserved. crate::config::set_project_trust_level_inner( &mut self.doc, path.as_path(), *level, )?; Ok(true) } } } fn write_profile_value(&mut self, segments: &[&str], value: Option<TomlItem>) -> bool { match value { Some(item) => self.write_value(Scope::Profile, segments, item), None => self.clear(Scope::Profile, segments), } } fn write_value(&mut self, scope: Scope, segments: &[&str], value: TomlItem) -> bool { let resolved = self.scoped_segments(scope, segments); self.insert(&resolved, value) } fn clear(&mut self, scope: Scope, segments: &[&str]) -> bool { let resolved = self.scoped_segments(scope, segments); self.remove(&resolved) } fn clear_owned(&mut self, segments: &[String]) -> bool { self.remove(segments) } fn replace_mcp_servers(&mut self, servers: &BTreeMap<String, McpServerConfig>) -> bool { if servers.is_empty() { return self.clear(Scope::Global, &["mcp_servers"]); } let root = self.doc.as_table_mut(); if !root.contains_key("mcp_servers") { root.insert( "mcp_servers", TomlItem::Table(document_helpers::new_implicit_table()), ); } let Some(item) = root.get_mut("mcp_servers") else { return false; }; if document_helpers::ensure_table_for_write(item).is_none() { *item = TomlItem::Table(document_helpers::new_implicit_table()); } let Some(table) = item.as_table_mut() else { return false; }; let keys_to_remove: Vec<String> = table .iter() 
.map(|(key, _)| key.to_string()) .filter(|key| !servers.contains_key(key.as_str())) .collect(); for key in keys_to_remove { table.remove(&key); } for (name, config) in servers { if let Some(existing) = table.get_mut(name.as_str()) { if let TomlItem::Value(value) = existing && let Some(inline) = value.as_inline_table_mut() { let replacement = document_helpers::serialize_mcp_server_inline(config); document_helpers::merge_inline_table(inline, replacement); } else { *existing = document_helpers::serialize_mcp_server(config); } } else { table.insert(name, document_helpers::serialize_mcp_server(config)); } } true } fn scoped_segments(&self, scope: Scope, segments: &[&str]) -> Vec<String> { let resolved: Vec<String> = segments .iter() .map(|segment| (*segment).to_string()) .collect(); if matches!(scope, Scope::Profile) && resolved.first().is_none_or(|segment| segment != "profiles") && let Some(profile) = self.profile.as_deref() { let mut scoped = Vec::with_capacity(resolved.len() + 2); scoped.push("profiles".to_string()); scoped.push(profile.to_string()); scoped.extend(resolved); return scoped; } resolved } fn insert(&mut self, segments: &[String], value: TomlItem) -> bool { let Some((last, parents)) = segments.split_last() else { return false; }; let Some(parent) = self.descend(parents, TraversalMode::Create) else { return false; }; let mut value = value; if let Some(existing) = parent.get(last) { Self::preserve_decor(existing, &mut value); } parent[last] = value; true } fn remove(&mut self, segments: &[String]) -> bool { let Some((last, parents)) = segments.split_last() else { return false; }; let Some(parent) = self.descend(parents, TraversalMode::Existing) else { return false; }; parent.remove(last).is_some() } fn descend(&mut self, segments: &[String], mode: TraversalMode) -> Option<&mut TomlTable> { let mut current = self.doc.as_table_mut(); for segment in segments { match mode { TraversalMode::Create => { if !current.contains_key(segment.as_str()) { current.insert( 
segment.as_str(), TomlItem::Table(document_helpers::new_implicit_table()), ); } let item = current.get_mut(segment.as_str())?; current = document_helpers::ensure_table_for_write(item)?; } TraversalMode::Existing => { let item = current.get_mut(segment.as_str())?; current = document_helpers::ensure_table_for_read(item)?; } } } Some(current) } fn preserve_decor(existing: &TomlItem, replacement: &mut TomlItem) { match (existing, replacement) { (TomlItem::Table(existing_table), TomlItem::Table(replacement_table)) => { replacement_table .decor_mut() .clone_from(existing_table.decor()); for (key, existing_item) in existing_table.iter() { if let (Some(existing_key), Some(mut replacement_key)) = (existing_table.key(key), replacement_table.key_mut(key)) { replacement_key .leaf_decor_mut() .clone_from(existing_key.leaf_decor()); replacement_key .dotted_decor_mut() .clone_from(existing_key.dotted_decor()); } if let Some(replacement_item) = replacement_table.get_mut(key) { Self::preserve_decor(existing_item, replacement_item); } } } (TomlItem::Value(existing_value), TomlItem::Value(replacement_value)) => { replacement_value .decor_mut() .clone_from(existing_value.decor()); } _ => {} } } } /// Persist edits using a blocking strategy. pub fn apply_blocking( codex_home: &Path, profile: Option<&str>, edits: &[ConfigEdit], ) -> anyhow::Result<()> { if edits.is_empty() { return Ok(()); } let config_path = codex_home.join(CONFIG_TOML_FILE); let serialized = match std::fs::read_to_string(&config_path) { Ok(contents) => contents, Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(), Err(err) => return Err(err.into()), }; let doc = if serialized.is_empty() { DocumentMut::new() } else { serialized.parse::<DocumentMut>()? 
}; let profile = profile.map(ToOwned::to_owned).or_else(|| { doc.get("profile") .and_then(|item| item.as_str()) .map(ToOwned::to_owned) }); let mut document = ConfigDocument::new(doc, profile); let mut mutated = false; for edit in edits { mutated |= document.apply(edit)?; } if !mutated { return Ok(()); } std::fs::create_dir_all(codex_home).with_context(|| { format!( "failed to create Codex home directory at {}", codex_home.display() ) })?; let tmp = NamedTempFile::new_in(codex_home)?; std::fs::write(tmp.path(), document.doc.to_string()).with_context(|| { format!( "failed to write temporary config file at {}", tmp.path().display() ) })?; tmp.persist(config_path)?; Ok(()) } /// Persist edits asynchronously by offloading the blocking writer. pub async fn apply( codex_home: &Path, profile: Option<&str>, edits: Vec<ConfigEdit>, ) -> anyhow::Result<()> { let codex_home = codex_home.to_path_buf(); let profile = profile.map(ToOwned::to_owned); task::spawn_blocking(move || apply_blocking(&codex_home, profile.as_deref(), &edits)) .await .context("config persistence task panicked")? } /// Fluent builder to batch config edits and apply them atomically. 
#[derive(Default)] pub struct ConfigEditsBuilder { codex_home: PathBuf, profile: Option<String>, edits: Vec<ConfigEdit>, } impl ConfigEditsBuilder { pub fn new(codex_home: &Path) -> Self { Self { codex_home: codex_home.to_path_buf(), profile: None, edits: Vec::new(), } } pub fn with_profile(mut self, profile: Option<&str>) -> Self { self.profile = profile.map(ToOwned::to_owned); self } pub fn set_model(mut self, model: Option<&str>, effort: Option<ReasoningEffort>) -> Self { self.edits.push(ConfigEdit::SetModel { model: model.map(ToOwned::to_owned), effort, }); self } pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged)); self } pub fn set_hide_world_writable_warning(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideWorldWritableWarning(acknowledged)); self } pub fn set_hide_rate_limit_model_nudge(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideRateLimitModelNudge(acknowledged)); self } pub fn set_hide_model_migration_prompt(mut self, model: &str, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideModelMigrationPrompt( model.to_string(), acknowledged, )); self } pub fn record_model_migration_seen(mut self, from: &str, to: &str) -> Self { self.edits.push(ConfigEdit::RecordModelMigrationSeen { from: from.to_string(), to: to.to_string(), }); self } pub fn set_windows_wsl_setup_acknowledged(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged)); self } pub fn replace_mcp_servers(mut self, servers: &BTreeMap<String, McpServerConfig>) -> Self { self.edits .push(ConfigEdit::ReplaceMcpServers(servers.clone())); self } pub fn set_project_trust_level<P: Into<PathBuf>>( mut self, project_path: P, trust_level: TrustLevel, ) -> Self { self.edits.push(ConfigEdit::SetProjectTrustLevel { path: project_path.into(), level: trust_level, }); 
self } /// Enable or disable a feature flag by key under the `[features]` table. pub fn set_feature_enabled(mut self, key: &str, enabled: bool) -> Self { self.edits.push(ConfigEdit::SetPath { segments: vec!["features".to_string(), key.to_string()], value: value(enabled), }); self } pub fn with_edits<I>(mut self, edits: I) -> Self where I: IntoIterator<Item = ConfigEdit>, { self.edits.extend(edits); self } /// Apply edits on a blocking thread. pub fn apply_blocking(self) -> anyhow::Result<()> { apply_blocking(&self.codex_home, self.profile.as_deref(), &self.edits) } /// Apply edits asynchronously via a blocking offload. pub async fn apply(self) -> anyhow::Result<()> { task::spawn_blocking(move || { apply_blocking(&self.codex_home, self.profile.as_deref(), &self.edits) }) .await .context("config persistence task panicked")? } } #[cfg(test)] mod tests { use super::*; use crate::config::types::McpServerTransportConfig; use codex_protocol::openai_models::ReasoningEffort; use pretty_assertions::assert_eq; use tempfile::tempdir; use toml::Value as TomlValue; #[test] fn blocking_set_model_top_level() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); apply_blocking( codex_home, None, &[ConfigEdit::SetModel { model: Some("gpt-5.1-codex".to_string()), effort: Some(ReasoningEffort::High), }], ) .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"model = "gpt-5.1-codex" model_reasoning_effort = "high" "#; assert_eq!(contents, expected); } #[test] fn builder_with_edits_applies_custom_paths() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); ConfigEditsBuilder::new(codex_home) .with_edits(vec![ConfigEdit::SetPath { segments: vec!["enabled".to_string()], value: value(true), }]) .apply_blocking() .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); assert_eq!(contents, "enabled = true\n"); } #[test] 
fn blocking_set_model_preserves_inline_table_contents() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); // Seed with inline tables for profiles to simulate common user config. std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"profile = "fast" profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } } "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetModel { model: Some("o4-mini".to_string()), effort: None, }], ) .expect("persist"); let raw = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let value: TomlValue = toml::from_str(&raw).expect("parse config"); // Ensure sandbox_mode is preserved under profiles.fast and model updated. let profiles_tbl = value .get("profiles") .and_then(|v| v.as_table()) .expect("profiles table"); let fast_tbl = profiles_tbl .get("fast") .and_then(|v| v.as_table()) .expect("fast table"); assert_eq!( fast_tbl.get("sandbox_mode").and_then(|v| v.as_str()), Some("strict") ); assert_eq!( fast_tbl.get("model").and_then(|v| v.as_str()), Some("o4-mini") ); } #[test] fn batch_write_table_upsert_preserves_inline_comments() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); let original = r#"approval_policy = "never" [mcp_servers.linear] name = "linear" # ok url = "https://linear.example" [mcp_servers.linear.http_headers] foo = "bar" [sandbox_workspace_write] # ok 3 network_access = false "#; std::fs::write(codex_home.join(CONFIG_TOML_FILE), original).expect("seed config"); apply_blocking( codex_home, None, &[ ConfigEdit::SetPath { segments: vec![ "mcp_servers".to_string(), "linear".to_string(), "url".to_string(), ], value: value("https://linear.example/v2"), }, ConfigEdit::SetPath { segments: vec![ "sandbox_workspace_write".to_string(), "network_access".to_string(), ], value: value(true), }, ], ) .expect("apply"); let updated = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = 
r#"approval_policy = "never" [mcp_servers.linear] name = "linear" # ok url = "https://linear.example/v2" [mcp_servers.linear.http_headers] foo = "bar" [sandbox_workspace_write] # ok 3 network_access = true "#; assert_eq!(updated, expected); } #[test] fn blocking_clear_model_removes_inline_table_entry() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"profile = "fast" profiles = { fast = { model = "gpt-4o", sandbox_mode = "strict" } } "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetModel { model: None, effort: Some(ReasoningEffort::High), }], ) .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"profile = "fast" [profiles.fast] sandbox_mode = "strict" model_reasoning_effort = "high" "#; assert_eq!(contents, expected); } #[test] fn blocking_set_model_scopes_to_active_profile() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"profile = "team" [profiles.team] model_reasoning_effort = "low" "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetModel { model: Some("o5-preview".to_string()), effort: Some(ReasoningEffort::Minimal), }], ) .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"profile = "team" [profiles.team] model_reasoning_effort = "minimal" model = "o5-preview" "#; assert_eq!(contents, expected); } #[test] fn blocking_set_model_with_explicit_profile() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"[profiles."team a"] model = "gpt-5.1-codex" "#, ) .expect("seed"); apply_blocking( codex_home, Some("team a"), &[ConfigEdit::SetModel { model: Some("o4-mini".to_string()), effort: None, }], ) .expect("persist"); let contents = 
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"[profiles."team a"] model = "o4-mini" "#; assert_eq!(contents, expected); } #[test] fn blocking_set_hide_full_access_warning_preserves_table() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"# Global comment [notice] # keep me existing = "value" "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetNoticeHideFullAccessWarning(true)], ) .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"# Global comment [notice] # keep me existing = "value" hide_full_access_warning = true "#; assert_eq!(contents, expected); } #[test] fn blocking_set_hide_rate_limit_model_nudge_preserves_table() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"[notice] existing = "value" "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetNoticeHideRateLimitModelNudge(true)], ) .expect("persist"); let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config"); let expected = r#"[notice] existing = "value" hide_rate_limit_model_nudge = true "#; assert_eq!(contents, expected); } #[test] fn blocking_set_hide_gpt5_1_migration_prompt_preserves_table() { let tmp = tempdir().expect("tmpdir"); let codex_home = tmp.path(); std::fs::write( codex_home.join(CONFIG_TOML_FILE), r#"[notice] existing = "value" "#, ) .expect("seed"); apply_blocking( codex_home, None, &[ConfigEdit::SetNoticeHideModelMigrationPrompt( "hide_gpt5_1_migration_prompt".to_string(), true, )], ) .expect("persist"); let contents =
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/constraint.rs
codex-rs/core/src/config/constraint.rs
use std::fmt;
use std::sync::Arc;

use thiserror::Error;

/// Error returned when a candidate value fails a [`Constrained`] validator.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum ConstraintError {
    #[error("value `{candidate}` is not in the allowed set {allowed}")]
    InvalidValue { candidate: String, allowed: String },
    #[error("field `{field_name}` cannot be empty")]
    EmptyField { field_name: String },
}

impl ConstraintError {
    /// Convenience constructor for [`ConstraintError::InvalidValue`].
    pub fn invalid_value(candidate: impl Into<String>, allowed: impl Into<String>) -> Self {
        Self::InvalidValue {
            candidate: candidate.into(),
            allowed: allowed.into(),
        }
    }

    /// Convenience constructor for [`ConstraintError::EmptyField`].
    pub fn empty_field(field_name: impl Into<String>) -> Self {
        Self::EmptyField {
            field_name: field_name.into(),
        }
    }
}

pub type ConstraintResult<T> = Result<T, ConstraintError>;

// Lets `?` lift constraint failures into `io::Error` at I/O-flavored call
// sites; the kind is always `InvalidInput`.
impl From<ConstraintError> for std::io::Error {
    fn from(err: ConstraintError) -> Self {
        std::io::Error::new(std::io::ErrorKind::InvalidInput, err)
    }
}

/// Shared, type-erased predicate deciding whether a candidate `T` is allowed.
type ConstraintValidator<T> = dyn Fn(&T) -> ConstraintResult<()> + Send + Sync;

/// A value of `T` paired with a validator that every write must satisfy.
///
/// Invariant: `value` has passed `validator`. [`Constrained::new`] checks the
/// initial value and [`Constrained::set`] re-checks every replacement, so a
/// `Constrained<T>` can never hold a value its validator rejects.
#[derive(Clone)]
pub struct Constrained<T> {
    value: T,
    validator: Arc<ConstraintValidator<T>>,
}

impl<T: Send + Sync> Constrained<T> {
    /// Build a constrained value; fails if `initial_value` does not satisfy
    /// `validator`.
    pub fn new(
        initial_value: T,
        validator: impl Fn(&T) -> ConstraintResult<()> + Send + Sync + 'static,
    ) -> ConstraintResult<Self> {
        let validator: Arc<ConstraintValidator<T>> = Arc::new(validator);
        validator(&initial_value)?;
        Ok(Self {
            value: initial_value,
            validator,
        })
    }

    /// A constraint that accepts every value.
    pub fn allow_any(initial_value: T) -> Self {
        Self {
            value: initial_value,
            validator: Arc::new(|_| Ok(())),
        }
    }

    /// A constraint pinned to exactly one value; any other candidate is
    /// rejected with [`ConstraintError::InvalidValue`].
    pub fn allow_only(value: T) -> Self
    where
        T: PartialEq + Send + Sync + fmt::Debug + Clone + 'static,
    {
        #[expect(clippy::expect_used)]
        Self::new(value.clone(), move |candidate| {
            if *candidate == value {
                Ok(())
            } else {
                Err(ConstraintError::invalid_value(
                    format!("{candidate:?}"),
                    format!("{value:?}"),
                ))
            }
        })
        // The initial value trivially equals itself, so validation cannot
        // fail here.
        .expect("initial value should always be valid")
    }

    /// Allow any value of T, using T's Default as the initial value.
    pub fn allow_any_from_default() -> Self
    where
        T: Default,
    {
        Self::allow_any(T::default())
    }

    /// Restrict to a fixed allow-list; fails if `initial_value` itself is not
    /// in `allowed`.
    pub fn allow_values(initial_value: T, allowed: Vec<T>) -> ConstraintResult<Self>
    where
        T: PartialEq + Send + Sync + fmt::Debug + 'static,
    {
        Self::new(initial_value, move |candidate| {
            if allowed.contains(candidate) {
                Ok(())
            } else {
                Err(ConstraintError::invalid_value(
                    format!("{candidate:?}"),
                    format!("{allowed:?}"),
                ))
            }
        })
    }

    /// Borrow the current (validated) value.
    pub fn get(&self) -> &T {
        &self.value
    }

    /// Copy out the current value (only for `Copy` types).
    pub fn value(&self) -> T
    where
        T: Copy,
    {
        self.value
    }

    /// Probe whether `candidate` would be accepted, without mutating.
    pub fn can_set(&self, candidate: &T) -> ConstraintResult<()> {
        (self.validator)(candidate)
    }

    /// Replace the value if it passes validation; on error the previous value
    /// is kept unchanged.
    pub fn set(&mut self, value: T) -> ConstraintResult<()> {
        (self.validator)(&value)?;
        self.value = value;
        Ok(())
    }
}

impl<T> std::ops::Deref for Constrained<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

// Manual impl so the (unnameable) validator closure is omitted from output.
impl<T: fmt::Debug> fmt::Debug for Constrained<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Constrained")
            .field("value", &self.value)
            .finish()
    }
}

// Equality compares values only; validators are deliberately not compared
// (closures have no meaningful equality).
impl<T: PartialEq> PartialEq for Constrained<T> {
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    // allow_any: no candidate is ever rejected.
    #[test]
    fn constrained_allow_any_accepts_any_value() {
        let mut constrained = Constrained::allow_any(5);
        constrained.set(-10).expect("allow any accepts all values");
        assert_eq!(constrained.value(), -10);
    }

    // allow_any_from_default: starts from T::default() (0 for i32).
    #[test]
    fn constrained_allow_any_default_uses_default_value() {
        let constrained = Constrained::<i32>::allow_any_from_default();
        assert_eq!(constrained.value(), 0);
    }

    // new: the initial value is validated up front.
    #[test]
    fn constrained_new_rejects_invalid_initial_value() {
        let result = Constrained::new(0, |value| {
            if *value > 0 {
                Ok(())
            } else {
                Err(ConstraintError::invalid_value(
                    value.to_string(),
                    "positive values",
                ))
            }
        });
        assert_eq!(
            result,
            Err(ConstraintError::invalid_value("0", "positive values"))
        );
    }

    // set: a rejected write leaves the previously stored value intact.
    #[test]
    fn constrained_set_rejects_invalid_value_and_leaves_previous() {
        let mut constrained = Constrained::new(1, |value| {
            if *value > 0 {
                Ok(())
            } else {
                Err(ConstraintError::invalid_value(
                    value.to_string(),
                    "positive values",
                ))
            }
        })
        .expect("initial value should be accepted");

        let err = constrained
            .set(-5)
            .expect_err("negative values should be rejected");
        assert_eq!(err, ConstraintError::invalid_value("-5", "positive values"));
        assert_eq!(constrained.value(), 1);
    }

    // can_set: probing never mutates the stored value.
    #[test]
    fn constrained_can_set_allows_probe_without_setting() {
        let constrained = Constrained::new(1, |value| {
            if *value > 0 {
                Ok(())
            } else {
                Err(ConstraintError::invalid_value(
                    value.to_string(),
                    "positive values",
                ))
            }
        })
        .expect("initial value should be accepted");

        constrained
            .can_set(&2)
            .expect("can_set should accept positive value");
        let err = constrained
            .can_set(&-1)
            .expect_err("can_set should reject negative value");
        assert_eq!(err, ConstraintError::invalid_value("-1", "positive values"));
        assert_eq!(constrained.value(), 1);
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config/profile.rs
codex-rs/core/src/config/profile.rs
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;

use crate::protocol::AskForApproval;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ConfigProfile {
    // Every field is optional; a `None` presumably defers to the value from
    // the surrounding config — TODO(review) confirm against the profile
    // merging code.
    pub model: Option<String>,
    /// The key in the `model_providers` map identifying the
    /// [`ModelProviderInfo`] to use.
    pub model_provider: Option<String>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub model_reasoning_effort: Option<ReasoningEffort>,
    pub model_reasoning_summary: Option<ReasoningSummary>,
    pub model_verbosity: Option<Verbosity>,
    pub chatgpt_base_url: Option<String>,
    pub experimental_instructions_file: Option<AbsolutePathBuf>,
    pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
    pub include_apply_patch_tool: Option<bool>,
    pub experimental_use_unified_exec_tool: Option<bool>,
    pub experimental_use_freeform_apply_patch: Option<bool>,
    pub tools_web_search: Option<bool>,
    pub tools_view_image: Option<bool>,
    /// Optional feature toggles scoped to this profile.
    #[serde(default)]
    pub features: Option<crate::features::FeaturesToml>,
    pub oss_provider: Option<String>,
}

/// Conversion to the app-server protocol representation. Only the fields the
/// protocol `Profile` carries are forwarded; the remaining fields (sandbox
/// mode, experimental file paths, tool toggles, `features`, `oss_provider`)
/// are dropped by this conversion.
impl From<ConfigProfile> for codex_app_server_protocol::Profile {
    fn from(config_profile: ConfigProfile) -> Self {
        Self {
            model: config_profile.model,
            model_provider: config_profile.model_provider,
            approval_policy: config_profile.approval_policy,
            model_reasoning_effort: config_profile.model_reasoning_effort,
            model_reasoning_summary: config_profile.model_reasoning_summary,
            model_verbosity: config_profile.model_verbosity,
            chatgpt_base_url: config_profile.chatgpt_base_url,
        }
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/merge.rs
codex-rs/core/src/config_loader/merge.rs
use toml::Value as TomlValue; /// Merge config `overlay` into `base`, giving `overlay` precedence. pub fn merge_toml_values(base: &mut TomlValue, overlay: &TomlValue) { if let TomlValue::Table(overlay_table) = overlay && let TomlValue::Table(base_table) = base { for (key, value) in overlay_table { if let Some(existing) = base_table.get_mut(key) { merge_toml_values(existing, value); } else { base_table.insert(key.clone(), value.clone()); } } } else { *base = overlay.clone(); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/tests.rs
codex-rs/core/src/config_loader/tests.rs
//! Integration tests for the layered config loader: managed-config and
//! managed-preference precedence, project-layer discovery, and requirement
//! constraints.

use super::LoaderOverrides;
use super::load_config_layers_state;
use crate::config::CONFIG_TOML_FILE;
use crate::config::ConfigBuilder;
use crate::config::ConfigOverrides;
use crate::config_loader::ConfigLayerEntry;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::config_requirements::ConfigRequirementsToml;
use crate::config_loader::fingerprint::version_for_toml;
use crate::config_loader::load_requirements_toml;
use codex_protocol::protocol::AskForApproval;
use codex_utils_absolute_path::AbsolutePathBuf;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
use toml::Value as TomlValue;

// A managed_config.toml file overrides values from CODEX_HOME/config.toml,
// including keys nested inside tables.
#[tokio::test]
async fn merges_managed_config_layer_on_top() {
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

    std::fs::write(
        tmp.path().join(CONFIG_TOML_FILE),
        r#"foo = 1
[nested]
value = "base"
"#,
    )
    .expect("write base");

    std::fs::write(
        &managed_path,
        r#"foo = 2
[nested]
value = "managed_config"
extra = true
"#,
    )
    .expect("write managed config");

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let loaded = state.effective_config();

    // Managed values win over the base config.
    let table = loaded.as_table().expect("top-level table expected");
    assert_eq!(table.get("foo"), Some(&TomlValue::Integer(2)));
    let nested = table
        .get("nested")
        .and_then(|v| v.as_table())
        .expect("nested");
    assert_eq!(
        nested.get("value"),
        Some(&TomlValue::String("managed_config".to_string()))
    );
    assert_eq!(nested.get("extra"), Some(&TomlValue::Boolean(true)));
}

// With no config files on disk, the loader still synthesizes an (empty) user
// layer and the effective config is an empty table.
#[tokio::test]
async fn returns_empty_when_all_layers_missing() {
    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        #[cfg(target_os = "macos")]
        managed_preferences_base64: None,
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let layers = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load layers");

    let user_layer = layers
        .get_user_layer()
        .expect("expected a user layer even when CODEX_HOME/config.toml does not exist");
    assert_eq!(
        &ConfigLayerEntry {
            name: super::ConfigLayerSource::User {
                file: AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, tmp.path())
                    .expect("resolve user config.toml path")
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        },
        user_layer,
    );
    assert_eq!(
        user_layer.config,
        TomlValue::Table(toml::map::Map::new()),
        "expected empty config for user layer when config.toml does not exist"
    );

    let binding = layers.effective_config();
    let base_table = binding.as_table().expect("base table expected");
    assert!(
        base_table.is_empty(),
        "expected empty base layer when configs missing"
    );

    // The System layer (e.g. /etc/codex) is only probed on unix targets.
    let num_system_layers = layers
        .layers_high_to_low()
        .iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::System { .. }))
        .count();
    let expected_system_layers = if cfg!(unix) { 1 } else { 0 };
    assert_eq!(
        num_system_layers, expected_system_layers,
        "system layer should be present only on unix"
    );

    #[cfg(not(target_os = "macos"))]
    {
        let effective = layers.effective_config();
        let table = effective.as_table().expect("top-level table expected");
        assert!(
            table.is_empty(),
            "expected empty table when configs missing"
        );
    }
}

// macOS managed preferences (simulated via base64 payload) outrank both the
// managed_config.toml file and the user config.
#[cfg(target_os = "macos")]
#[tokio::test]
async fn managed_preferences_take_highest_precedence() {
    use base64::Engine;

    let managed_payload = r#"
[nested]
value = "managed"
flag = false
"#;
    let encoded = base64::prelude::BASE64_STANDARD.encode(managed_payload.as_bytes());

    let tmp = tempdir().expect("tempdir");
    let managed_path = tmp.path().join("managed_config.toml");

    std::fs::write(
        tmp.path().join(CONFIG_TOML_FILE),
        r#"[nested]
value = "base"
"#,
    )
    .expect("write base");

    std::fs::write(
        &managed_path,
        r#"[nested]
value = "managed_config"
flag = true
"#,
    )
    .expect("write managed config");

    let overrides = LoaderOverrides {
        managed_config_path: Some(managed_path),
        managed_preferences_base64: Some(encoded),
    };

    let cwd = AbsolutePathBuf::try_from(tmp.path()).expect("cwd");
    let state = load_config_layers_state(
        tmp.path(),
        Some(cwd),
        &[] as &[(String, TomlValue)],
        overrides,
    )
    .await
    .expect("load config");
    let loaded = state.effective_config();

    let nested = loaded
        .get("nested")
        .and_then(|v| v.as_table())
        .expect("nested table");
    assert_eq!(
        nested.get("value"),
        Some(&TomlValue::String("managed".to_string()))
    );
    assert_eq!(nested.get("flag"), Some(&TomlValue::Boolean(false)));
}

// A requirements file restricts approval policies: the first allowed value
// becomes the default and values outside the list are rejected by can_set.
#[tokio::test(flavor = "current_thread")]
async fn load_requirements_toml_produces_expected_constraints() -> anyhow::Result<()> {
    let tmp = tempdir()?;
    let requirements_file = tmp.path().join("requirements.toml");
    tokio::fs::write(
        &requirements_file,
        r#"
allowed_approval_policies = ["never", "on-request"]
"#,
    )
    .await?;

    let mut config_requirements_toml = ConfigRequirementsToml::default();
    load_requirements_toml(&mut config_requirements_toml, &requirements_file).await?;

    assert_eq!(
        config_requirements_toml.allowed_approval_policies,
        Some(vec![AskForApproval::Never, AskForApproval::OnRequest])
    );

    let config_requirements: ConfigRequirements = config_requirements_toml.try_into()?;
    assert_eq!(
        config_requirements.approval_policy.value(),
        AskForApproval::Never
    );
    config_requirements
        .approval_policy
        .can_set(&AskForApproval::Never)?;
    assert!(
        config_requirements
            .approval_policy
            .can_set(&AskForApproval::OnFailure)
            .is_err()
    );

    Ok(())
}

// With nested .codex folders, the layer closest to cwd ranks highest and its
// values win in the effective config.
#[tokio::test]
async fn project_layers_prefer_closest_cwd() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    // A .git marker makes project_root the discovery boundary.
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;
    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;

    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();

    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let config = layers.effective_config();
    let foo = config
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");

    Ok(())
}

// Relative paths inside a project config resolve against that layer's own
// .codex folder, and the closest layer's file wins.
#[tokio::test]
async fn project_paths_resolve_relative_to_dot_codex_and_override_in_order() -> std::io::Result<()>
{
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let root_cfg = r#"
experimental_instructions_file = "root.txt"
"#;
    let nested_cfg = r#"
experimental_instructions_file = "child.txt"
"#;
    tokio::fs::write(project_root.join(".codex").join(CONFIG_TOML_FILE), root_cfg).await?;
    tokio::fs::write(nested.join(".codex").join(CONFIG_TOML_FILE), nested_cfg).await?;
    tokio::fs::write(
        project_root.join(".codex").join("root.txt"),
        "root instructions",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join("child.txt"),
        "child instructions",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;

    let config = ConfigBuilder::default()
        .codex_home(codex_home)
        .harness_overrides(ConfigOverrides {
            cwd: Some(nested.clone()),
            ..ConfigOverrides::default()
        })
        .build()
        .await?;

    assert_eq!(
        config.base_instructions.as_deref(),
        Some("child instructions")
    );

    Ok(())
}

// An empty .codex directory (no config.toml) still yields a project layer
// with an empty config table.
#[tokio::test]
async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> std::io::Result<()>
{
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(&nested).await?;
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::write(project_root.join(".git"), "gitdir: here").await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;

    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter(|layer| matches!(layer.name, super::ConfigLayerSource::Project { .. }))
        .collect();

    assert_eq!(
        vec![&ConfigLayerEntry {
            name: super::ConfigLayerSource::Project {
                dot_codex_folder: AbsolutePathBuf::from_absolute_path(project_root.join(".codex"))?,
            },
            config: TomlValue::Table(toml::map::Map::new()),
            version: version_for_toml(&TomlValue::Table(toml::map::Map::new())),
        }],
        project_layers
    );

    Ok(())
}

// `project_root_markers` in the user config swaps out the default project
// boundary marker (.git) — here `.hg` is used instead.
#[tokio::test]
async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()> {
    let tmp = tempdir()?;
    let project_root = tmp.path().join("project");
    let nested = project_root.join("child");
    tokio::fs::create_dir_all(project_root.join(".codex")).await?;
    tokio::fs::create_dir_all(nested.join(".codex")).await?;
    tokio::fs::write(project_root.join(".hg"), "hg").await?;
    tokio::fs::write(
        project_root.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"root\"\n",
    )
    .await?;
    tokio::fs::write(
        nested.join(".codex").join(CONFIG_TOML_FILE),
        "foo = \"child\"\n",
    )
    .await?;

    let codex_home = tmp.path().join("home");
    tokio::fs::create_dir_all(&codex_home).await?;
    tokio::fs::write(
        codex_home.join(CONFIG_TOML_FILE),
        r#"
project_root_markers = [".hg"]
"#,
    )
    .await?;

    let cwd = AbsolutePathBuf::from_absolute_path(&nested)?;
    let layers = load_config_layers_state(
        &codex_home,
        Some(cwd),
        &[] as &[(String, TomlValue)],
        LoaderOverrides::default(),
    )
    .await?;

    let project_layers: Vec<_> = layers
        .layers_high_to_low()
        .into_iter()
        .filter_map(|layer| match &layer.name {
            super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder),
            _ => None,
        })
        .collect();

    assert_eq!(project_layers.len(), 2);
    assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path());
    assert_eq!(
        project_layers[1].as_path(),
        project_root.join(".codex").as_path()
    );

    let merged = layers.effective_config();
    let foo = merged
        .get("foo")
        .and_then(TomlValue::as_str)
        .expect("foo entry");
    assert_eq!(foo, "child");

    Ok(())
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/macos.rs
codex-rs/core/src/config_loader/macos.rs
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use core_foundation::string::CFStringRef;
use std::ffi::c_void;
use std::io;
use tokio::task;
use toml::Value as TomlValue;

// Managed-preferences domain and key under which MDM-distributed config is
// stored as a base64-encoded TOML document.
const MANAGED_PREFERENCES_APPLICATION_ID: &str = "com.openai.codex";
const MANAGED_PREFERENCES_CONFIG_KEY: &str = "config_toml_base64";

/// Load the admin-managed config layer from macOS managed preferences.
///
/// If `override_base64` is provided it is used instead of querying the system
/// (an empty/whitespace override yields `Ok(None)`). Otherwise the
/// CFPreferences lookup runs on a blocking thread since it is synchronous
/// system I/O.
pub(crate) async fn load_managed_admin_config_layer(
    override_base64: Option<&str>,
) -> io::Result<Option<TomlValue>> {
    if let Some(encoded) = override_base64 {
        let trimmed = encoded.trim();
        return if trimmed.is_empty() {
            Ok(None)
        } else {
            parse_managed_preferences_base64(trimmed).map(Some)
        };
    }

    const LOAD_ERROR: &str = "Failed to load managed preferences configuration";
    match task::spawn_blocking(load_managed_admin_config).await {
        Ok(result) => result,
        // A JoinError means the blocking task panicked or was cancelled; log
        // which, but surface a single generic error to the caller.
        Err(join_err) => {
            if join_err.is_cancelled() {
                tracing::error!("Managed preferences load task was cancelled");
            } else {
                tracing::error!("Managed preferences load task failed: {join_err}");
            }
            Err(io::Error::other(LOAD_ERROR))
        }
    }
}

/// Synchronously read the managed-preference value via CoreFoundation and
/// parse it; `Ok(None)` when the preference is not set.
fn load_managed_admin_config() -> io::Result<Option<TomlValue>> {
    #[link(name = "CoreFoundation", kind = "framework")]
    unsafe extern "C" {
        fn CFPreferencesCopyAppValue(key: CFStringRef, application_id: CFStringRef) -> *mut c_void;
    }

    let application_id = CFString::new(MANAGED_PREFERENCES_APPLICATION_ID);
    let key = CFString::new(MANAGED_PREFERENCES_CONFIG_KEY);

    // SAFETY: both arguments are valid CFStringRefs backed by the live
    // `key`/`application_id` locals for the duration of the call.
    let value_ref = unsafe {
        CFPreferencesCopyAppValue(
            key.as_concrete_TypeRef(),
            application_id.as_concrete_TypeRef(),
        )
    };

    if value_ref.is_null() {
        tracing::debug!(
            "Managed preferences for {} key {} not found",
            MANAGED_PREFERENCES_APPLICATION_ID,
            MANAGED_PREFERENCES_CONFIG_KEY
        );
        return Ok(None);
    }

    // SAFETY: `value_ref` is non-null (checked above) and
    // CFPreferencesCopyAppValue follows the Create rule, so we take ownership
    // of the +1 reference here.
    // NOTE(review): the returned property-list value is assumed to be a
    // CFString; a profile that stores a non-string type under this key would
    // make this cast incorrect — confirm against the MDM profile schema.
    let value = unsafe { CFString::wrap_under_create_rule(value_ref as _) };
    let contents = value.to_string();
    let trimmed = contents.trim();
    parse_managed_preferences_base64(trimmed).map(Some)
}

/// Decode a base64 payload into UTF-8 TOML and require a table at the root;
/// any failure maps to `io::ErrorKind::InvalidData`.
fn parse_managed_preferences_base64(encoded: &str) -> io::Result<TomlValue> {
    let decoded = BASE64_STANDARD.decode(encoded.as_bytes()).map_err(|err| {
        tracing::error!("Failed to decode managed preferences as base64: {err}");
        io::Error::new(io::ErrorKind::InvalidData, err)
    })?;
    let decoded_str = String::from_utf8(decoded).map_err(|err| {
        tracing::error!("Managed preferences base64 contents were not valid UTF-8: {err}");
        io::Error::new(io::ErrorKind::InvalidData, err)
    })?;
    match toml::from_str::<TomlValue>(&decoded_str) {
        Ok(TomlValue::Table(parsed)) => Ok(TomlValue::Table(parsed)),
        // Valid TOML but not a table (e.g. bare scalar): reject, since config
        // layers must be key/value documents.
        Ok(other) => {
            tracing::error!(
                "Managed preferences TOML must have a table at the root, found {other:?}",
            );
            Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "managed preferences root must be a table",
            ))
        }
        Err(err) => {
            tracing::error!("Failed to parse managed preferences TOML: {err}");
            Err(io::Error::new(io::ErrorKind::InvalidData, err))
        }
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/fingerprint.rs
codex-rs/core/src/config_loader/fingerprint.rs
use codex_app_server_protocol::ConfigLayerMetadata; use serde_json::Value as JsonValue; use sha2::Digest; use sha2::Sha256; use std::collections::HashMap; use toml::Value as TomlValue; pub(super) fn record_origins( value: &TomlValue, meta: &ConfigLayerMetadata, path: &mut Vec<String>, origins: &mut HashMap<String, ConfigLayerMetadata>, ) { match value { TomlValue::Table(table) => { for (key, val) in table { path.push(key.clone()); record_origins(val, meta, path, origins); path.pop(); } } TomlValue::Array(items) => { for (idx, item) in (0_i32..).zip(items.iter()) { path.push(idx.to_string()); record_origins(item, meta, path, origins); path.pop(); } } _ => { if !path.is_empty() { origins.insert(path.join("."), meta.clone()); } } } } pub(super) fn version_for_toml(value: &TomlValue) -> String { let json = serde_json::to_value(value).unwrap_or(JsonValue::Null); let canonical = canonical_json(&json); let serialized = serde_json::to_vec(&canonical).unwrap_or_default(); let mut hasher = Sha256::new(); hasher.update(serialized); let hash = hasher.finalize(); let hex = hash .iter() .map(|byte| format!("{byte:02x}")) .collect::<String>(); format!("sha256:{hex}") } fn canonical_json(value: &JsonValue) -> JsonValue { match value { JsonValue::Object(map) => { let mut sorted = serde_json::Map::new(); let mut keys = map.keys().cloned().collect::<Vec<_>>(); keys.sort(); for key in keys { if let Some(val) = map.get(&key) { sorted.insert(key, canonical_json(val)); } } JsonValue::Object(sorted) } JsonValue::Array(items) => JsonValue::Array(items.iter().map(canonical_json).collect()), other => other.clone(), } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/layer_io.rs
codex-rs/core/src/config_loader/layer_io.rs
use super::LoaderOverrides; #[cfg(target_os = "macos")] use super::macos::load_managed_admin_config_layer; use codex_utils_absolute_path::AbsolutePathBuf; use std::io; use std::path::Path; use std::path::PathBuf; use tokio::fs; use toml::Value as TomlValue; #[cfg(unix)] const CODEX_MANAGED_CONFIG_SYSTEM_PATH: &str = "/etc/codex/managed_config.toml"; #[derive(Debug, Clone)] pub(super) struct MangedConfigFromFile { pub managed_config: TomlValue, pub file: AbsolutePathBuf, } #[derive(Debug, Clone)] pub(super) struct LoadedConfigLayers { /// If present, data read from a file such as `/etc/codex/managed_config.toml`. pub managed_config: Option<MangedConfigFromFile>, /// If present, data read from managed preferences (macOS only). pub managed_config_from_mdm: Option<TomlValue>, } pub(super) async fn load_config_layers_internal( codex_home: &Path, overrides: LoaderOverrides, ) -> io::Result<LoadedConfigLayers> { #[cfg(target_os = "macos")] let LoaderOverrides { managed_config_path, managed_preferences_base64, } = overrides; #[cfg(not(target_os = "macos"))] let LoaderOverrides { managed_config_path, } = overrides; let managed_config_path = AbsolutePathBuf::from_absolute_path( managed_config_path.unwrap_or_else(|| managed_config_default_path(codex_home)), )?; let managed_config = read_config_from_path(&managed_config_path, false) .await? 
.map(|managed_config| MangedConfigFromFile { managed_config, file: managed_config_path.clone(), }); #[cfg(target_os = "macos")] let managed_preferences = load_managed_admin_config_layer(managed_preferences_base64.as_deref()).await?; #[cfg(not(target_os = "macos"))] let managed_preferences = None; Ok(LoadedConfigLayers { managed_config, managed_config_from_mdm: managed_preferences, }) } pub(super) async fn read_config_from_path( path: impl AsRef<Path>, log_missing_as_info: bool, ) -> io::Result<Option<TomlValue>> { match fs::read_to_string(path.as_ref()).await { Ok(contents) => match toml::from_str::<TomlValue>(&contents) { Ok(value) => Ok(Some(value)), Err(err) => { tracing::error!("Failed to parse {}: {err}", path.as_ref().display()); Err(io::Error::new(io::ErrorKind::InvalidData, err)) } }, Err(err) if err.kind() == io::ErrorKind::NotFound => { if log_missing_as_info { tracing::info!("{} not found, using defaults", path.as_ref().display()); } else { tracing::debug!("{} not found", path.as_ref().display()); } Ok(None) } Err(err) => { tracing::error!("Failed to read {}: {err}", path.as_ref().display()); Err(err) } } } /// Return the default managed config path (honoring `CODEX_MANAGED_CONFIG_PATH`). pub(super) fn managed_config_default_path(codex_home: &Path) -> PathBuf { if let Ok(path) = std::env::var("CODEX_MANAGED_CONFIG_PATH") { return PathBuf::from(path); } #[cfg(unix)] { let _ = codex_home; PathBuf::from(CODEX_MANAGED_CONFIG_SYSTEM_PATH) } #[cfg(not(unix))] { codex_home.join("managed_config.toml") } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/state.rs
codex-rs/core/src/config_loader/state.rs
use crate::config_loader::ConfigRequirements; use super::fingerprint::record_origins; use super::fingerprint::version_for_toml; use super::merge::merge_toml_values; use codex_app_server_protocol::ConfigLayer; use codex_app_server_protocol::ConfigLayerMetadata; use codex_app_server_protocol::ConfigLayerSource; use codex_utils_absolute_path::AbsolutePathBuf; use serde_json::Value as JsonValue; use std::collections::HashMap; use std::path::PathBuf; use toml::Value as TomlValue; #[derive(Debug, Default, Clone)] pub struct LoaderOverrides { pub managed_config_path: Option<PathBuf>, #[cfg(target_os = "macos")] pub managed_preferences_base64: Option<String>, } #[derive(Debug, Clone, PartialEq)] pub struct ConfigLayerEntry { pub name: ConfigLayerSource, pub config: TomlValue, pub version: String, } impl ConfigLayerEntry { pub fn new(name: ConfigLayerSource, config: TomlValue) -> Self { let version = version_for_toml(&config); Self { name, config, version, } } pub fn metadata(&self) -> ConfigLayerMetadata { ConfigLayerMetadata { name: self.name.clone(), version: self.version.clone(), } } pub fn as_layer(&self) -> ConfigLayer { ConfigLayer { name: self.name.clone(), version: self.version.clone(), config: serde_json::to_value(&self.config).unwrap_or(JsonValue::Null), } } // Get the `.codex/` folder associated with this config layer, if any. pub fn config_folder(&self) -> Option<AbsolutePathBuf> { match &self.name { ConfigLayerSource::Mdm { .. } => None, ConfigLayerSource::System { file } => file.parent(), ConfigLayerSource::User { file } => file.parent(), ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder.clone()), ConfigLayerSource::SessionFlags => None, ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. 
} => None, ConfigLayerSource::LegacyManagedConfigTomlFromMdm => None, } } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ConfigLayerStackOrdering { LowestPrecedenceFirst, HighestPrecedenceFirst, } #[derive(Debug, Clone, Default, PartialEq)] pub struct ConfigLayerStack { /// Layers are listed from lowest precedence (base) to highest (top), so /// later entries in the Vec override earlier ones. layers: Vec<ConfigLayerEntry>, /// Index into [layers] of the user config layer, if any. user_layer_index: Option<usize>, /// Constraints that must be enforced when deriving a [Config] from the /// layers. requirements: ConfigRequirements, } impl ConfigLayerStack { pub fn new( layers: Vec<ConfigLayerEntry>, requirements: ConfigRequirements, ) -> std::io::Result<Self> { let user_layer_index = verify_layer_ordering(&layers)?; Ok(Self { layers, user_layer_index, requirements, }) } /// Returns the user config layer, if any. pub fn get_user_layer(&self) -> Option<&ConfigLayerEntry> { self.user_layer_index .and_then(|index| self.layers.get(index)) } pub fn requirements(&self) -> &ConfigRequirements { &self.requirements } /// Creates a new [ConfigLayerStack] using the specified values to inject a /// "user layer" into the stack. If such a layer already exists, it is /// replaced; otherwise, it is inserted into the stack at the appropriate /// position based on precedence rules. 
pub fn with_user_config(&self, config_toml: &AbsolutePathBuf, user_config: TomlValue) -> Self { let user_layer = ConfigLayerEntry::new( ConfigLayerSource::User { file: config_toml.clone(), }, user_config, ); let mut layers = self.layers.clone(); match self.user_layer_index { Some(index) => { layers[index] = user_layer; Self { layers, user_layer_index: self.user_layer_index, requirements: self.requirements.clone(), } } None => { let user_layer_index = match layers .iter() .position(|layer| layer.name.precedence() > user_layer.name.precedence()) { Some(index) => { layers.insert(index, user_layer); index } None => { layers.push(user_layer); layers.len() - 1 } }; Self { layers, user_layer_index: Some(user_layer_index), requirements: self.requirements.clone(), } } } } pub fn effective_config(&self) -> TomlValue { let mut merged = TomlValue::Table(toml::map::Map::new()); for layer in &self.layers { merge_toml_values(&mut merged, &layer.config); } merged } pub fn origins(&self) -> HashMap<String, ConfigLayerMetadata> { let mut origins = HashMap::new(); let mut path = Vec::new(); for layer in &self.layers { record_origins(&layer.config, &layer.metadata(), &mut path, &mut origins); } origins } /// Returns the highest-precedence to lowest-precedence layers, so /// `ConfigLayerSource::SessionFlags` would be first, if present. pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> { self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst) } /// Returns the highest-precedence to lowest-precedence layers, so /// `ConfigLayerSource::SessionFlags` would be first, if present. pub fn get_layers(&self, ordering: ConfigLayerStackOrdering) -> Vec<&ConfigLayerEntry> { match ordering { ConfigLayerStackOrdering::HighestPrecedenceFirst => self.layers.iter().rev().collect(), ConfigLayerStackOrdering::LowestPrecedenceFirst => self.layers.iter().collect(), } } } /// Ensures precedence ordering of config layers is correct. 
Returns the index /// of the user config layer, if any (at most one should exist). fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result<Option<usize>> { if !layers.iter().map(|layer| &layer.name).is_sorted() { return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, "config layers are not in correct precedence order", )); } // The previous check ensured `layers` is sorted by precedence, so now we // further verify that: // 1. There is at most one user config layer. // 2. Project layers are ordered from root to cwd. let mut user_layer_index: Option<usize> = None; let mut previous_project_dot_codex_folder: Option<&AbsolutePathBuf> = None; for (index, layer) in layers.iter().enumerate() { if matches!(layer.name, ConfigLayerSource::User { .. }) { if user_layer_index.is_some() { return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, "multiple user config layers found", )); } user_layer_index = Some(index); } if let ConfigLayerSource::Project { dot_codex_folder: current_project_dot_codex_folder, } = &layer.name { if let Some(previous) = previous_project_dot_codex_folder { let Some(parent) = previous.as_path().parent() else { return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, "project layer has no parent directory", )); }; if previous == current_project_dot_codex_folder || !current_project_dot_codex_folder .as_path() .ancestors() .any(|ancestor| ancestor == parent) { return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, "project layers are not ordered from root to cwd", )); } } previous_project_dot_codex_folder = Some(current_project_dot_codex_folder); } } Ok(user_layer_index) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/overrides.rs
codex-rs/core/src/config_loader/overrides.rs
use toml::Value as TomlValue; pub(super) fn default_empty_table() -> TomlValue { TomlValue::Table(Default::default()) } pub(super) fn build_cli_overrides_layer(cli_overrides: &[(String, TomlValue)]) -> TomlValue { let mut root = default_empty_table(); for (path, value) in cli_overrides { apply_toml_override(&mut root, path, value.clone()); } root } /// Apply a single dotted-path override onto a TOML value. fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) { use toml::value::Table; let mut current = root; let mut segments_iter = path.split('.').peekable(); while let Some(segment) = segments_iter.next() { let is_last = segments_iter.peek().is_none(); if is_last { match current { TomlValue::Table(table) => { table.insert(segment.to_string(), value); } _ => { let mut table = Table::new(); table.insert(segment.to_string(), value); *current = TomlValue::Table(table); } } return; } match current { TomlValue::Table(table) => { current = table .entry(segment.to_string()) .or_insert_with(|| TomlValue::Table(Table::new())); } _ => { *current = TomlValue::Table(Table::new()); if let TomlValue::Table(tbl) = current { current = tbl .entry(segment.to_string()) .or_insert_with(|| TomlValue::Table(Table::new())); } } } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/config_requirements.rs
codex-rs/core/src/config_loader/config_requirements.rs
use codex_protocol::config_types::SandboxMode; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; use serde::Deserialize; use crate::config::Constrained; use crate::config::ConstraintError; /// Normalized version of [`ConfigRequirementsToml`] after deserialization and /// normalization. #[derive(Debug, Clone, PartialEq)] pub struct ConfigRequirements { pub approval_policy: Constrained<AskForApproval>, pub sandbox_policy: Constrained<SandboxPolicy>, } impl Default for ConfigRequirements { fn default() -> Self { Self { approval_policy: Constrained::allow_any_from_default(), sandbox_policy: Constrained::allow_any(SandboxPolicy::ReadOnly), } } } /// Base config deserialized from /etc/codex/requirements.toml or MDM. #[derive(Deserialize, Debug, Clone, Default, PartialEq)] pub struct ConfigRequirementsToml { pub allowed_approval_policies: Option<Vec<AskForApproval>>, pub allowed_sandbox_modes: Option<Vec<SandboxModeRequirement>>, } /// Currently, `external-sandbox` is not supported in config.toml, but it is /// supported through programmatic use. #[derive(Deserialize, Debug, Clone, Copy, PartialEq)] pub enum SandboxModeRequirement { #[serde(rename = "read-only")] ReadOnly, #[serde(rename = "workspace-write")] WorkspaceWrite, #[serde(rename = "danger-full-access")] DangerFullAccess, #[serde(rename = "external-sandbox")] ExternalSandbox, } impl From<SandboxMode> for SandboxModeRequirement { fn from(mode: SandboxMode) -> Self { match mode { SandboxMode::ReadOnly => SandboxModeRequirement::ReadOnly, SandboxMode::WorkspaceWrite => SandboxModeRequirement::WorkspaceWrite, SandboxMode::DangerFullAccess => SandboxModeRequirement::DangerFullAccess, } } } impl ConfigRequirementsToml { /// For every field in `other` that is `Some`, if the corresponding field in /// `self` is `None`, copy the value from `other` into `self`. pub fn merge_unset_fields(&mut self, mut other: ConfigRequirementsToml) { macro_rules! 
fill_missing_take { ($base:expr, $other:expr, { $($field:ident),+ $(,)? }) => { $( if $base.$field.is_none() { if let Some(value) = $other.$field.take() { $base.$field = Some(value); } } )+ }; } fill_missing_take!(self, other, { allowed_approval_policies, allowed_sandbox_modes }); } } impl TryFrom<ConfigRequirementsToml> for ConfigRequirements { type Error = ConstraintError; fn try_from(toml: ConfigRequirementsToml) -> Result<Self, Self::Error> { let ConfigRequirementsToml { allowed_approval_policies, allowed_sandbox_modes, } = toml; let approval_policy: Constrained<AskForApproval> = match allowed_approval_policies { Some(policies) => { if let Some(first) = policies.first() { Constrained::allow_values(*first, policies)? } else { return Err(ConstraintError::empty_field("allowed_approval_policies")); } } None => Constrained::allow_any_from_default(), }; // TODO(gt): `ConfigRequirementsToml` should let the author specify the // default `SandboxPolicy`? Should do this for `AskForApproval` too? // // Currently, we force ReadOnly as the default policy because two of // the other variants (WorkspaceWrite, ExternalSandbox) require // additional parameters. Ultimately, we should expand the config // format to allow specifying those parameters. let default_sandbox_policy = SandboxPolicy::ReadOnly; let sandbox_policy: Constrained<SandboxPolicy> = match allowed_sandbox_modes { Some(modes) => { if !modes.contains(&SandboxModeRequirement::ReadOnly) { return Err(ConstraintError::invalid_value( "allowed_sandbox_modes", "must include 'read-only' to allow any SandboxPolicy", )); }; Constrained::new(default_sandbox_policy, move |candidate| { let mode = match candidate { SandboxPolicy::ReadOnly => SandboxModeRequirement::ReadOnly, SandboxPolicy::WorkspaceWrite { .. } => { SandboxModeRequirement::WorkspaceWrite } SandboxPolicy::DangerFullAccess => SandboxModeRequirement::DangerFullAccess, SandboxPolicy::ExternalSandbox { .. 
} => { SandboxModeRequirement::ExternalSandbox } }; if modes.contains(&mode) { Ok(()) } else { Err(ConstraintError::invalid_value( format!("{candidate:?}"), format!("{modes:?}"), )) } })? } None => Constrained::allow_any(default_sandbox_policy), }; Ok(ConfigRequirements { approval_policy, sandbox_policy, }) } } #[cfg(test)] mod tests { use super::*; use anyhow::Result; use codex_protocol::protocol::NetworkAccess; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use toml::from_str; #[test] fn merge_unset_fields_only_fills_missing_values() -> Result<()> { let source: ConfigRequirementsToml = from_str( r#" allowed_approval_policies = ["on-request"] "#, )?; let mut empty_target: ConfigRequirementsToml = from_str( r#" # intentionally left unset "#, )?; empty_target.merge_unset_fields(source.clone()); assert_eq!( empty_target.allowed_approval_policies, Some(vec![AskForApproval::OnRequest]) ); let mut populated_target: ConfigRequirementsToml = from_str( r#" allowed_approval_policies = ["never"] "#, )?; populated_target.merge_unset_fields(source); assert_eq!( populated_target.allowed_approval_policies, Some(vec![AskForApproval::Never]) ); Ok(()) } #[test] fn deserialize_allowed_approval_policies() -> Result<()> { let toml_str = r#" allowed_approval_policies = ["untrusted", "on-request"] "#; let config: ConfigRequirementsToml = from_str(toml_str)?; let requirements = ConfigRequirements::try_from(config)?; assert_eq!( requirements.approval_policy.value(), AskForApproval::UnlessTrusted, "currently, there is no way to specify the default value for approval policy in the toml, so it picks the first allowed value" ); assert!( requirements .approval_policy .can_set(&AskForApproval::UnlessTrusted) .is_ok() ); assert_eq!( requirements .approval_policy .can_set(&AskForApproval::OnFailure), Err(ConstraintError::InvalidValue { candidate: "OnFailure".into(), allowed: "[UnlessTrusted, OnRequest]".into(), }) ); assert!( requirements .approval_policy 
.can_set(&AskForApproval::OnRequest) .is_ok() ); assert_eq!( requirements.approval_policy.can_set(&AskForApproval::Never), Err(ConstraintError::InvalidValue { candidate: "Never".into(), allowed: "[UnlessTrusted, OnRequest]".into(), }) ); assert!( requirements .sandbox_policy .can_set(&SandboxPolicy::ReadOnly) .is_ok() ); Ok(()) } #[test] fn deserialize_allowed_sandbox_modes() -> Result<()> { let toml_str = r#" allowed_sandbox_modes = ["read-only", "workspace-write"] "#; let config: ConfigRequirementsToml = from_str(toml_str)?; let requirements = ConfigRequirements::try_from(config)?; let root = if cfg!(windows) { "C:\\repo" } else { "/repo" }; assert!( requirements .sandbox_policy .can_set(&SandboxPolicy::ReadOnly) .is_ok() ); assert!( requirements .sandbox_policy .can_set(&SandboxPolicy::WorkspaceWrite { writable_roots: vec![AbsolutePathBuf::from_absolute_path(root)?], network_access: false, exclude_tmpdir_env_var: false, exclude_slash_tmp: false, }) .is_ok() ); assert_eq!( requirements .sandbox_policy .can_set(&SandboxPolicy::DangerFullAccess), Err(ConstraintError::InvalidValue { candidate: "DangerFullAccess".into(), allowed: "[ReadOnly, WorkspaceWrite]".into(), }) ); assert_eq!( requirements .sandbox_policy .can_set(&SandboxPolicy::ExternalSandbox { network_access: NetworkAccess::Restricted, }), Err(ConstraintError::InvalidValue { candidate: "ExternalSandbox { network_access: Restricted }".into(), allowed: "[ReadOnly, WorkspaceWrite]".into(), }) ); Ok(()) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/config_loader/mod.rs
codex-rs/core/src/config_loader/mod.rs
mod config_requirements; mod fingerprint; mod layer_io; #[cfg(target_os = "macos")] mod macos; mod merge; mod overrides; mod state; #[cfg(test)] mod tests; use crate::config::CONFIG_TOML_FILE; use crate::config::ConfigToml; use crate::config_loader::config_requirements::ConfigRequirementsToml; use crate::config_loader::layer_io::LoadedConfigLayers; use codex_app_server_protocol::ConfigLayerSource; use codex_protocol::config_types::SandboxMode; use codex_protocol::protocol::AskForApproval; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use serde::Deserialize; use std::io; use std::path::Path; use toml::Value as TomlValue; pub use config_requirements::ConfigRequirements; pub use merge::merge_toml_values; pub use state::ConfigLayerEntry; pub use state::ConfigLayerStack; pub use state::ConfigLayerStackOrdering; pub use state::LoaderOverrides; /// On Unix systems, load requirements from this file path, if present. const DEFAULT_REQUIREMENTS_TOML_FILE_UNIX: &str = "/etc/codex/requirements.toml"; /// On Unix systems, load default settings from this file path, if present. /// Note that /etc/codex/ is treated as a "config folder," so subfolders such /// as skills/ and rules/ will also be honored. pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml"; const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"]; /// To build up the set of admin-enforced constraints, we build up from multiple /// configuration layers in the following order, but a constraint defined in an /// earlier layer cannot be overridden by a later layer: /// /// - admin: managed preferences (*) /// - system `/etc/codex/requirements.toml` /// /// For backwards compatibility, we also load from /// `/etc/codex/managed_config.toml` and map it to /// `/etc/codex/requirements.toml`. 
/// /// Configuration is built up from multiple layers in the following order: /// /// - admin: managed preferences (*) /// - system `/etc/codex/config.toml` /// - user `${CODEX_HOME}/config.toml` /// - cwd `${PWD}/config.toml` /// - tree parent directories up to root looking for `./.codex/config.toml` /// - repo `$(git rev-parse --show-toplevel)/.codex/config.toml` /// - runtime e.g., --config flags, model selector in UI /// /// (*) Only available on macOS via managed device profiles. /// /// See https://developers.openai.com/codex/security for details. /// /// When loading the config stack for a thread, there should be a `cwd` /// associated with it such that `cwd` should be `Some(...)`. Only for /// thread-agnostic config loading (e.g., for the app server's `/config` /// endpoint) should `cwd` be `None`. pub async fn load_config_layers_state( codex_home: &Path, cwd: Option<AbsolutePathBuf>, cli_overrides: &[(String, TomlValue)], overrides: LoaderOverrides, ) -> io::Result<ConfigLayerStack> { let mut config_requirements_toml = ConfigRequirementsToml::default(); // TODO(gt): Support an entry in MDM for config requirements and use it // with `config_requirements_toml.merge_unset_fields(...)`, if present. // Honor /etc/codex/requirements.toml. if cfg!(unix) { load_requirements_toml( &mut config_requirements_toml, DEFAULT_REQUIREMENTS_TOML_FILE_UNIX, ) .await?; } // Make a best-effort to support the legacy `managed_config.toml` as a // requirements specification. let loaded_config_layers = layer_io::load_config_layers_internal(codex_home, overrides).await?; load_requirements_from_legacy_scheme( &mut config_requirements_toml, loaded_config_layers.clone(), ) .await?; let mut layers = Vec::<ConfigLayerEntry>::new(); // TODO(gt): Honor managed preferences (macOS only). // Include an entry for the "system" config folder, loading its config.toml, // if it exists. 
let system_config_toml_file = if cfg!(unix) { Some(AbsolutePathBuf::from_absolute_path( SYSTEM_CONFIG_TOML_FILE_UNIX, )?) } else { // TODO(gt): Determine the path to load on Windows. None }; if let Some(system_config_toml_file) = system_config_toml_file { let system_layer = load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| { ConfigLayerEntry::new( ConfigLayerSource::System { file: system_config_toml_file.clone(), }, config_toml, ) }) .await?; layers.push(system_layer); } // Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file // exists, but is malformed, then this error should be propagated to the // user. let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?; let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| { ConfigLayerEntry::new( ConfigLayerSource::User { file: user_file.clone(), }, config_toml, ) }) .await?; layers.push(user_layer); if let Some(cwd) = cwd { let mut merged_so_far = TomlValue::Table(toml::map::Map::new()); for layer in &layers { merge_toml_values(&mut merged_so_far, &layer.config); } let project_root_markers = project_root_markers_from_config(&merged_so_far)? .unwrap_or_else(default_project_root_markers); let project_root = find_project_root(&cwd, &project_root_markers).await?; let project_layers = load_project_layers(&cwd, &project_root).await?; layers.extend(project_layers); } // Add a layer for runtime overrides from the CLI or UI, if any exist. if !cli_overrides.is_empty() { let cli_overrides_layer = overrides::build_cli_overrides_layer(cli_overrides); layers.push(ConfigLayerEntry::new( ConfigLayerSource::SessionFlags, cli_overrides_layer, )); } // Make a best-effort to support the legacy `managed_config.toml` as a // config layer on top of everything else. 
For fields in // `managed_config.toml` that do not have an equivalent in // `ConfigRequirements`, note users can still override these values on a // per-turn basis in the TUI and VS Code. let LoadedConfigLayers { managed_config, managed_config_from_mdm, } = loaded_config_layers; if let Some(config) = managed_config { let managed_parent = config.file.as_path().parent().ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!( "Managed config file {} has no parent directory", config.file.as_path().display() ), ) })?; let managed_config = resolve_relative_paths_in_config_toml(config.managed_config, managed_parent)?; layers.push(ConfigLayerEntry::new( ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: config.file }, managed_config, )); } if let Some(config) = managed_config_from_mdm { layers.push(ConfigLayerEntry::new( ConfigLayerSource::LegacyManagedConfigTomlFromMdm, config, )); } ConfigLayerStack::new(layers, config_requirements_toml.try_into()?) } /// Attempts to load a config.toml file from `config_toml`. /// - If the file exists and is valid TOML, passes the parsed `toml::Value` to /// `create_entry` and returns the resulting layer entry. /// - If the file does not exist, uses an empty `Table` with `create_entry` and /// returns the resulting layer entry. /// - If there is an error reading the file or parsing the TOML, returns an /// error. 
async fn load_config_toml_for_required_layer( config_toml: impl AsRef<Path>, create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry, ) -> io::Result<ConfigLayerEntry> { let toml_file = config_toml.as_ref(); let toml_value = match tokio::fs::read_to_string(toml_file).await { Ok(contents) => { let config: TomlValue = toml::from_str(&contents).map_err(|e| { io::Error::new( io::ErrorKind::InvalidData, format!("Error parsing config file {}: {e}", toml_file.display()), ) })?; let config_parent = toml_file.parent().ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidData, format!( "Config file {} has no parent directory", toml_file.display() ), ) })?; resolve_relative_paths_in_config_toml(config, config_parent) } Err(e) => { if e.kind() == io::ErrorKind::NotFound { Ok(TomlValue::Table(toml::map::Map::new())) } else { Err(io::Error::new( e.kind(), format!("Failed to read config file {}: {e}", toml_file.display()), )) } } }?; Ok(create_entry(toml_value)) } /// If available, apply requirements from `/etc/codex/requirements.toml` to /// `config_requirements_toml` by filling in any unset fields. 
async fn load_requirements_toml( config_requirements_toml: &mut ConfigRequirementsToml, requirements_toml_file: impl AsRef<Path>, ) -> io::Result<()> { match tokio::fs::read_to_string(&requirements_toml_file).await { Ok(contents) => { let requirements_config: ConfigRequirementsToml = toml::from_str(&contents).map_err(|e| { io::Error::new( io::ErrorKind::InvalidData, format!( "Error parsing requirements file {}: {e}", requirements_toml_file.as_ref().display(), ), ) })?; config_requirements_toml.merge_unset_fields(requirements_config); } Err(e) => { if e.kind() != io::ErrorKind::NotFound { return Err(io::Error::new( e.kind(), format!( "Failed to read requirements file {}: {e}", requirements_toml_file.as_ref().display(), ), )); } } } Ok(()) } async fn load_requirements_from_legacy_scheme( config_requirements_toml: &mut ConfigRequirementsToml, loaded_config_layers: LoadedConfigLayers, ) -> io::Result<()> { // In this implementation, earlier layers cannot be overwritten by later // layers, so list managed_config_from_mdm first because it has the highest // precedence. let LoadedConfigLayers { managed_config, managed_config_from_mdm, } = loaded_config_layers; for config in [ managed_config_from_mdm, managed_config.map(|c| c.managed_config), ] .into_iter() .flatten() { let legacy_config: LegacyManagedConfigToml = config.try_into().map_err(|err: toml::de::Error| { io::Error::new( io::ErrorKind::InvalidData, format!("Failed to parse config requirements as TOML: {err}"), ) })?; let new_requirements_toml = ConfigRequirementsToml::from(legacy_config); config_requirements_toml.merge_unset_fields(new_requirements_toml); } Ok(()) } /// Reads `project_root_markers` from the [toml::Value] produced by merging /// `config.toml` from the config layers in the stack preceding /// [ConfigLayerSource::Project]. /// /// Invariants: /// - If `project_root_markers` is not specified, returns `Ok(None)`. 
/// - If `project_root_markers` is specified, returns `Ok(Some(markers))` where /// `markers` is a `Vec<String>` (including `Ok(Some(Vec::new()))` for an /// empty array, which indicates that root detection should be disabled). /// - Returns an error if `project_root_markers` is specified but is not an /// array of strings. fn project_root_markers_from_config(config: &TomlValue) -> io::Result<Option<Vec<String>>> { let Some(table) = config.as_table() else { return Ok(None); }; let Some(markers_value) = table.get("project_root_markers") else { return Ok(None); }; let TomlValue::Array(entries) = markers_value else { return Err(io::Error::new( io::ErrorKind::InvalidData, "project_root_markers must be an array of strings", )); }; if entries.is_empty() { return Ok(Some(Vec::new())); } let mut markers = Vec::new(); for entry in entries { let Some(marker) = entry.as_str() else { return Err(io::Error::new( io::ErrorKind::InvalidData, "project_root_markers must be an array of strings", )); }; markers.push(marker.to_string()); } Ok(Some(markers)) } fn default_project_root_markers() -> Vec<String> { DEFAULT_PROJECT_ROOT_MARKERS .iter() .map(ToString::to_string) .collect() } /// Takes a `toml::Value` parsed from a config.toml file and walks through it, /// resolving any `AbsolutePathBuf` fields against `base_dir`, returning a new /// `toml::Value` with the same shape but with paths resolved. /// /// This ensures that multiple config layers can be merged together correctly /// even if they were loaded from different directories. 
fn resolve_relative_paths_in_config_toml( value_from_config_toml: TomlValue, base_dir: &Path, ) -> io::Result<TomlValue> { // Use the serialize/deserialize round-trip to convert the // `toml::Value` into a `ConfigToml` with `AbsolutePath let _guard = AbsolutePathBufGuard::new(base_dir); let Ok(resolved) = value_from_config_toml.clone().try_into::<ConfigToml>() else { return Ok(value_from_config_toml); }; drop(_guard); let resolved_value = TomlValue::try_from(resolved).map_err(|e| { io::Error::new( io::ErrorKind::InvalidData, format!("Failed to serialize resolved config: {e}"), ) })?; Ok(copy_shape_from_original( &value_from_config_toml, &resolved_value, )) } /// Ensure that every field in `original` is present in the returned /// `toml::Value`, taking the value from `resolved` where possible. This ensures /// the fields that we "removed" during the serialize/deserialize round-trip in /// `resolve_config_paths` are preserved, out of an abundance of caution. fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlValue { match (original, resolved) { (TomlValue::Table(original_table), TomlValue::Table(resolved_table)) => { let mut table = toml::map::Map::new(); for (key, original_value) in original_table { let resolved_value = resolved_table.get(key).unwrap_or(original_value); table.insert( key.clone(), copy_shape_from_original(original_value, resolved_value), ); } TomlValue::Table(table) } (TomlValue::Array(original_array), TomlValue::Array(resolved_array)) => { let mut items = Vec::new(); for (index, original_value) in original_array.iter().enumerate() { let resolved_value = resolved_array.get(index).unwrap_or(original_value); items.push(copy_shape_from_original(original_value, resolved_value)); } TomlValue::Array(items) } (_, resolved_value) => resolved_value.clone(), } } async fn find_project_root( cwd: &AbsolutePathBuf, project_root_markers: &[String], ) -> io::Result<AbsolutePathBuf> { if project_root_markers.is_empty() { return 
Ok(cwd.clone()); } for ancestor in cwd.as_path().ancestors() { for marker in project_root_markers { let marker_path = ancestor.join(marker); if tokio::fs::metadata(&marker_path).await.is_ok() { return AbsolutePathBuf::from_absolute_path(ancestor); } } } Ok(cwd.clone()) } /// Return the appropriate list of layers (each with /// [ConfigLayerSource::Project] as the source) between `cwd` and /// `project_root`, inclusive. The list is ordered in _increasing_ precdence, /// starting from folders closest to `project_root` (which is the lowest /// precedence) to those closest to `cwd` (which is the highest precedence). async fn load_project_layers( cwd: &AbsolutePathBuf, project_root: &AbsolutePathBuf, ) -> io::Result<Vec<ConfigLayerEntry>> { let mut dirs = cwd .as_path() .ancestors() .scan(false, |done, a| { if *done { None } else { if a == project_root.as_path() { *done = true; } Some(a) } }) .collect::<Vec<_>>(); dirs.reverse(); let mut layers = Vec::new(); for dir in dirs { let dot_codex = dir.join(".codex"); if !tokio::fs::metadata(&dot_codex) .await .map(|meta| meta.is_dir()) .unwrap_or(false) { continue; } let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?; let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?; match tokio::fs::read_to_string(&config_file).await { Ok(contents) => { let config: TomlValue = toml::from_str(&contents).map_err(|e| { io::Error::new( io::ErrorKind::InvalidData, format!( "Error parsing project config file {}: {e}", config_file.as_path().display(), ), ) })?; let config = resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?; layers.push(ConfigLayerEntry::new( ConfigLayerSource::Project { dot_codex_folder: dot_codex_abs, }, config, )); } Err(err) => { if err.kind() == io::ErrorKind::NotFound { // If there is no config.toml file, record an empty entry // for this project layer, as this may still have subfolders // that are significant in the overall ConfigLayerStack. 
layers.push(ConfigLayerEntry::new( ConfigLayerSource::Project { dot_codex_folder: dot_codex_abs, }, TomlValue::Table(toml::map::Map::new()), )); } else { return Err(io::Error::new( err.kind(), format!( "Failed to read project config file {}: {err}", config_file.as_path().display(), ), )); } } } } Ok(layers) } /// The legacy mechanism for specifying admin-enforced configuration is to read /// from a file like `/etc/codex/managed_config.toml` that has the same /// structure as `config.toml` where fields like `approval_policy` can specify /// exactly one value rather than a list of allowed values. /// /// If present, re-interpret `managed_config.toml` as a `requirements.toml` /// where each specified field is treated as a constraint allowing only that /// value. #[derive(Deserialize, Debug, Clone, Default, PartialEq)] struct LegacyManagedConfigToml { approval_policy: Option<AskForApproval>, sandbox_mode: Option<SandboxMode>, } impl From<LegacyManagedConfigToml> for ConfigRequirementsToml { fn from(legacy: LegacyManagedConfigToml) -> Self { let mut config_requirements_toml = ConfigRequirementsToml::default(); let LegacyManagedConfigToml { approval_policy, sandbox_mode, } = legacy; if let Some(approval_policy) = approval_policy { config_requirements_toml.allowed_approval_policies = Some(vec![approval_policy]); } if let Some(sandbox_mode) = sandbox_mode { config_requirements_toml.allowed_sandbox_modes = Some(vec![sandbox_mode.into()]); } config_requirements_toml } } // Cannot name this `mod tests` because of tests.rs in this folder. #[cfg(test)] mod unit_tests { use super::*; use tempfile::tempdir; #[test] fn ensure_resolve_relative_paths_in_config_toml_preserves_all_fields() -> anyhow::Result<()> { let tmp = tempdir()?; let base_dir = tmp.path(); let contents = r#" # This is a field recognized by config.toml that is an AbsolutePathBuf in # the ConfigToml struct. experimental_instructions_file = "./some_file.md" # This is a field recognized by config.toml. 
model = "gpt-1000" # This is a field not recognized by config.toml. foo = "xyzzy" "#; let user_config: TomlValue = toml::from_str(contents)?; let normalized_toml_value = resolve_relative_paths_in_config_toml(user_config, base_dir)?; let mut expected_toml_value = toml::map::Map::new(); expected_toml_value.insert( "experimental_instructions_file".to_string(), TomlValue::String( AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)? .as_path() .to_string_lossy() .to_string(), ), ); expected_toml_value.insert( "model".to_string(), TomlValue::String("gpt-1000".to_string()), ); expected_toml_value.insert("foo".to_string(), TomlValue::String("xyzzy".to_string())); assert_eq!(normalized_toml_value, TomlValue::Table(expected_toml_value)); Ok(()) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/mcp/auth.rs
codex-rs/core/src/mcp/auth.rs
use std::collections::HashMap; use anyhow::Result; use codex_protocol::protocol::McpAuthStatus; use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rmcp_client::determine_streamable_http_auth_status; use futures::future::join_all; use tracing::warn; use crate::config::types::McpServerConfig; use crate::config::types::McpServerTransportConfig; #[derive(Debug, Clone)] pub struct McpAuthStatusEntry { pub config: McpServerConfig, pub auth_status: McpAuthStatus, } pub async fn compute_auth_statuses<'a, I>( servers: I, store_mode: OAuthCredentialsStoreMode, ) -> HashMap<String, McpAuthStatusEntry> where I: IntoIterator<Item = (&'a String, &'a McpServerConfig)>, { let futures = servers.into_iter().map(|(name, config)| { let name = name.clone(); let config = config.clone(); async move { let auth_status = match compute_auth_status(&name, &config, store_mode).await { Ok(status) => status, Err(error) => { warn!("failed to determine auth status for MCP server `{name}`: {error:?}"); McpAuthStatus::Unsupported } }; let entry = McpAuthStatusEntry { config, auth_status, }; (name, entry) } }); join_all(futures).await.into_iter().collect() } async fn compute_auth_status( server_name: &str, config: &McpServerConfig, store_mode: OAuthCredentialsStoreMode, ) -> Result<McpAuthStatus> { match &config.transport { McpServerTransportConfig::Stdio { .. } => Ok(McpAuthStatus::Unsupported), McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, env_http_headers, } => { determine_streamable_http_auth_status( server_name, url, bearer_token_env_var.as_deref(), http_headers.clone(), env_http_headers.clone(), store_mode, ) .await } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/mcp/mod.rs
codex-rs/core/src/mcp/mod.rs
pub mod auth; use std::collections::HashMap; use std::env; use std::path::PathBuf; use async_channel::unbounded; use codex_protocol::protocol::McpListToolsResponseEvent; use codex_protocol::protocol::SandboxPolicy; use mcp_types::Tool as McpTool; use tokio_util::sync::CancellationToken; use crate::config::Config; use crate::mcp::auth::compute_auth_statuses; use crate::mcp_connection_manager::McpConnectionManager; use crate::mcp_connection_manager::SandboxState; const MCP_TOOL_NAME_PREFIX: &str = "mcp"; const MCP_TOOL_NAME_DELIMITER: &str = "__"; pub async fn collect_mcp_snapshot(config: &Config) -> McpListToolsResponseEvent { if config.mcp_servers.is_empty() { return McpListToolsResponseEvent { tools: HashMap::new(), resources: HashMap::new(), resource_templates: HashMap::new(), auth_statuses: HashMap::new(), }; } let auth_status_entries = compute_auth_statuses( config.mcp_servers.iter(), config.mcp_oauth_credentials_store_mode, ) .await; let mut mcp_connection_manager = McpConnectionManager::default(); let (tx_event, rx_event) = unbounded(); drop(rx_event); let cancel_token = CancellationToken::new(); // Use ReadOnly sandbox policy for MCP snapshot collection (safest default) let sandbox_state = SandboxState { sandbox_policy: SandboxPolicy::ReadOnly, codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), sandbox_cwd: env::current_dir().unwrap_or_else(|_| PathBuf::from("/")), }; mcp_connection_manager .initialize( config.mcp_servers.clone(), config.mcp_oauth_credentials_store_mode, auth_status_entries.clone(), tx_event, cancel_token.clone(), sandbox_state, ) .await; let snapshot = collect_mcp_snapshot_from_manager(&mcp_connection_manager, auth_status_entries).await; cancel_token.cancel(); snapshot } pub fn split_qualified_tool_name(qualified_name: &str) -> Option<(String, String)> { let mut parts = qualified_name.split(MCP_TOOL_NAME_DELIMITER); let prefix = parts.next()?; if prefix != MCP_TOOL_NAME_PREFIX { return None; } let server_name = parts.next()?; 
let tool_name: String = parts.collect::<Vec<_>>().join(MCP_TOOL_NAME_DELIMITER); if tool_name.is_empty() { return None; } Some((server_name.to_string(), tool_name)) } pub fn group_tools_by_server( tools: &HashMap<String, McpTool>, ) -> HashMap<String, HashMap<String, McpTool>> { let mut grouped = HashMap::new(); for (qualified_name, tool) in tools { if let Some((server_name, tool_name)) = split_qualified_tool_name(qualified_name) { grouped .entry(server_name) .or_insert_with(HashMap::new) .insert(tool_name, tool.clone()); } } grouped } pub(crate) async fn collect_mcp_snapshot_from_manager( mcp_connection_manager: &McpConnectionManager, auth_status_entries: HashMap<String, crate::mcp::auth::McpAuthStatusEntry>, ) -> McpListToolsResponseEvent { let (tools, resources, resource_templates) = tokio::join!( mcp_connection_manager.list_all_tools(), mcp_connection_manager.list_all_resources(), mcp_connection_manager.list_all_resource_templates(), ); let auth_statuses = auth_status_entries .iter() .map(|(name, entry)| (name.clone(), entry.auth_status)) .collect(); McpListToolsResponseEvent { tools: tools .into_iter() .map(|(name, tool)| (name, tool.tool)) .collect(), resources, resource_templates, auth_statuses, } } #[cfg(test)] mod tests { use super::*; use mcp_types::ToolInputSchema; use pretty_assertions::assert_eq; fn make_tool(name: &str) -> McpTool { McpTool { annotations: None, description: None, input_schema: ToolInputSchema { properties: None, required: None, r#type: "object".to_string(), }, name: name.to_string(), output_schema: None, title: None, } } #[test] fn split_qualified_tool_name_returns_server_and_tool() { assert_eq!( split_qualified_tool_name("mcp__alpha__do_thing"), Some(("alpha".to_string(), "do_thing".to_string())) ); } #[test] fn split_qualified_tool_name_rejects_invalid_names() { assert_eq!(split_qualified_tool_name("other__alpha__do_thing"), None); assert_eq!(split_qualified_tool_name("mcp__alpha__"), None); } #[test] fn 
group_tools_by_server_strips_prefix_and_groups() { let mut tools = HashMap::new(); tools.insert("mcp__alpha__do_thing".to_string(), make_tool("do_thing")); tools.insert( "mcp__alpha__nested__op".to_string(), make_tool("nested__op"), ); tools.insert("mcp__beta__do_other".to_string(), make_tool("do_other")); let mut expected_alpha = HashMap::new(); expected_alpha.insert("do_thing".to_string(), make_tool("do_thing")); expected_alpha.insert("nested__op".to_string(), make_tool("nested__op")); let mut expected_beta = HashMap::new(); expected_beta.insert("do_other".to_string(), make_tool("do_other")); let mut expected = HashMap::new(); expected.insert("alpha".to_string(), expected_alpha); expected.insert("beta".to_string(), expected_beta); assert_eq!(group_tools_by_server(&tools), expected); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/all.rs
codex-rs/core/tests/all.rs
// Single integration test binary that aggregates all test modules. // The submodules live in `tests/all/`. mod suite;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/chat_completions_sse.rs
codex-rs/core/tests/chat_completions_sse.rs
use assert_matches::assert_matches; use codex_core::AuthManager; use std::sync::Arc; use tracing_test::traced_test; use codex_core::CodexAuth; use codex_core::ContentItem; use codex_core::ModelClient; use codex_core::ModelProviderInfo; use codex_core::Prompt; use codex_core::ResponseEvent; use codex_core::ResponseItem; use codex_core::WireApi; use codex_core::models_manager::manager::ModelsManager; use codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::models::ReasoningItemContent; use codex_protocol::protocol::SessionSource; use core_test_support::load_default_config_for_test; use core_test_support::skip_if_no_network; use futures::StreamExt; use tempfile::TempDir; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; async fn run_stream(sse_body: &str) -> Vec<ResponseEvent> { run_stream_with_bytes(sse_body.as_bytes()).await } async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> { let server = MockServer::start().await; let template = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_bytes(sse_body.to_vec()); Mock::given(method("POST")) .and(path("/v1/chat/completions")) .respond_with(template) .expect(1) .mount(&server) .await; let provider = ModelProviderInfo { name: "mock".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Chat, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, }; let codex_home = match TempDir::new() { Ok(dir) => dir, Err(e) => panic!("failed to create TempDir: {e}"), }; let mut config = load_default_config_for_test(&codex_home).await; config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); 
config.show_raw_agent_reasoning = true; let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; let config = Arc::new(config); let conversation_id = ConversationId::new(); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let auth_mode = auth_manager.get_auth_mode(); let model = ModelsManager::get_model_offline(config.model.as_deref()); let model_family = ModelsManager::construct_model_family_offline(model.as_str(), &config); let otel_manager = OtelManager::new( conversation_id, model.as_str(), model_family.slug.as_str(), None, Some("test@test.com".to_string()), auth_mode, false, "test".to_string(), SessionSource::Exec, ); let client = ModelClient::new( Arc::clone(&config), None, model_family, otel_manager, provider, effort, summary, conversation_id, SessionSource::Exec, ); let mut prompt = Prompt::default(); prompt.input = vec![ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "hello".to_string(), }], }]; let mut stream = match client.stream(&prompt).await { Ok(s) => s, Err(e) => panic!("stream chat failed: {e}"), }; let mut events = Vec::new(); while let Some(event) = stream.next().await { match event { Ok(ev) => events.push(ev), // We still collect the error to exercise telemetry and complete the task. Err(_e) => break, } } events } fn assert_message(item: &ResponseItem, expected: &str) { if let ResponseItem::Message { content, .. } = item { let text = content.iter().find_map(|part| match part { ContentItem::OutputText { text } | ContentItem::InputText { text } => Some(text), _ => None, }); let Some(text) = text else { panic!("message missing text: {item:?}"); }; assert_eq!(text, expected); } else { panic!("expected message item, got: {item:?}"); } } fn assert_reasoning(item: &ResponseItem, expected: &str) { if let ResponseItem::Reasoning { content: Some(parts), .. 
} = item { let mut combined = String::new(); for part in parts { match part { ReasoningItemContent::ReasoningText { text } | ReasoningItemContent::Text { text } => combined.push_str(text), } } assert_eq!(combined, expected); } else { panic!("expected reasoning item, got: {item:?}"); } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn streams_text_without_reasoning() { skip_if_no_network!(); let sse = concat!( "data: {\"choices\":[{\"delta\":{\"content\":\"hi\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{}}]}\n\n", "data: [DONE]\n\n", ); let events = run_stream(sse).await; assert_eq!(events.len(), 4, "unexpected events: {events:?}"); match &events[0] { ResponseEvent::OutputItemAdded(ResponseItem::Message { .. }) => {} other => panic!("expected initial assistant item, got {other:?}"), } match &events[1] { ResponseEvent::OutputTextDelta(text) => assert_eq!(text, "hi"), other => panic!("expected text delta, got {other:?}"), } match &events[2] { ResponseEvent::OutputItemDone(item) => assert_message(item, "hi"), other => panic!("expected terminal message, got {other:?}"), } assert_matches!(events[3], ResponseEvent::Completed { .. }); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn streams_reasoning_from_string_delta() { skip_if_no_network!(); let sse = concat!( "data: {\"choices\":[{\"delta\":{\"reasoning\":\"think1\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{\"content\":\"ok\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{} ,\"finish_reason\":\"stop\"}]}\n\n", ); let events = run_stream(sse).await; assert_eq!(events.len(), 7, "unexpected events: {events:?}"); match &events[0] { ResponseEvent::OutputItemAdded(ResponseItem::Reasoning { .. 
}) => {} other => panic!("expected initial reasoning item, got {other:?}"), } match &events[1] { ResponseEvent::ReasoningContentDelta { delta, content_index, } => { assert_eq!(delta, "think1"); assert_eq!(content_index, &0); } other => panic!("expected reasoning delta, got {other:?}"), } match &events[2] { ResponseEvent::OutputItemAdded(ResponseItem::Message { .. }) => {} other => panic!("expected initial message item, got {other:?}"), } match &events[3] { ResponseEvent::OutputTextDelta(text) => assert_eq!(text, "ok"), other => panic!("expected text delta, got {other:?}"), } match &events[4] { ResponseEvent::OutputItemDone(item) => assert_reasoning(item, "think1"), other => panic!("expected terminal reasoning, got {other:?}"), } match &events[5] { ResponseEvent::OutputItemDone(item) => assert_message(item, "ok"), other => panic!("expected terminal message, got {other:?}"), } assert_matches!(events[6], ResponseEvent::Completed { .. }); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn streams_reasoning_from_object_delta() { skip_if_no_network!(); let sse = concat!( "data: {\"choices\":[{\"delta\":{\"reasoning\":{\"text\":\"partA\"}}}]}\n\n", "data: {\"choices\":[{\"delta\":{\"reasoning\":{\"content\":\"partB\"}}}]}\n\n", "data: {\"choices\":[{\"delta\":{\"content\":\"answer\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{} ,\"finish_reason\":\"stop\"}]}\n\n", ); let events = run_stream(sse).await; assert_eq!(events.len(), 8, "unexpected events: {events:?}"); match &events[0] { ResponseEvent::OutputItemAdded(ResponseItem::Reasoning { .. 
}) => {} other => panic!("expected initial reasoning item, got {other:?}"), } match &events[1] { ResponseEvent::ReasoningContentDelta { delta, content_index, } => { assert_eq!(delta, "partA"); assert_eq!(content_index, &0); } other => panic!("expected reasoning delta, got {other:?}"), } match &events[2] { ResponseEvent::ReasoningContentDelta { delta, content_index, } => { assert_eq!(delta, "partB"); assert_eq!(content_index, &1); } other => panic!("expected reasoning delta, got {other:?}"), } match &events[3] { ResponseEvent::OutputItemAdded(ResponseItem::Message { .. }) => {} other => panic!("expected initial message item, got {other:?}"), } match &events[4] { ResponseEvent::OutputTextDelta(text) => assert_eq!(text, "answer"), other => panic!("expected text delta, got {other:?}"), } match &events[5] { ResponseEvent::OutputItemDone(item) => assert_reasoning(item, "partApartB"), other => panic!("expected terminal reasoning, got {other:?}"), } match &events[6] { ResponseEvent::OutputItemDone(item) => assert_message(item, "answer"), other => panic!("expected terminal message, got {other:?}"), } assert_matches!(events[7], ResponseEvent::Completed { .. }); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn streams_reasoning_from_final_message() { skip_if_no_network!(); let sse = "data: {\"choices\":[{\"message\":{\"reasoning\":\"final-cot\"},\"finish_reason\":\"stop\"}]}\n\n"; let events = run_stream(sse).await; assert_eq!(events.len(), 4, "unexpected events: {events:?}"); match &events[0] { ResponseEvent::OutputItemAdded(ResponseItem::Reasoning { .. 
}) => {} other => panic!("expected initial reasoning item, got {other:?}"), } match &events[1] { ResponseEvent::ReasoningContentDelta { delta, content_index, } => { assert_eq!(delta, "final-cot"); assert_eq!(content_index, &0); } other => panic!("expected reasoning delta, got {other:?}"), } match &events[2] { ResponseEvent::OutputItemDone(item) => assert_reasoning(item, "final-cot"), other => panic!("expected reasoning item, got {other:?}"), } assert_matches!(events[3], ResponseEvent::Completed { .. }); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn streams_reasoning_before_tool_call() { skip_if_no_network!(); let sse = concat!( "data: {\"choices\":[{\"delta\":{\"reasoning\":\"pre-tool\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{\"tool_calls\":[{\"id\":\"call_1\",\"type\":\"function\",\"function\":{\"name\":\"run\",\"arguments\":\"{}\"}}]},\"finish_reason\":\"tool_calls\"}]}\n\n", ); let events = run_stream(sse).await; assert_eq!(events.len(), 5, "unexpected events: {events:?}"); match &events[0] { ResponseEvent::OutputItemAdded(ResponseItem::Reasoning { .. }) => {} other => panic!("expected initial reasoning item, got {other:?}"), } match &events[1] { ResponseEvent::ReasoningContentDelta { delta, content_index, } => { assert_eq!(delta, "pre-tool"); assert_eq!(content_index, &0); } other => panic!("expected reasoning delta, got {other:?}"), } match &events[2] { ResponseEvent::OutputItemDone(item) => assert_reasoning(item, "pre-tool"), other => panic!("expected reasoning item, got {other:?}"), } match &events[3] { ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { name, arguments, call_id, .. }) => { assert_eq!(name, "run"); assert_eq!(arguments, "{}"); assert_eq!(call_id, "call_1"); } other => panic!("expected function call, got {other:?}"), } assert_matches!(events[4], ResponseEvent::Completed { .. 
}); } #[tokio::test] #[traced_test] async fn chat_sse_emits_failed_on_parse_error() { skip_if_no_network!(); let sse_body = concat!("data: not-json\n\n", "data: [DONE]\n\n"); let _ = run_stream(sse_body).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.api_request") && line.contains("http.response.status_code=200") }) .map(|_| Ok(())) .unwrap_or(Err("cannot find codex.api_request event".to_string())) }); logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("error.message") && line.contains("expected ident at line 1 column 2") }) .map(|_| Ok(())) .unwrap_or(Err("cannot find SSE event".to_string())) }); } #[tokio::test] #[traced_test] async fn chat_sse_done_chunk_emits_event() { skip_if_no_network!(); let sse_body = "data: [DONE]\n\n"; let _ = run_stream(sse_body).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| line.contains("codex.sse_event") && line.contains("event.kind=message")) .map(|_| Ok(())) .unwrap_or(Err("cannot find SSE event".to_string())) }); } #[tokio::test] #[traced_test] async fn chat_sse_emits_error_on_invalid_utf8() { skip_if_no_network!(); let _ = run_stream_with_bytes(b"data: \x80\x80\n\n").await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("error.message") && line.contains("UTF8 error: invalid utf-8 sequence of 1 bytes from index 0") }) .map(|_| Ok(())) .unwrap_or(Err("cannot find SSE event".to_string())) }); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/responses_headers.rs
codex-rs/core/tests/responses_headers.rs
use std::sync::Arc; use codex_app_server_protocol::AuthMode; use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::ContentItem; use codex_core::ModelClient; use codex_core::ModelProviderInfo; use codex_core::Prompt; use codex_core::ResponseEvent; use codex_core::ResponseItem; use codex_core::WireApi; use codex_core::models_manager::manager::ModelsManager; use codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use core_test_support::load_default_config_for_test; use core_test_support::responses; use futures::StreamExt; use tempfile::TempDir; use wiremock::matchers::header; #[tokio::test] async fn responses_stream_includes_subagent_header_on_review() { core_test_support::skip_if_no_network!(); let server = responses::start_mock_server().await; let response_body = responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_completed("resp-1"), ]); let request_recorder = responses::mount_sse_once_match( &server, header("x-openai-subagent", "review"), response_body, ) .await; let provider = ModelProviderInfo { name: "mock".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, }; let codex_home = TempDir::new().expect("failed to create TempDir"); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; let model = ModelsManager::get_model_offline(config.model.as_deref()); config.model = 
Some(model.clone()); let config = Arc::new(config); let conversation_id = ConversationId::new(); let auth_mode = AuthMode::ChatGPT; let session_source = SessionSource::SubAgent(SubAgentSource::Review); let model_family = ModelsManager::construct_model_family_offline(model.as_str(), &config); let otel_manager = OtelManager::new( conversation_id, model.as_str(), model_family.slug.as_str(), None, Some("test@test.com".to_string()), Some(auth_mode), false, "test".to_string(), session_source.clone(), ); let client = ModelClient::new( Arc::clone(&config), None, model_family, otel_manager, provider, effort, summary, conversation_id, session_source, ); let mut prompt = Prompt::default(); prompt.input = vec![ResponseItem::Message { id: None, role: "user".into(), content: vec![ContentItem::InputText { text: "hello".into(), }], }]; let mut stream = client.stream(&prompt).await.expect("stream failed"); while let Some(event) = stream.next().await { if matches!(event, Ok(ResponseEvent::Completed { .. })) { break; } } let request = request_recorder.single_request(); assert_eq!( request.header("x-openai-subagent").as_deref(), Some("review") ); } #[tokio::test] async fn responses_stream_includes_subagent_header_on_other() { core_test_support::skip_if_no_network!(); let server = responses::start_mock_server().await; let response_body = responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_completed("resp-1"), ]); let request_recorder = responses::mount_sse_once_match( &server, header("x-openai-subagent", "my-task"), response_body, ) .await; let provider = ModelProviderInfo { name: "mock".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, }; let codex_home = 
TempDir::new().expect("failed to create TempDir"); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; let model = ModelsManager::get_model_offline(config.model.as_deref()); config.model = Some(model.clone()); let config = Arc::new(config); let conversation_id = ConversationId::new(); let auth_mode = AuthMode::ChatGPT; let session_source = SessionSource::SubAgent(SubAgentSource::Other("my-task".to_string())); let model_family = ModelsManager::construct_model_family_offline(model.as_str(), &config); let otel_manager = OtelManager::new( conversation_id, model.as_str(), model_family.slug.as_str(), None, Some("test@test.com".to_string()), Some(auth_mode), false, "test".to_string(), session_source.clone(), ); let client = ModelClient::new( Arc::clone(&config), None, model_family, otel_manager, provider, effort, summary, conversation_id, session_source, ); let mut prompt = Prompt::default(); prompt.input = vec![ResponseItem::Message { id: None, role: "user".into(), content: vec![ContentItem::InputText { text: "hello".into(), }], }]; let mut stream = client.stream(&prompt).await.expect("stream failed"); while let Some(event) = stream.next().await { if matches!(event, Ok(ResponseEvent::Completed { .. 
})) { break; } } let request = request_recorder.single_request(); assert_eq!( request.header("x-openai-subagent").as_deref(), Some("my-task") ); } #[tokio::test] async fn responses_respects_model_family_overrides_from_config() { core_test_support::skip_if_no_network!(); let server = responses::start_mock_server().await; let response_body = responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_completed("resp-1"), ]); let request_recorder = responses::mount_sse_once(&server, response_body).await; let provider = ModelProviderInfo { name: "mock".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, }; let codex_home = TempDir::new().expect("failed to create TempDir"); let mut config = load_default_config_for_test(&codex_home).await; config.model = Some("gpt-3.5-turbo".to_string()); config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); config.model_supports_reasoning_summaries = Some(true); config.model_reasoning_summary = ReasoningSummary::Detailed; let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; let model = config.model.clone().expect("model configured"); let config = Arc::new(config); let conversation_id = ConversationId::new(); let auth_mode = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")).get_auth_mode(); let session_source = SessionSource::SubAgent(SubAgentSource::Other("override-check".to_string())); let model_family = ModelsManager::construct_model_family_offline(model.as_str(), &config); let otel_manager = OtelManager::new( conversation_id, model.as_str(), model_family.slug.as_str(), None, Some("test@test.com".to_string()), auth_mode, 
false, "test".to_string(), session_source.clone(), ); let client = ModelClient::new( Arc::clone(&config), None, model_family, otel_manager, provider, effort, summary, conversation_id, session_source, ); let mut prompt = Prompt::default(); prompt.input = vec![ResponseItem::Message { id: None, role: "user".into(), content: vec![ContentItem::InputText { text: "hello".into(), }], }]; let mut stream = client.stream(&prompt).await.expect("stream failed"); while let Some(event) = stream.next().await { if matches!(event, Ok(ResponseEvent::Completed { .. })) { break; } } let request = request_recorder.single_request(); let body = request.body_json(); let reasoning = body .get("reasoning") .and_then(|value| value.as_object()) .cloned(); assert!( reasoning.is_some(), "reasoning should be present when config enables summaries" ); assert_eq!( reasoning .as_ref() .and_then(|value| value.get("summary")) .and_then(|value| value.as_str()), Some("detailed") ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/chat_completions_payload.rs
codex-rs/core/tests/chat_completions_payload.rs
#![allow(clippy::expect_used)] use std::sync::Arc; use codex_app_server_protocol::AuthMode; use codex_core::ContentItem; use codex_core::LocalShellAction; use codex_core::LocalShellExecAction; use codex_core::LocalShellStatus; use codex_core::ModelClient; use codex_core::ModelProviderInfo; use codex_core::Prompt; use codex_core::ResponseItem; use codex_core::WireApi; use codex_core::models_manager::manager::ModelsManager; use codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::models::ReasoningItemContent; use codex_protocol::protocol::SessionSource; use core_test_support::load_default_config_for_test; use core_test_support::skip_if_no_network; use futures::StreamExt; use serde_json::Value; use tempfile::TempDir; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; async fn run_request(input: Vec<ResponseItem>) -> Value { let server = MockServer::start().await; let template = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw( "data: {\"choices\":[{\"delta\":{}}]}\n\ndata: [DONE]\n\n", "text/event-stream", ); Mock::given(method("POST")) .and(path("/v1/chat/completions")) .respond_with(template) .expect(1) .mount(&server) .await; let provider = ModelProviderInfo { name: "mock".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Chat, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, }; let codex_home = match TempDir::new() { Ok(dir) => dir, Err(e) => panic!("failed to create TempDir: {e}"), }; let mut config = load_default_config_for_test(&codex_home).await; config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); 
config.show_raw_agent_reasoning = true; let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; let config = Arc::new(config); let conversation_id = ConversationId::new(); let model = ModelsManager::get_model_offline(config.model.as_deref()); let model_family = ModelsManager::construct_model_family_offline(model.as_str(), &config); let otel_manager = OtelManager::new( conversation_id, model.as_str(), model_family.slug.as_str(), None, Some("test@test.com".to_string()), Some(AuthMode::ApiKey), false, "test".to_string(), SessionSource::Exec, ); let client = ModelClient::new( Arc::clone(&config), None, model_family, otel_manager, provider, effort, summary, conversation_id, SessionSource::Exec, ); let mut prompt = Prompt::default(); prompt.input = input; let mut stream = match client.stream(&prompt).await { Ok(s) => s, Err(e) => panic!("stream chat failed: {e}"), }; while let Some(event) = stream.next().await { if let Err(e) = event { panic!("stream event error: {e}"); } } let all_requests = server.received_requests().await.expect("received requests"); let requests: Vec<_> = all_requests .iter() .filter(|req| req.method == "POST" && req.url.path().ends_with("/chat/completions")) .collect(); let request = requests .first() .unwrap_or_else(|| panic!("expected POST request to /chat/completions")); match request.body_json() { Ok(v) => v, Err(e) => panic!("invalid json body: {e}"), } } fn user_message(text: &str) -> ResponseItem { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: text.to_string(), }], } } fn assistant_message(text: &str) -> ResponseItem { ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: text.to_string(), }], } } fn reasoning_item(text: &str) -> ResponseItem { ResponseItem::Reasoning { id: String::new(), summary: Vec::new(), content: Some(vec![ReasoningItemContent::ReasoningText { text: text.to_string(), }]), 
encrypted_content: None, } } fn function_call() -> ResponseItem { ResponseItem::FunctionCall { id: None, name: "f".to_string(), arguments: "{}".to_string(), call_id: "c1".to_string(), } } fn local_shell_call() -> ResponseItem { ResponseItem::LocalShellCall { id: Some("id1".to_string()), call_id: None, status: LocalShellStatus::InProgress, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string()], timeout_ms: Some(1_000), working_directory: None, env: None, user: None, }), } } fn messages_from(body: &Value) -> Vec<Value> { match body["messages"].as_array() { Some(arr) => arr.clone(), None => panic!("messages array missing"), } } fn first_assistant(messages: &[Value]) -> &Value { match messages.iter().find(|msg| msg["role"] == "assistant") { Some(v) => v, None => panic!("assistant message not present"), } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn omits_reasoning_when_none_present() { skip_if_no_network!(); let body = run_request(vec![user_message("u1"), assistant_message("a1")]).await; let messages = messages_from(&body); let assistant = first_assistant(&messages); assert_eq!(assistant["content"], Value::String("a1".into())); assert!(assistant.get("reasoning").is_none()); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn attaches_reasoning_to_previous_assistant() { skip_if_no_network!(); let body = run_request(vec![ user_message("u1"), assistant_message("a1"), reasoning_item("rA"), ]) .await; let messages = messages_from(&body); let assistant = first_assistant(&messages); assert_eq!(assistant["content"], Value::String("a1".into())); assert_eq!(assistant["reasoning"], Value::String("rA".into())); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn attaches_reasoning_to_function_call_anchor() { skip_if_no_network!(); let body = run_request(vec![ user_message("u1"), reasoning_item("rFunc"), function_call(), ]) .await; let messages = messages_from(&body); let assistant = 
first_assistant(&messages); assert_eq!(assistant["reasoning"], Value::String("rFunc".into())); let tool_calls = match assistant["tool_calls"].as_array() { Some(arr) => arr, None => panic!("tool call list missing"), }; assert_eq!(tool_calls.len(), 1); assert_eq!(tool_calls[0]["type"], Value::String("function".into())); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn attaches_reasoning_to_local_shell_call() { skip_if_no_network!(); let body = run_request(vec![ user_message("u1"), reasoning_item("rShell"), local_shell_call(), ]) .await; let messages = messages_from(&body); let assistant = first_assistant(&messages); assert_eq!(assistant["reasoning"], Value::String("rShell".into())); assert_eq!( assistant["tool_calls"][0]["type"], Value::String("local_shell_call".into()) ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn drops_reasoning_when_last_role_is_user() { skip_if_no_network!(); let body = run_request(vec![ assistant_message("aPrev"), reasoning_item("rHist"), user_message("uNew"), ]) .await; let messages = messages_from(&body); assert!(messages.iter().all(|msg| msg.get("reasoning").is_none())); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn ignores_reasoning_before_last_user() { skip_if_no_network!(); let body = run_request(vec![ user_message("u1"), assistant_message("a1"), user_message("u2"), reasoning_item("rAfterU1"), ]) .await; let messages = messages_from(&body); assert!(messages.iter().all(|msg| msg.get("reasoning").is_none())); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn skips_empty_reasoning_segments() { skip_if_no_network!(); let body = run_request(vec![ user_message("u1"), assistant_message("a1"), reasoning_item(""), reasoning_item(" "), ]) .await; let messages = messages_from(&body); let assistant = first_assistant(&messages); assert!(assistant.get("reasoning").is_none()); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn 
suppresses_duplicate_assistant_messages() { skip_if_no_network!(); let body = run_request(vec![assistant_message("dup"), assistant_message("dup")]).await; let messages = messages_from(&body); let assistant_messages: Vec<_> = messages .iter() .filter(|msg| msg["role"] == "assistant") .collect(); assert_eq!(assistant_messages.len(), 1); assert_eq!( assistant_messages[0]["content"], Value::String("dup".into()) ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/quota_exceeded.rs
codex-rs/core/tests/suite/quota_exceeded.rs
use anyhow::Result; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use serde_json::json; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn quota_exceeded_emits_single_error_event() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex(); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), json!({ "type": "response.failed", "response": { "id": "resp-1", "error": { "code": "insufficient_quota", "message": "You exceeded your current quota, please check your plan and billing details." } } }), ]), ) .await; let test = builder.build(&server).await?; test.codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "quota?".into(), }], }) .await .unwrap(); let mut error_events = 0; loop { let event = wait_for_event(&test.codex, |_| true).await; match event { EventMsg::Error(err) => { error_events += 1; assert_eq!( err.message, "Quota exceeded. Check your plan and billing details." ); } EventMsg::TaskComplete(_) => break, _ => {} } } assert_eq!(error_events, 1, "expected exactly one Codex:Error event"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/remote_models.rs
codex-rs/core/tests/suite/remote_models.rs
#![cfg(not(target_os = "windows"))] // unified exec is not supported on Windows OS use std::sync::Arc; use anyhow::Result; use codex_core::CodexAuth; use codex_core::CodexConversation; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::built_in_model_providers; use codex_core::config::Config; use codex_core::features::Feature; use codex_core::models_manager::manager::ModelsManager; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::ExecCommandSource; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::openai_models::ReasoningEffortPreset; use codex_protocol::openai_models::TruncationPolicyConfig; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_models_once; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::skip_if_no_network; use core_test_support::skip_if_sandbox; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; use serde_json::json; use tempfile::TempDir; use tokio::time::Duration; use tokio::time::Instant; use tokio::time::sleep; use wiremock::BodyPrintLimit; use wiremock::MockServer; const REMOTE_MODEL_SLUG: 
&str = "codex-test"; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_models_remote_model_uses_unified_exec() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); let server = MockServer::builder() .body_print_limit(BodyPrintLimit::Limited(80_000)) .start() .await; let remote_model = ModelInfo { slug: REMOTE_MODEL_SLUG.to_string(), display_name: "Remote Test".to_string(), description: Some("A remote model that requires the test shell".to_string()), default_reasoning_level: ReasoningEffort::Medium, supported_reasoning_levels: vec![ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: ReasoningEffort::Medium.to_string(), }], shell_type: ConfigShellToolType::UnifiedExec, visibility: ModelVisibility::List, supported_in_api: true, priority: 1, upgrade: None, base_instructions: None, supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: None, truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, experimental_supported_tools: Vec::new(), }; let models_mock = mount_models_once( &server, ModelsResponse { models: vec![remote_model], }, ) .await; let harness = build_remote_models_harness(&server, |config| { config.features.enable(Feature::RemoteModels); config.model = Some("gpt-5.1".to_string()); }) .await?; let RemoteModelsHarness { codex, cwd, config, conversation_manager, .. 
} = harness; let models_manager = conversation_manager.get_models_manager(); let available_model = wait_for_model_available(&models_manager, REMOTE_MODEL_SLUG, &config).await; assert_eq!(available_model.model, REMOTE_MODEL_SLUG); let requests = models_mock.requests(); assert_eq!( requests.len(), 1, "expected a single /models refresh request for the remote models feature" ); assert_eq!(requests[0].url.path(), "/v1/models"); let family = models_manager .construct_model_family(REMOTE_MODEL_SLUG, &config) .await; assert_eq!(family.shell_type, ConfigShellToolType::UnifiedExec); codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: None, sandbox_policy: None, model: Some(REMOTE_MODEL_SLUG.to_string()), effort: None, summary: None, }) .await?; let call_id = "call"; let args = json!({ "cmd": "/bin/echo call", "yield_time_ms": 250, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run call".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: REMOTE_MODEL_SLUG.to_string(), effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) .await; assert_eq!(begin_event.source, ExecCommandSource::UnifiedExecStartup); wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_models_apply_remote_base_instructions() -> Result<()> { 
skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); let server = MockServer::builder() .body_print_limit(BodyPrintLimit::Limited(80_000)) .start() .await; let model = "test-gpt-5-remote"; let remote_base = "Use the remote base instructions only."; let remote_model = ModelInfo { slug: model.to_string(), display_name: "Parallel Remote".to_string(), description: Some("A remote model with custom instructions".to_string()), default_reasoning_level: ReasoningEffort::Medium, supported_reasoning_levels: vec![ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: ReasoningEffort::Medium.to_string(), }], shell_type: ConfigShellToolType::ShellCommand, visibility: ModelVisibility::List, supported_in_api: true, priority: 1, upgrade: None, base_instructions: Some(remote_base.to_string()), supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: None, truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, experimental_supported_tools: Vec::new(), }; mount_models_once( &server, ModelsResponse { models: vec![remote_model], }, ) .await; let response_mock = mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_assistant_message("msg-1", "done"), ev_completed("resp-1"), ]), ) .await; let harness = build_remote_models_harness(&server, |config| { config.features.enable(Feature::RemoteModels); config.model = Some("gpt-5.1".to_string()); }) .await?; let RemoteModelsHarness { codex, cwd, config, conversation_manager, .. 
} = harness; let models_manager = conversation_manager.get_models_manager(); wait_for_model_available(&models_manager, model, &config).await; codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: None, sandbox_policy: None, model: Some(model.to_string()), effort: None, summary: None, }) .await?; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello remote".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: model.to_string(), effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let body = response_mock.single_request().body_json(); let instructions = body["instructions"].as_str().unwrap(); assert_eq!(instructions, remote_base); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_models_preserve_builtin_presets() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); let server = MockServer::start().await; let remote_model = test_remote_model("remote-alpha", ModelVisibility::List, 0); let models_mock = mount_models_once( &server, ModelsResponse { models: vec![remote_model.clone()], }, ) .await; let codex_home = TempDir::new()?; let mut config = load_default_config_for_test(&codex_home).await; config.features.enable(Feature::RemoteModels); let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing(); let provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let manager = ModelsManager::with_provider( codex_core::auth::AuthManager::from_auth_for_testing(auth), provider, ); manager .refresh_available_models_with_cache(&config) .await .expect("refresh succeeds"); let available = manager.list_models(&config).await; let remote = available .iter() .find(|model| model.model == "remote-alpha") .expect("remote model 
should be listed"); let mut expected_remote: ModelPreset = remote_model.into(); expected_remote.is_default = true; assert_eq!(*remote, expected_remote); assert!( available .iter() .any(|model| model.model == "gpt-5.1-codex-max"), "builtin presets should remain available after refresh" ); assert_eq!( models_mock.requests().len(), 1, "expected a single /models request" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_models_hide_picker_only_models() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); let server = MockServer::start().await; let remote_model = test_remote_model("codex-auto-balanced", ModelVisibility::Hide, 0); mount_models_once( &server, ModelsResponse { models: vec![remote_model], }, ) .await; let codex_home = TempDir::new()?; let mut config = load_default_config_for_test(&codex_home).await; config.features.enable(Feature::RemoteModels); let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing(); let provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let manager = ModelsManager::with_provider( codex_core::auth::AuthManager::from_auth_for_testing(auth), provider, ); let selected = manager.get_model(&None, &config).await; assert_eq!(selected, "gpt-5.2-codex"); let available = manager.list_models(&config).await; assert!( available .iter() .all(|model| model.model != "codex-auto-balanced"), "hidden models should not appear in the picker list" ); Ok(()) } async fn wait_for_model_available( manager: &Arc<ModelsManager>, slug: &str, config: &Config, ) -> ModelPreset { let deadline = Instant::now() + Duration::from_secs(2); loop { if let Some(model) = { let guard = manager.list_models(config).await; guard.iter().find(|model| model.model == slug).cloned() } { return model; } if Instant::now() >= deadline { panic!("timed out waiting for the remote model {slug} to appear"); } sleep(Duration::from_millis(25)).await; } } struct 
RemoteModelsHarness { codex: Arc<CodexConversation>, cwd: Arc<TempDir>, config: Config, conversation_manager: Arc<ConversationManager>, } // todo(aibrahim): move this to with_model_provier in test_codex async fn build_remote_models_harness<F>( server: &MockServer, mutate_config: F, ) -> Result<RemoteModelsHarness> where F: FnOnce(&mut Config), { let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing(); let home = Arc::new(TempDir::new()?); let cwd = Arc::new(TempDir::new()?); let mut config = load_default_config_for_test(&home).await; config.cwd = cwd.path().to_path_buf(); config.features.enable(Feature::RemoteModels); let provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; config.model_provider = provider.clone(); mutate_config(&mut config); let conversation_manager = Arc::new(ConversationManager::with_models_provider(auth, provider)); let new_conversation = conversation_manager .new_conversation(config.clone()) .await?; Ok(RemoteModelsHarness { codex: new_conversation.conversation, cwd, config, conversation_manager, }) } fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> ModelInfo { ModelInfo { slug: slug.to_string(), display_name: format!("{slug} display"), description: Some(format!("{slug} description")), default_reasoning_level: ReasoningEffort::Medium, supported_reasoning_levels: vec![ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: ReasoningEffort::Medium.to_string(), }], shell_type: ConfigShellToolType::ShellCommand, visibility, supported_in_api: true, priority, upgrade: None, base_instructions: None, supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: None, truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, experimental_supported_tools: Vec::new(), } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/codex_delegate.rs
codex-rs/core/tests/suite/codex_delegate.rs
use codex_core::config::Constrained;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::ReviewDecision;
use codex_core::protocol::ReviewRequest;
use codex_core::protocol::ReviewTarget;
use codex_core::protocol::SandboxPolicy;
use codex_core::sandboxing::SandboxPermissions;
use core_test_support::responses::ev_apply_patch_function_call;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_reasoning_item_added;
use core_test_support::responses::ev_reasoning_summary_text_delta;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;

/// Delegate should surface ExecApprovalRequest from sub-agent and proceed
/// after parent submits an approval decision.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn codex_delegate_forwards_exec_approval_and_proceeds_on_approval() {
    skip_if_no_network!();
    // Sub-agent turn 1: emit a shell_command function_call requiring approval, then complete.
    let call_id = "call-exec-1";
    // RequireEscalated forces the exec to need explicit approval under the
    // read-only sandbox configured below.
    let args = serde_json::json!({
        "command": "rm -rf delegated",
        "timeout_ms": 1000,
        "sandbox_permissions": SandboxPermissions::RequireEscalated,
    })
    .to_string();
    let sse1 = sse(vec![
        ev_response_created("resp-1"),
        ev_function_call(call_id, "shell_command", &args),
        ev_completed("resp-1"),
    ]);
    // Sub-agent turn 2: return structured review output and complete.
    let review_json = serde_json::json!({
        "findings": [],
        "overall_correctness": "ok",
        "overall_explanation": "delegate approved exec",
        "overall_confidence_score": 0.5
    })
    .to_string();
    let sse2 = sse(vec![
        ev_response_created("resp-2"),
        ev_assistant_message("msg-1", &review_json),
        ev_completed("resp-2"),
    ]);
    let server = start_mock_server().await;
    // The two SSE bodies are served in order: one per sub-agent turn.
    mount_sse_sequence(&server, vec![sse1, sse2]).await;
    // Build a conversation configured to require approvals so the delegate
    // routes ExecApprovalRequest via the parent.
    let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
        config.sandbox_policy = Constrained::allow_any(SandboxPolicy::ReadOnly);
    });
    let test = builder.build(&server).await.expect("build test codex");
    // Kick off review (sub-agent starts internally).
    test.codex
        .submit(Op::Review {
            review_request: ReviewRequest {
                target: ReviewTarget::Custom {
                    instructions: "Please review".to_string(),
                },
                user_facing_hint: None,
            },
        })
        .await
        .expect("submit review");
    // Lifecycle: Entered -> ExecApprovalRequest -> Exited(Some) -> TaskComplete.
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::EnteredReviewMode(_))
    })
    .await;
    // Expect parent-side approval request (forwarded by delegate).
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::ExecApprovalRequest(_))
    })
    .await;
    // Approve via parent; id "0" is the active sub_id in tests.
    test.codex
        .submit(Op::ExecApproval {
            id: "0".into(),
            decision: ReviewDecision::Approved,
        })
        .await
        .expect("submit exec approval");
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::ExitedReviewMode(_))
    })
    .await;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

/// Delegate should surface ApplyPatchApprovalRequest and honor parent decision
/// so the sub-agent can proceed to completion.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn codex_delegate_forwards_patch_approval_and_proceeds_on_decision() {
    skip_if_no_network!();
    // Sub-agent turn 1: an apply_patch call that adds a file, forcing a patch approval.
    let call_id = "call-patch-1";
    let patch = "*** Begin Patch\n*** Add File: delegated.txt\n+hello\n*** End Patch\n";
    let sse1 = sse(vec![
        ev_response_created("resp-1"),
        ev_apply_patch_function_call(call_id, patch),
        ev_completed("resp-1"),
    ]);
    // Sub-agent turn 2: structured review output, then complete.
    let review_json = serde_json::json!({
        "findings": [],
        "overall_correctness": "ok",
        "overall_explanation": "delegate patch handled",
        "overall_confidence_score": 0.5
    })
    .to_string();
    let sse2 = sse(vec![
        ev_response_created("resp-2"),
        ev_assistant_message("msg-1", &review_json),
        ev_completed("resp-2"),
    ]);
    let server = start_mock_server().await;
    mount_sse_sequence(&server, vec![sse1, sse2]).await;
    let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
        // Use a restricted sandbox so patch approval is required
        config.sandbox_policy = Constrained::allow_any(SandboxPolicy::ReadOnly);
        config.include_apply_patch_tool = true;
    });
    let test = builder.build(&server).await.expect("build test codex");
    test.codex
        .submit(Op::Review {
            review_request: ReviewRequest {
                target: ReviewTarget::Custom {
                    instructions: "Please review".to_string(),
                },
                user_facing_hint: None,
            },
        })
        .await
        .expect("submit review");
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::EnteredReviewMode(_))
    })
    .await;
    // The delegate must forward the sub-agent's patch approval to the parent.
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::ApplyPatchApprovalRequest(_))
    })
    .await;
    // Deny via parent so delegate can continue; id "0" is the active sub_id in tests.
    test.codex
        .submit(Op::PatchApproval {
            id: "0".into(),
            decision: ReviewDecision::Denied,
        })
        .await
        .expect("submit patch approval");
    wait_for_event(&test.codex, |ev| {
        matches!(ev, EventMsg::ExitedReviewMode(_))
    })
    .await;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}

/// The delegate should emit exactly one new-style reasoning delta and one
/// legacy reasoning delta for a response carrying reasoning summary deltas.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn codex_delegate_ignores_legacy_deltas() {
    skip_if_no_network!();
    // Single response with reasoning summary deltas.
    let sse_stream = sse(vec![
        ev_response_created("resp-1"),
        ev_reasoning_item_added("reason-1", &["initial"]),
        ev_reasoning_summary_text_delta("think-1"),
        ev_completed("resp-1"),
    ]);
    let server = start_mock_server().await;
    mount_sse_sequence(&server, vec![sse_stream]).await;
    let mut builder = test_codex();
    let test = builder.build(&server).await.expect("build test codex");
    // Kick off review (delegated).
    test.codex
        .submit(Op::Review {
            review_request: ReviewRequest {
                target: ReviewTarget::Custom {
                    instructions: "Please review".to_string(),
                },
                user_facing_hint: None,
            },
        })
        .await
        .expect("submit review");
    // Drain all events until TaskComplete, counting both delta flavors.
    let mut reasoning_delta_count = 0;
    let mut legacy_reasoning_delta_count = 0;
    loop {
        let ev = wait_for_event(&test.codex, |_| true).await;
        match ev {
            EventMsg::ReasoningContentDelta(_) => reasoning_delta_count += 1,
            EventMsg::AgentReasoningDelta(_) => legacy_reasoning_delta_count += 1,
            EventMsg::TaskComplete(_) => break,
            _ => {}
        }
    }
    assert_eq!(reasoning_delta_count, 1, "expected one new reasoning delta");
    assert_eq!(
        legacy_reasoning_delta_count, 1,
        "expected one legacy reasoning delta"
    );
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/stream_no_completed.rs
codex-rs/core/tests/suite/stream_no_completed.rs
//! Verifies that the agent retries when the SSE stream terminates before
//! delivering a `response.completed` event.
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::user_input::UserInput;
use core_test_support::load_sse_fixture;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::Request;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

/// SSE body that ends without a `response.completed` event.
fn sse_incomplete() -> String {
    load_sse_fixture("tests/fixtures/incomplete_sse.json")
}

/// SSE body for a fully completed response with the given id.
fn sse_completed(id: &str) -> String {
    load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn retries_on_early_close() {
    skip_if_no_network!();
    let server = MockServer::start().await;

    // Stateful responder: first call serves the truncated stream, every
    // subsequent call serves a completed one.
    struct SeqResponder;
    impl Respond for SeqResponder {
        fn respond(&self, _: &Request) -> ResponseTemplate {
            use std::sync::atomic::AtomicUsize;
            use std::sync::atomic::Ordering;
            // NOTE(review): process-wide static — fine while this responder is
            // used by a single test; would leak state across tests if reused.
            static CALLS: AtomicUsize = AtomicUsize::new(0);
            let n = CALLS.fetch_add(1, Ordering::SeqCst);
            if n == 0 {
                ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_raw(sse_incomplete(), "text/event-stream")
            } else {
                ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_raw(sse_completed("resp_ok"), "text/event-stream")
            }
        }
    }

    // `.expect(2)` asserts exactly two POSTs: the failed attempt plus one retry.
    Mock::given(method("POST"))
        .and(path("/v1/responses"))
        .respond_with(SeqResponder {})
        .expect(2)
        .mount(&server)
        .await;
    // Configure retry behavior explicitly to avoid mutating process-wide
    // environment variables.
    let model_provider = ModelProviderInfo {
        name: "openai".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        // Environment variable that should exist in the test environment.
        // ModelClient will return an error if the environment variable for the
        // provider is not set.
        env_key: Some("PATH".into()),
        env_key_instructions: None,
        experimental_bearer_token: None,
        wire_api: WireApi::Responses,
        query_params: None,
        http_headers: None,
        env_http_headers: None,
        // exercise retry path: first attempt yields incomplete stream, so allow 1 retry
        request_max_retries: Some(0),
        stream_max_retries: Some(1),
        stream_idle_timeout_ms: Some(2000),
        requires_openai_auth: false,
    };
    let TestCodex { codex, .. } = test_codex()
        .with_config(move |config| {
            config.model_provider = model_provider;
        })
        .build(&server)
        .await
        .unwrap();
    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello".into(),
            }],
        })
        .await
        .unwrap();
    // Wait until TaskComplete (should succeed after retry).
    wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await;
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/truncation.rs
codex-rs/core/tests/suite/truncation.rs
#![cfg(not(target_os = "windows"))]
#![allow(clippy::unwrap_used, clippy::expect_used)]
use anyhow::Context;
use anyhow::Result;
use codex_core::config::types::McpServerConfig;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::user_input::UserInput;
use core_test_support::assert_regex_match;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use escargot::CargoBuild;
use serde_json::Value;
use serde_json::json;
use std::collections::HashMap;
use std::time::Duration;

// Verifies byte-truncation formatting for function error output (RespondToModel errors)
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn truncate_function_error_trims_respond_to_model() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let mut builder = test_codex().with_model("test-gpt-5.1-codex");
    let test = builder.build(&server).await?;
    // Construct a very long, non-existent path to force a RespondToModel error with a large message
    let long_path = "long path text should trigger truncation".repeat(8_000);
    let call_id = "grep-huge-error";
    let args = json!({
        "pattern": "alpha",
        "path": long_path,
        "limit": 10
    });
    // Turn 1: model issues the failing grep_files call; turn 2: model finishes.
    let responses = vec![
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "grep_files", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    ];
    let mock = mount_sse_sequence(&server, responses).await;
    test.submit_turn_with_policy(
        "trigger grep_files with long path to test truncation",
        SandboxPolicy::DangerFullAccess,
    )
    .await?;
    let output = mock
        .function_call_output_text(call_id)
        .context("function error output present")?;
    tracing::debug!(output = %output, "truncated function error output");
    // Expect plaintext with token-based truncation marker and no omitted-lines marker
    assert!(
        serde_json::from_str::<serde_json::Value>(&output).is_err(),
        "expected error output to be plain text",
    );
    assert!(
        !output.contains("Total output lines:"),
        "error output should not include line-based truncation header: {output}",
    );
    let truncated_pattern = r"(?s)^unable to access `.*tokens truncated.*$";
    assert_regex_match(truncated_pattern, &output);
    assert!(
        !output.contains("omitted"),
        "line omission marker should not appear when no lines were dropped: {output}"
    );
    Ok(())
}

// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_configured_limit_chars_type() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Use a model that exposes the shell_command tool.
    // A high configured token limit means the 100k-line output fits untruncated.
    let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| {
        config.tool_output_token_limit = Some(100_000);
    });
    let fixture = builder.build(&server).await?;
    let call_id = "shell-too-large";
    let command = if cfg!(windows) {
        "for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
    } else {
        "seq 1 100000"
    };
    let args = serde_json::json!({
        "command": command,
        "timeout_ms": 5_000,
    });
    // First response: model tells us to run the tool; second: complete the turn.
    mount_sse_once(
        &server,
        sse(vec![
            responses::ev_response_created("resp-1"),
            responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            responses::ev_assistant_message("msg-1", "done"),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;
    fixture
        .submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
        .await?;
    // Inspect what we sent back to the model; it should contain a truncated
    // function_call_output for the shell call.
    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for shell call")?;
    let output = output.replace("\r\n", "\n");
    // Expect plain text (not JSON) containing the entire shell output.
    assert!(
        serde_json::from_str::<Value>(&output).is_err(),
        "expected truncated shell output to be plain text"
    );
    assert!(
        (400000..=401000).contains(&output.len()),
        "we should be almost 100k tokens"
    );
    assert!(
        !output.contains("tokens truncated"),
        "shell output should not contain tokens truncated marker: {output}"
    );
    Ok(())
}

// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_exceeds_limit_truncated_chars_limit() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Use a model that exposes the shell_command tool.
    // No explicit token limit here: the default char-based limit applies.
    let mut builder = test_codex().with_model("gpt-5.1");
    let fixture = builder.build(&server).await?;
    let call_id = "shell-too-large";
    let command = if cfg!(windows) {
        "for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
    } else {
        "seq 1 100000"
    };
    let args = serde_json::json!({
        "command": command,
        "timeout_ms": 5_000,
    });
    // First response: model tells us to run the tool; second: complete the turn.
    mount_sse_once(
        &server,
        sse(vec![
            responses::ev_response_created("resp-1"),
            responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            responses::ev_assistant_message("msg-1", "done"),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;
    fixture
        .submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
        .await?;
    // Inspect what we sent back to the model; it should contain a truncated
    // function_call_output for the shell call.
    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for shell call")?;
    let output = output.replace("\r\n", "\n");
    // Expect plain text (not JSON) containing the entire shell output.
    assert!(
        serde_json::from_str::<Value>(&output).is_err(),
        "expected truncated shell output to be plain text"
    );
    // Header lines plus an inline "…N chars truncated…" marker are expected.
    let truncated_pattern = r#"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 100000\nOutput:\n.*?…\d+ chars truncated….*$"#;
    assert_regex_match(truncated_pattern, &output);
    let len = output.len();
    assert!(
        (9_900..=10_100).contains(&len),
        "expected ~10k chars after truncation, got {len}"
    );
    Ok(())
}

// Verifies that a standard tool call (shell_command) exceeding the model formatting
// limits is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    // Use a model that exposes the shell_command tool.
    let mut builder = test_codex().with_model("gpt-5.1-codex");
    let fixture = builder.build(&server).await?;
    let call_id = "shell-too-large";
    let command = if cfg!(windows) {
        "for ($i=1; $i -le 100000; $i++) { Write-Output $i }"
    } else {
        "seq 1 100000"
    };
    let args = serde_json::json!({
        "command": command,
        "timeout_ms": 5_000,
    });
    // First response: model tells us to run the tool; second: complete the turn.
    mount_sse_once(
        &server,
        sse(vec![
            responses::ev_response_created("resp-1"),
            responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            responses::ev_assistant_message("msg-1", "done"),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;
    fixture
        .submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
        .await?;
    // Inspect what we sent back to the model; it should contain a truncated
    // function_call_output for the shell call.
    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for shell call")?;
    let output = output.replace("\r\n", "\n");
    // Expect plain text (not JSON) containing the entire shell output.
    assert!(
        serde_json::from_str::<Value>(&output).is_err(),
        "expected truncated shell output to be plain text"
    );
    // Head and tail of the numeric sequence are preserved around the
    // token-based truncation marker.
    let truncated_pattern = r#"(?s)^Exit code: 0
Wall time: [0-9]+(?:\.[0-9]+)? seconds
Total output lines: 100000
Output:
1
2
3
4
5
6
.*…137224 tokens truncated.*
99999
100000
$"#;
    assert_regex_match(truncated_pattern, &output);
    Ok(())
}

// Ensures shell_command outputs that exceed the line limit are truncated only once.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_call_output_truncated_only_once() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let mut builder = test_codex().with_model("gpt-5.1-codex");
    let fixture = builder.build(&server).await?;
    let call_id = "shell-single-truncation";
    let command = if cfg!(windows) {
        "for ($i=1; $i -le 10000; $i++) { Write-Output $i }"
    } else {
        "seq 1 10000"
    };
    let args = serde_json::json!({
        "command": command,
        "timeout_ms": 5_000,
    });
    mount_sse_once(
        &server,
        sse(vec![
            responses::ev_response_created("resp-1"),
            responses::ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            responses::ev_assistant_message("msg-1", "done"),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;
    fixture
        .submit_turn_with_policy("trigger big shell output", SandboxPolicy::DangerFullAccess)
        .await?;
    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for shell call")?;
    // Exactly one truncation marker proves we did not truncate an
    // already-truncated payload a second time.
    let truncation_markers = output.matches("tokens truncated").count();
    assert_eq!(
        truncation_markers, 1,
        "shell output should carry only one truncation marker: {output}"
    );
    Ok(())
}

// Verifies that an MCP tool call result exceeding the model formatting limits
// is truncated before being sent back to the model.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let call_id = "rmcp-truncated";
    let server_name = "rmcp";
    let tool_name = format!("mcp__{server_name}__echo");
    // Build a very large message to exceed 10KiB once serialized.
    let large_msg = "long-message-with-newlines-".repeat(6000);
    let args_json = serde_json::json!({ "message": large_msg });
    mount_sse_once(
        &server,
        sse(vec![
            responses::ev_response_created("resp-1"),
            responses::ev_function_call(call_id, &tool_name, &args_json.to_string()),
            responses::ev_completed("resp-1"),
        ]),
    )
    .await;
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            responses::ev_assistant_message("msg-1", "rmcp echo tool completed."),
            responses::ev_completed("resp-2"),
        ]),
    )
    .await;
    // Compile the rmcp stdio test server and configure it.
    let rmcp_test_server_bin = CargoBuild::new()
        .package("codex-rmcp-client")
        .bin("test_stdio_server")
        .run()?
        .path()
        .to_string_lossy()
        .into_owned();
    let mut builder = test_codex().with_config(move |config| {
        config.mcp_servers.insert(
            server_name.to_string(),
            codex_core::config::types::McpServerConfig {
                transport: codex_core::config::types::McpServerTransportConfig::Stdio {
                    command: rmcp_test_server_bin,
                    args: Vec::new(),
                    env: None,
                    env_vars: Vec::new(),
                    cwd: None,
                },
                enabled: true,
                startup_timeout_sec: Some(std::time::Duration::from_secs(10)),
                tool_timeout_sec: None,
                enabled_tools: None,
                disabled_tools: None,
            },
        );
        // Small token budget so the echoed payload must be truncated.
        config.tool_output_token_limit = Some(500);
    });
    let fixture = builder.build(&server).await?;
    fixture
        .submit_turn_with_policy(
            "call the rmcp echo tool with a very large message",
            SandboxPolicy::ReadOnly,
        )
        .await?;
    // The MCP tool call output is converted to a function_call_output for the model.
    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for rmcp call")?;
    assert!(
        !output.contains("Total output lines:"),
        "MCP output should not include line-based truncation header: {output}"
    );
    let truncated_pattern = r#"(?s)^\{"echo":\s*"ECHOING: long-message-with-newlines-.*tokens truncated.*long-message-with-newlines-.*$"#;
    assert_regex_match(truncated_pattern, &output);
    assert!(output.len() < 2500, "{}", output.len());
    Ok(())
}

// Verifies that an MCP image tool output is serialized as content_items array with
// the image preserved and no truncation summary appended (since there are no text items).
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let call_id = "rmcp-image-no-trunc";
    let server_name = "rmcp";
    let tool_name = format!("mcp__{server_name}__image");
    mount_sse_once(
        &server,
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, &tool_name, "{}"),
            ev_completed("resp-1"),
        ]),
    )
    .await;
    let final_mock = mount_sse_once(
        &server,
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    )
    .await;
    // Build the stdio rmcp server and pass a tiny PNG via data URL so it can construct ImageContent.
    let rmcp_test_server_bin = CargoBuild::new()
        .package("codex-rmcp-client")
        .bin("test_stdio_server")
        .run()?
        .path()
        .to_string_lossy()
        .into_owned();
    // 1x1 PNG data URL
    let openai_png = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMB/ee9bQAAAABJRU5ErkJggg==";
    let mut builder = test_codex().with_config(move |config| {
        config.mcp_servers.insert(
            server_name.to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::Stdio {
                    command: rmcp_test_server_bin,
                    args: Vec::new(),
                    env: Some(HashMap::from([(
                        "MCP_TEST_IMAGE_DATA_URL".to_string(),
                        openai_png.to_string(),
                    )])),
                    env_vars: Vec::new(),
                    cwd: None,
                },
                enabled: true,
                startup_timeout_sec: Some(Duration::from_secs(10)),
                tool_timeout_sec: None,
                enabled_tools: None,
                disabled_tools: None,
            },
        );
    });
    let fixture = builder.build(&server).await?;
    let session_model = fixture.session_configured.model.clone();
    fixture
        .codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
                text: "call the rmcp image tool".into(),
            }],
            final_output_json_schema: None,
            cwd: fixture.cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            model: session_model,
            effort: None,
            summary: ReasoningSummary::Auto,
        })
        .await?;
    // Wait for completion to ensure the outbound request is captured.
    wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
    let output_item = final_mock.single_request().function_call_output(call_id);
    // Expect exactly one array element: the image item; and no trailing summary text.
    let output = output_item.get("output").expect("output");
    assert!(output.is_array(), "expected array output");
    let arr = output.as_array().unwrap();
    assert_eq!(arr.len(), 1, "no truncation summary should be appended");
    assert_eq!(
        arr[0],
        json!({"type": "input_image", "image_url": openai_png})
    );
    Ok(())
}

// Token-based policy should report token counts even when truncation is byte-estimated.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn token_policy_marker_reports_tokens() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex() .with_model("gpt-5.1-codex") .with_config(|config| { config.tool_output_token_limit = Some(50); // small budget to force truncation }); let fixture = builder.build(&server).await?; let call_id = "shell-token-marker"; let args = json!({ "command": "seq 1 150", "timeout_ms": 5_000, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let done_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; fixture .submit_turn_with_policy("run the shell tool", SandboxPolicy::DangerFullAccess) .await?; let output = done_mock .single_request() .function_call_output_text(call_id) .context("shell output present")?; let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*tokens truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$"; assert_regex_match(pattern, &output); Ok(()) } // Byte-based policy should report bytes removed. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn byte_policy_marker_reports_bytes() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| { config.tool_output_token_limit = Some(50); // ~200 byte cap }); let fixture = builder.build(&server).await?; let call_id = "shell-byte-marker"; let args = json!({ "command": "seq 1 150", "timeout_ms": 5_000, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let done_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; fixture .submit_turn_with_policy("run the shell tool", SandboxPolicy::DangerFullAccess) .await?; let output = done_mock .single_request() .function_call_output_text(call_id) .context("shell output present")?; let pattern = r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nTotal output lines: 150\nOutput:\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19.*chars truncated.*129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n$"; assert_regex_match(pattern, &output); Ok(()) } // shell_command output should remain intact when the config opts into a large token budget. 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_output_not_truncated_with_custom_limit() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex() .with_model("gpt-5.1-codex") .with_config(|config| { config.tool_output_token_limit = Some(50_000); // ample budget }); let fixture = builder.build(&server).await?; let call_id = "shell-no-trunc"; let args = json!({ "command": "seq 1 1000", "timeout_ms": 5_000, }); let expected_body: String = (1..=1000).map(|i| format!("{i}\n")).collect(); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let done_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; fixture .submit_turn_with_policy( "run big output without truncation", SandboxPolicy::DangerFullAccess, ) .await?; let output = done_mock .single_request() .function_call_output_text(call_id) .context("shell output present")?; assert!( output.ends_with(&expected_body), "expected entire shell output when budget increased: {output}" ); assert!( !output.contains("truncated"), "output should remain untruncated with ample budget" ); Ok(()) } // MCP server output should also remain intact when the config increases the token limit. 
#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn mcp_tool_call_output_not_truncated_with_custom_limit() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let call_id = "rmcp-untruncated"; let server_name = "rmcp"; let tool_name = format!("mcp__{server_name}__echo"); let large_msg = "a".repeat(80_000); let args_json = serde_json::json!({ "message": large_msg }); mount_sse_once( &server, sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, &args_json.to_string()), responses::ev_completed("resp-1"), ]), ) .await; let mock2 = mount_sse_once( &server, sse(vec![ responses::ev_assistant_message("msg-1", "rmcp echo tool completed."), responses::ev_completed("resp-2"), ]), ) .await; let rmcp_test_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_stdio_server") .run()? .path() .to_string_lossy() .into_owned(); let mut builder = test_codex().with_config(move |config| { config.tool_output_token_limit = Some(50_000); config.mcp_servers.insert( server_name.to_string(), codex_core::config::types::McpServerConfig { transport: codex_core::config::types::McpServerTransportConfig::Stdio { command: rmcp_test_server_bin, args: Vec::new(), env: None, env_vars: Vec::new(), cwd: None, }, enabled: true, startup_timeout_sec: Some(std::time::Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }); let fixture = builder.build(&server).await?; fixture .submit_turn_with_policy( "call the rmcp echo tool with a very large message", SandboxPolicy::ReadOnly, ) .await?; let output = mock2 .single_request() .function_call_output_text(call_id) .context("function_call_output present for rmcp call")?; let parsed: Value = serde_json::from_str(&output)?; assert_eq!( output.len(), 80031, "parsed MCP output should retain its serialized length" ); let expected_echo = format!("ECHOING: {large_msg}"); let echo_str = parsed["echo"] .as_str() 
.context("echo field should be a string in rmcp echo output")?; assert_eq!( echo_str.len(), expected_echo.len(), "echo length should match" ); assert_eq!(echo_str, expected_echo); assert!( !output.contains("truncated"), "output should not include truncation markers when limit is raised: {output}" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/shell_command.rs
codex-rs/core/tests/suite/shell_command.rs
use anyhow::Result; use codex_core::features::Feature; use core_test_support::assert_regex_match; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::skip_if_no_network; use core_test_support::skip_if_windows; use core_test_support::test_codex::TestCodexBuilder; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use serde_json::json; use test_case::test_case; fn shell_responses_with_timeout( call_id: &str, command: &str, login: Option<bool>, timeout_ms: i64, ) -> Vec<String> { let args = json!({ "command": command, "timeout_ms": timeout_ms, "login": login, }); #[allow(clippy::expect_used)] let arguments = serde_json::to_string(&args).expect("serialize shell command arguments"); vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &arguments), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ] } fn shell_responses(call_id: &str, command: &str, login: Option<bool>) -> Vec<String> { shell_responses_with_timeout(call_id, command, login, 2_000) } async fn shell_command_harness_with( configure: impl FnOnce(TestCodexBuilder) -> TestCodexBuilder, ) -> Result<TestCodexHarness> { let builder = configure(test_codex()).with_config(|config| { config.include_apply_patch_tool = true; }); TestCodexHarness::with_builder(builder).await } async fn mount_shell_responses( harness: &TestCodexHarness, call_id: &str, command: &str, login: Option<bool>, ) { mount_sse_sequence(harness.server(), shell_responses(call_id, command, login)).await; } async fn mount_shell_responses_with_timeout( harness: &TestCodexHarness, call_id: &str, command: &str, login: Option<bool>, timeout_ms: 
i64, ) { mount_sse_sequence( harness.server(), shell_responses_with_timeout(call_id, command, login, timeout_ms), ) .await; } fn assert_shell_command_output(output: &str, expected: &str) -> Result<()> { let normalized_output = output .replace("\r\n", "\n") .replace('\r', "\n") .trim_end_matches('\n') .to_string(); let expected_pattern = format!( r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nOutput:\n{expected}\n?$" ); assert_regex_match(&expected_pattern, &normalized_output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_works() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call"; mount_shell_responses(&harness, call_id, "echo 'hello, world'", None).await; harness.submit("run the echo command").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "hello, world")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn output_with_login() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call-login-true"; mount_shell_responses(&harness, call_id, "echo 'hello, world'", Some(true)).await; harness.submit("run the echo command with login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "hello, world")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn output_without_login() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call-login-false"; mount_shell_responses(&harness, call_id, "echo 'hello, world'", Some(false)).await; harness.submit("run the echo command without login").await?; let 
output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "hello, world")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn multi_line_output_with_login() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call-first-extra-login"; mount_shell_responses( &harness, call_id, "echo 'first line\nsecond line'", Some(true), ) .await; harness.submit("run the command with login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "first line\nsecond line")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn pipe_output_with_login() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); skip_if_windows!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call-second-extra-no-login"; mount_shell_responses(&harness, call_id, "echo 'hello, world' | cat", None).await; harness.submit("run the command without login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "hello, world")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn pipe_output_without_login() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); skip_if_windows!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-call-third-extra-login-false"; mount_shell_responses(&harness, call_id, "echo 'hello, world' | cat", Some(false)).await; harness.submit("run the command without login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "hello, world")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_times_out_with_timeout_ms() -> 
anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let call_id = "shell-command-timeout"; let command = if cfg!(windows) { "timeout /t 5" } else { "sleep 5" }; mount_shell_responses_with_timeout(&harness, call_id, command, None, 200).await; harness .submit("run a long command with a short timeout") .await?; let output = harness.function_call_stdout(call_id).await; let normalized_output = output .replace("\r\n", "\n") .replace('\r', "\n") .trim_end_matches('\n') .to_string(); let expected_pattern = r"(?s)^Exit code: 124\nWall time: [0-9]+(?:\.[0-9]+)? seconds\nOutput:\ncommand timed out after [0-9]+ milliseconds\n?$"; assert_regex_match(expected_pattern, &normalized_output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(true ; "with_login")] #[test_case(false ; "without_login")] async fn unicode_output(login: bool) -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| { builder.with_model("gpt-5.2").with_config(|config| { config.features.enable(Feature::PowershellUtf8); }) }) .await?; let call_id = "unicode_output"; mount_shell_responses( &harness, call_id, "git -c alias.say='!printf \"%s\" \"naïve_café\"' say", Some(login), ) .await; harness.submit("run the command without login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "naïve_café")?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(true ; "with_login")] #[test_case(false ; "without_login")] async fn unicode_output_with_newlines(login: bool) -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| { builder.with_model("gpt-5.2").with_config(|config| { config.features.enable(Feature::PowershellUtf8); }) }) .await?; let call_id = "unicode_output"; mount_shell_responses( &harness, call_id, "echo 
'line1\nnaïve café\nline3'", Some(login), ) .await; harness.submit("run the command without login").await?; let output = harness.function_call_stdout(call_id).await; assert_shell_command_output(&output, "line1\\nnaïve café\\nline3")?; Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/models_etag_responses.rs
codex-rs/core/tests/suite/models_etag_responses.rs
#![cfg(not(target_os = "windows"))] use std::sync::Arc; use anyhow::Result; use codex_core::CodexAuth; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::user_input::UserInput; use core_test_support::responses; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_local_shell_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::sse; use core_test_support::responses::sse_response; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use wiremock::MockServer; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn refresh_models_on_models_etag_mismatch_and_avoid_duplicate_models_fetch() -> Result<()> { skip_if_no_network!(Ok(())); const ETAG_1: &str = "\"models-etag-1\""; const ETAG_2: &str = "\"models-etag-2\""; const CALL_ID: &str = "local-shell-call-1"; let server = MockServer::start().await; // 1) On spawn, Codex fetches /models and stores the ETag. let spawn_models_mock = responses::mount_models_once_with_etag( &server, ModelsResponse { models: Vec::new() }, ETAG_1, ) .await; let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing(); let mut builder = test_codex() .with_auth(auth) .with_model("gpt-5") .with_config(|config| { config.features.enable(Feature::RemoteModels); // Keep this test deterministic: no request retries, and a small stream retry budget. 
config.model_provider.request_max_retries = Some(0); config.model_provider.stream_max_retries = Some(1); }); let test = builder.build(&server).await?; let codex = Arc::clone(&test.codex); let cwd = Arc::clone(&test.cwd); let session_model = test.session_configured.model.clone(); assert_eq!(spawn_models_mock.requests().len(), 1); assert_eq!(spawn_models_mock.single_request_path(), "/v1/models"); // 2) If the server sends a different X-Models-Etag on /responses, Codex refreshes /models. let refresh_models_mock = responses::mount_models_once_with_etag( &server, ModelsResponse { models: Vec::new() }, ETAG_2, ) .await; // First /responses request (user message) succeeds and returns a tool call. // It also includes a mismatched X-Models-Etag, which should trigger a /models refresh. let first_response_body = sse(vec![ ev_response_created("resp-1"), ev_local_shell_call(CALL_ID, "completed", vec!["/bin/echo", "etag ok"]), ev_completed("resp-1"), ]); responses::mount_response_once( &server, sse_response(first_response_body).insert_header("X-Models-Etag", ETAG_2), ) .await; // Second /responses request (tool output) includes the same X-Models-Etag; Codex should not // refetch /models again after it has already refreshed the catalog. 
let completion_response_body = sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let tool_output_mock = responses::mount_response_once( &server, sse_response(completion_response_body).insert_header("X-Models-Etag", ETAG_2), ) .await; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please run a tool".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let _ = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Assert /models was refreshed exactly once after the X-Models-Etag mismatch. assert_eq!(refresh_models_mock.requests().len(), 1); assert_eq!(refresh_models_mock.single_request_path(), "/v1/models"); let refresh_req = refresh_models_mock .requests() .into_iter() .next() .expect("one request"); // Ensure Codex includes client_version on refresh. (This is a stable signal that we're using the /models client.) assert!( refresh_req .url .query_pairs() .any(|(k, _)| k == "client_version"), "expected /models refresh to include client_version query param" ); // Assert the tool output /responses request succeeded and did not trigger another /models fetch. let tool_req = tool_output_mock.single_request(); let _ = tool_req.function_call_output(CALL_ID); assert_eq!(refresh_models_mock.requests().len(), 1); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/apply_patch_cli.rs
codex-rs/core/tests/suite/apply_patch_cli.rs
#![allow(clippy::expect_used)] use anyhow::Result; use core_test_support::responses::ev_apply_patch_call; use core_test_support::responses::ev_apply_patch_custom_tool_call; use core_test_support::responses::ev_shell_command_call; use core_test_support::test_codex::ApplyPatchModelOutput; use pretty_assertions::assert_eq; use std::fs; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::assert_regex_match; use core_test_support::responses::ev_apply_patch_function_call; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodexBuilder; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use serde_json::json; use test_case::test_case; use wiremock::Mock; use wiremock::Respond; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path_regex; pub async fn apply_patch_harness() -> Result<TestCodexHarness> { apply_patch_harness_with(|builder| builder).await } async fn apply_patch_harness_with( configure: impl FnOnce(TestCodexBuilder) -> TestCodexBuilder, ) -> Result<TestCodexHarness> { let builder = configure(test_codex()).with_config(|config| { config.include_apply_patch_tool = true; }); TestCodexHarness::with_builder(builder).await } pub async fn mount_apply_patch( harness: &TestCodexHarness, call_id: &str, patch: 
&str, assistant_msg: &str, output_type: ApplyPatchModelOutput, ) { mount_sse_sequence( harness.server(), apply_patch_responses(call_id, patch, assistant_msg, output_type), ) .await; } fn apply_patch_responses( call_id: &str, patch: &str, assistant_msg: &str, output_type: ApplyPatchModelOutput, ) -> Vec<String> { vec![ sse(vec![ ev_response_created("resp-1"), ev_apply_patch_call(call_id, patch, output_type), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", assistant_msg), ev_completed("resp-2"), ]), ] } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_cli_multiple_operations_integration( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness_with(|builder| builder.with_model("gpt-5.1")).await?; // Seed workspace state let modify_path = harness.path("modify.txt"); let delete_path = harness.path("delete.txt"); fs::write(&modify_path, "line1\nline2\n")?; fs::write(&delete_path, "obsolete\n")?; let patch = "*** Begin Patch\n*** Add File: nested/new.txt\n+created\n*** Delete File: delete.txt\n*** Update File: modify.txt\n@@\n-line2\n+changed\n*** End Patch"; let call_id = "apply-multi-ops"; mount_apply_patch(&harness, call_id, patch, "done", output_type).await; harness.submit("please apply multi-ops patch").await?; let out = harness.apply_patch_output(call_id, output_type).await; let expected = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. 
Updated the following files: A nested/new.txt M modify.txt D delete.txt ?$"; assert_regex_match(expected, &out); assert_eq!( fs::read_to_string(harness.path("nested/new.txt"))?, "created\n" ); assert_eq!(fs::read_to_string(&modify_path)?, "line1\nchanged\n"); assert!(!delete_path.exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_multiple_chunks(model_output: ApplyPatchModelOutput) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let target = harness.path("multi.txt"); fs::write(&target, "line1\nline2\nline3\nline4\n")?; let patch = "*** Begin Patch\n*** Update File: multi.txt\n@@\n-line2\n+changed2\n@@\n-line4\n+changed4\n*** End Patch"; let call_id = "apply-multi-chunks"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply multi-chunk patch").await?; assert_eq!( fs::read_to_string(&target)?, "line1\nchanged2\nline3\nchanged4\n" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_moves_file_to_new_directory( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let original = harness.path("old/name.txt"); let new_path = harness.path("renamed/dir/name.txt"); fs::create_dir_all(original.parent().expect("parent"))?; fs::write(&original, "old content\n")?; let patch = "*** Begin Patch\n*** Update File: old/name.txt\n*** Move to: 
renamed/dir/name.txt\n@@\n-old content\n+new content\n*** End Patch"; let call_id = "apply-move"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply move patch").await?; assert!(!original.exists()); assert_eq!(fs::read_to_string(&new_path)?, "new content\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_updates_file_appends_trailing_newline( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let target = harness.path("no_newline.txt"); fs::write(&target, "no newline at end")?; let patch = "*** Begin Patch\n*** Update File: no_newline.txt\n@@\n-no newline at end\n+first line\n+second line\n*** End Patch"; let call_id = "apply-append-nl"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply newline patch").await?; let contents = fs::read_to_string(&target)?; assert!(contents.ends_with('\n')); assert_eq!(contents, "first line\nsecond line\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_insert_only_hunk_modifies_file( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let target = harness.path("insert_only.txt"); fs::write(&target, "alpha\nomega\n")?; let patch = "*** Begin Patch\n*** Update File: insert_only.txt\n@@\n alpha\n+beta\n omega\n*** End 
Patch"; let call_id = "apply-insert-only"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("insert lines via apply_patch").await?; assert_eq!(fs::read_to_string(&target)?, "alpha\nbeta\nomega\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_move_overwrites_existing_destination( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let original = harness.path("old/name.txt"); let destination = harness.path("renamed/dir/name.txt"); fs::create_dir_all(original.parent().expect("parent"))?; fs::create_dir_all(destination.parent().expect("parent"))?; fs::write(&original, "from\n")?; fs::write(&destination, "existing\n")?; let patch = "*** Begin Patch\n*** Update File: old/name.txt\n*** Move to: renamed/dir/name.txt\n@@\n-from\n+new\n*** End Patch"; let call_id = "apply-move-overwrite"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply move overwrite patch").await?; assert!(!original.exists()); assert_eq!(fs::read_to_string(&destination)?, "new\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_move_without_content_change_has_no_turn_diff( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let test = harness.test(); let codex = test.codex.clone(); let cwd = 
test.cwd.clone(); let original = harness.path("old/name.txt"); let destination = harness.path("renamed/name.txt"); fs::create_dir_all(original.parent().expect("parent should exist"))?; fs::write(&original, "same\n")?; let patch = "*** Begin Patch\n*** Update File: old/name.txt\n*** Move to: renamed/name.txt\n@@\n same\n*** End Patch"; let call_id = "apply-move-no-change"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; let model = test.session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "rename without content change".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_turn_diff = false; wait_for_event(&codex, |event| match event { EventMsg::TurnDiff(_) => { saw_turn_diff = true; false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; assert!(!saw_turn_diff, "pure rename should not emit a turn diff"); assert!(!original.exists()); assert_eq!(fs::read_to_string(&destination)?, "same\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_add_overwrites_existing_file( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let path = harness.path("duplicate.txt"); fs::write(&path, "old content\n")?; let patch = "*** Begin Patch\n*** Add File: duplicate.txt\n+new content\n*** End Patch"; let call_id = "apply-add-overwrite"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply add overwrite patch").await?; 
assert_eq!(fs::read_to_string(&path)?, "new content\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_rejects_invalid_hunk_header( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let patch = "*** Begin Patch\n*** Frobnicate File: foo\n*** End Patch"; let call_id = "apply-invalid-header"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply invalid header patch").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains("apply_patch verification failed"), "expected verification failure message" ); assert!( out.contains("is not a valid hunk header"), "expected parse diagnostics in output: {out:?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_reports_missing_context( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let target = harness.path("modify.txt"); fs::write(&target, "line1\nline2\n")?; let patch = "*** Begin Patch\n*** Update File: modify.txt\n@@\n-missing\n+changed\n*** End Patch"; let call_id = "apply-missing-context"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply missing context patch").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( 
out.contains("apply_patch verification failed"), "expected verification failure message" ); assert!(out.contains("Failed to find expected lines in")); assert_eq!(fs::read_to_string(&target)?, "line1\nline2\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_reports_missing_target_file( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let patch = "*** Begin Patch\n*** Update File: missing.txt\n@@\n-nope\n+better\n*** End Patch"; let call_id = "apply-missing-file"; mount_apply_patch(&harness, call_id, patch, "fail", model_output).await; harness.submit("attempt to update a missing file").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains("apply_patch verification failed"), "expected verification failure message" ); assert!( out.contains("Failed to read file to update"), "expected missing file diagnostics: {out}" ); assert!( out.contains("missing.txt"), "expected missing file path in diagnostics: {out}" ); assert!(!harness.path("missing.txt").exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_delete_missing_file_reports_error( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let patch = "*** Begin Patch\n*** Delete File: missing.txt\n*** End Patch"; let call_id = 
"apply-delete-missing"; mount_apply_patch(&harness, call_id, patch, "fail", model_output).await; harness.submit("attempt to delete missing file").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains("apply_patch verification failed"), "expected verification failure message: {out}" ); assert!( out.contains("Failed to read"), "missing delete diagnostics should mention read failure: {out}" ); assert!( out.contains("missing.txt"), "missing delete diagnostics should surface target path: {out}" ); assert!(!harness.path("missing.txt").exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_rejects_empty_patch(model_output: ApplyPatchModelOutput) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let patch = "*** Begin Patch\n*** End Patch"; let call_id = "apply-empty"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("apply empty patch").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains("patch rejected: empty patch"), "expected rejection for empty patch: {out}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_delete_directory_reports_verification_error( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; fs::create_dir(harness.path("dir"))?; let patch = 
"*** Begin Patch\n*** Delete File: dir\n*** End Patch"; let call_id = "apply-delete-dir"; mount_apply_patch(&harness, call_id, patch, "ok", model_output).await; harness.submit("delete a directory via apply_patch").await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!(out.contains("apply_patch verification failed")); assert!(out.contains("Failed to read")); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_rejects_path_traversal_outside_workspace( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let escape_path = harness .test() .cwd .path() .parent() .expect("cwd should have parent") .join("escape.txt"); let _ = fs::remove_file(&escape_path); let patch = "*** Begin Patch\n*** Add File: ../escape.txt\n+outside\n*** End Patch"; let call_id = "apply-path-traversal"; mount_apply_patch(&harness, call_id, patch, "fail", model_output).await; let sandbox_policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }; harness .submit_with_policy( "attempt to escape workspace via apply_patch", sandbox_policy, ) .await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains( "patch rejected: writing outside of the project; rejected by user approval settings" ), "expected rejection message for path traversal: {out}" ); assert!( !escape_path.exists(), "path traversal should be rejected; tool output: {out}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] 
#[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_rejects_move_path_traversal_outside_workspace( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let escape_path = harness .test() .cwd .path() .parent() .expect("cwd should have parent") .join("escape-move.txt"); let _ = fs::remove_file(&escape_path); let source = harness.path("stay.txt"); fs::write(&source, "from\n")?; let patch = "*** Begin Patch\n*** Update File: stay.txt\n*** Move to: ../escape-move.txt\n@@\n-from\n+to\n*** End Patch"; let call_id = "apply-move-traversal"; mount_apply_patch(&harness, call_id, patch, "fail", model_output).await; let sandbox_policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }; harness .submit_with_policy("attempt move traversal via apply_patch", sandbox_policy) .await?; let out = harness.apply_patch_output(call_id, model_output).await; assert!( out.contains( "patch rejected: writing outside of the project; rejected by user approval settings" ), "expected rejection message for path traversal: {out}" ); assert!( !escape_path.exists(), "move path traversal should be rejected; tool output: {out}" ); assert_eq!(fs::read_to_string(&source)?, "from\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] #[test_case(ApplyPatchModelOutput::ShellCommandViaHeredoc)] async fn apply_patch_cli_verification_failure_has_no_side_effects( model_output: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness_with(|builder| { builder.with_config(|config| 
{ config.features.enable(Feature::ApplyPatchFreeform); }) }) .await?; // Compose a patch that would create a file, then fail verification on an update. let call_id = "apply-partial-no-side-effects"; let patch = "*** Begin Patch\n*** Add File: created.txt\n+hello\n*** Update File: missing.txt\n@@\n-old\n+new\n*** End Patch"; mount_apply_patch(&harness, call_id, patch, "failed", model_output).await; harness.submit("attempt partial apply patch").await?; let created = harness.path("created.txt"); assert!( !created.exists(), "verification failure should prevent any filesystem changes" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_shell_command_heredoc_with_cd_updates_relative_workdir() -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness_with(|builder| builder.with_model("gpt-5.1")).await?; // Prepare a file inside a subdir; update it via cd && apply_patch heredoc form. let sub = harness.path("sub"); fs::create_dir_all(&sub)?; let target = sub.join("in_sub.txt"); fs::write(&target, "before\n")?; let script = "cd sub && apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: in_sub.txt\n@@\n-before\n+after\n*** End Patch\nEOF\n"; let call_id = "shell-heredoc-cd"; let bodies = vec![ sse(vec![ ev_response_created("resp-1"), ev_shell_command_call(call_id, script), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "ok"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), bodies).await; harness.submit("apply via shell heredoc with cd").await?; let out = harness.function_call_stdout(call_id).await; assert!( out.contains("Success."), "expected successful apply_patch invocation via shell_command: {out}" ); assert_eq!(fs::read_to_string(&target)?, "after\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_cli_can_use_shell_command_output_as_patch_input() -> Result<()> { skip_if_no_network!(Ok(())); let harness = 
apply_patch_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let source_contents = "line1\nnaïve café\nline3\n"; let source_path = harness.path("source.txt"); fs::write(&source_path, source_contents)?; let read_call_id = "read-source"; let apply_call_id = "apply-from-read"; fn stdout_from_shell_output(output: &str) -> String { let normalized = output.replace("\r\n", "\n").replace('\r', "\n"); normalized .split_once("Output:\n") .map(|x| x.1) .unwrap_or("") .trim_end_matches('\n') .to_string() } fn function_call_output_text(body: &serde_json::Value, call_id: &str) -> String { body.get("input") .and_then(serde_json::Value::as_array) .and_then(|items| { items.iter().find(|item| { item.get("type").and_then(serde_json::Value::as_str) == Some("function_call_output") && item.get("call_id").and_then(serde_json::Value::as_str) == Some(call_id) }) }) .and_then(|item| item.get("output").and_then(serde_json::Value::as_str)) .expect("function_call_output output string") .to_string() } struct DynamicApplyFromRead { num_calls: AtomicI32, read_call_id: String, apply_call_id: String, } impl Respond for DynamicApplyFromRead { fn respond(&self, request: &wiremock::Request) -> ResponseTemplate { let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst); match call_num { 0 => { let command = if cfg!(windows) { "Get-Content -Encoding utf8 source.txt" } else { "cat source.txt" }; let body = sse(vec![ ev_response_created("resp-1"), ev_shell_command_call(&self.read_call_id, command), ev_completed("resp-1"), ]); ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_string(body) } 1 => { let body_json: serde_json::Value = request.body_json().expect("request body should be json"); let read_output = function_call_output_text(&body_json, &self.read_call_id); eprintln!("read_output: \n{read_output}"); let stdout = stdout_from_shell_output(&read_output); eprintln!("stdout: \n{stdout}"); let patch_lines = stdout .lines() .map(|line| 
format!("+{line}")) .collect::<Vec<_>>() .join("\n"); let patch = format!( "*** Begin Patch\n*** Add File: target.txt\n{patch_lines}\n*** End Patch" ); eprintln!("patch: \n{patch}"); let body = sse(vec![ ev_response_created("resp-2"), ev_apply_patch_custom_tool_call(&self.apply_call_id, &patch), ev_completed("resp-2"), ]); ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_string(body) } 2 => { let body = sse(vec![ ev_assistant_message("msg-1", "ok"), ev_completed("resp-3"), ]); ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_string(body) } _ => panic!("no response for call {call_num}"), } } } let responder = DynamicApplyFromRead { num_calls: AtomicI32::new(0), read_call_id: read_call_id.to_string(), apply_call_id: apply_call_id.to_string(), }; Mock::given(method("POST")) .and(path_regex(".*/responses$")) .respond_with(responder) .expect(3) .mount(harness.server()) .await; harness .submit("read source.txt, then apply it to target.txt") .await?; let target_contents = fs::read_to_string(harness.path("target.txt"))?; assert_eq!(target_contents, source_contents); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness_with(|builder| builder.with_model("gpt-5.1")).await?; let test = harness.test(); let codex = test.codex.clone(); let cwd = test.cwd.clone(); // Prepare a file inside a subdir; update it via cd && apply_patch heredoc form. 
let sub = test.workspace_path("sub"); fs::create_dir_all(&sub)?; let target = sub.join("in_sub.txt"); fs::write(&target, "before\n")?; let script = "cd sub && apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: in_sub.txt\n@@\n-before\n+after\n*** End Patch\nEOF\n"; let call_id = "shell-heredoc-cd"; let args = json!({ "command": script, "timeout_ms": 5_000 }); let bodies = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "ok"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), bodies).await; let model = test.session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "apply via shell heredoc with cd".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_turn_diff = None; let mut saw_patch_begin = false; let mut patch_end_success = None; wait_for_event(&codex, |event| match event { EventMsg::PatchApplyBegin(begin) => { saw_patch_begin = true; assert_eq!(begin.call_id, call_id); false } EventMsg::PatchApplyEnd(end) => {
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/unified_exec.rs
codex-rs/core/tests/suite/unified_exec.rs
use std::collections::HashMap; use std::ffi::OsStr; use std::fs; use std::sync::OnceLock; use anyhow::Context; use anyhow::Result; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::ExecCommandSource; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::assert_regex_match; use core_test_support::process::wait_for_pid_file; use core_test_support::process::wait_for_process_exit; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::get_responses_request_bodies; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::skip_if_sandbox; use core_test_support::skip_if_windows; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use core_test_support::wait_for_event_with_timeout; use pretty_assertions::assert_eq; use regex_lite::Regex; use serde_json::Value; use serde_json::json; use tokio::time::Duration; fn extract_output_text(item: &Value) -> Option<&str> { item.get("output").and_then(|value| match value { Value::String(text) => Some(text.as_str()), Value::Object(obj) => obj.get("content").and_then(Value::as_str), _ => None, }) } #[derive(Debug)] struct ParsedUnifiedExecOutput { chunk_id: Option<String>, wall_time_seconds: f64, process_id: Option<String>, exit_code: Option<i32>, original_token_count: Option<usize>, output: String, } 
#[allow(clippy::expect_used)] fn parse_unified_exec_output(raw: &str) -> Result<ParsedUnifiedExecOutput> { static OUTPUT_REGEX: OnceLock<Regex> = OnceLock::new(); let regex = OUTPUT_REGEX.get_or_init(|| { Regex::new(concat!( r#"(?s)^(?:Total output lines: \d+\n\n)?"#, r#"(?:Chunk ID: (?P<chunk_id>[^\n]+)\n)?"#, r#"Wall time: (?P<wall_time>-?\d+(?:\.\d+)?) seconds\n"#, r#"(?:Process exited with code (?P<exit_code>-?\d+)\n)?"#, r#"(?:Process running with session ID (?P<process_id>-?\d+)\n)?"#, r#"(?:Original token count: (?P<original_token_count>\d+)\n)?"#, r#"Output:\n?(?P<output>.*)$"#, )) .expect("valid unified exec output regex") }); let cleaned = raw.trim_matches('\r'); let captures = regex .captures(cleaned) .ok_or_else(|| anyhow::anyhow!("missing Output section in unified exec output {raw}"))?; let chunk_id = captures .name("chunk_id") .map(|value| value.as_str().to_string()); let wall_time_seconds = captures .name("wall_time") .expect("wall_time group present") .as_str() .parse::<f64>() .context("failed to parse wall time seconds")?; let exit_code = captures .name("exit_code") .map(|value| { value .as_str() .parse::<i32>() .context("failed to parse exit code from unified exec output") }) .transpose()?; let process_id = captures .name("process_id") .map(|value| value.as_str().to_string()); let original_token_count = captures .name("original_token_count") .map(|value| { value .as_str() .parse::<usize>() .context("failed to parse original token count from unified exec output") }) .transpose()?; let output = captures .name("output") .expect("output group present") .as_str() .to_string(); Ok(ParsedUnifiedExecOutput { chunk_id, wall_time_seconds, process_id, exit_code, original_token_count, output, }) } fn collect_tool_outputs(bodies: &[Value]) -> Result<HashMap<String, ParsedUnifiedExecOutput>> { let mut outputs = HashMap::new(); for body in bodies { if let Some(items) = body.get("input").and_then(Value::as_array) { for item in items { if 
item.get("type").and_then(Value::as_str) != Some("function_call_output") { continue; } if let Some(call_id) = item.get("call_id").and_then(Value::as_str) { let content = extract_output_text(item) .ok_or_else(|| anyhow::anyhow!("missing tool output content"))?; let trimmed = content.trim(); if trimmed.is_empty() { continue; } let parsed = parse_unified_exec_output(content).with_context(|| { format!("failed to parse unified exec output for {call_id}") })?; outputs.insert(call_id.to_string(), parsed); } } } } Ok(outputs) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let builder = test_codex().with_config(|config| { config.include_apply_patch_tool = true; config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let harness = TestCodexHarness::with_builder(builder).await?; let patch = "*** Begin Patch\n*** Add File: uexec_apply.txt\n+hello from unified exec\n*** End Patch"; let command = format!("apply_patch <<'EOF'\n{patch}\nEOF\n"); let call_id = "uexec-apply-patch"; let args = json!({ "cmd": command, "yield_time_ms": 250, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), responses).await; let test = harness.test(); let codex = test.codex.clone(); let cwd = test.cwd_path().to_path_buf(); let session_model = test.session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "apply patch via unified exec".into(), }], final_output_json_schema: None, cwd, approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: 
session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_patch_begin = false; let mut patch_end = None; let mut saw_exec_begin = false; let mut saw_exec_end = false; wait_for_event(&codex, |event| match event { EventMsg::PatchApplyBegin(begin) if begin.call_id == call_id => { saw_patch_begin = true; assert!( begin .changes .keys() .any(|path| path.file_name() == Some(OsStr::new("uexec_apply.txt"))), "expected apply_patch changes to target uexec_apply.txt", ); false } EventMsg::PatchApplyEnd(end) if end.call_id == call_id => { patch_end = Some(end.clone()); false } EventMsg::ExecCommandBegin(event) if event.call_id == call_id => { saw_exec_begin = true; false } EventMsg::ExecCommandEnd(event) if event.call_id == call_id => { saw_exec_end = true; false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; assert!( saw_patch_begin, "expected apply_patch to emit PatchApplyBegin" ); let patch_end = patch_end.expect("expected apply_patch to emit PatchApplyEnd"); assert!( patch_end.success, "expected apply_patch to finish successfully: stdout={:?} stderr={:?}", patch_end.stdout, patch_end.stderr, ); assert!( !saw_exec_begin, "apply_patch should be intercepted before exec_command begin" ); assert!( !saw_exec_end, "apply_patch should not emit exec_command end events" ); let output = harness.function_call_stdout(call_id).await; assert!( output.contains("Success. 
Updated the following files:"), "expected apply_patch output, got: {output:?}" ); assert!( output.contains("A uexec_apply.txt"), "expected apply_patch file summary, got: {output:?}" ); assert_eq!( fs::read_to_string(harness.path("uexec_apply.txt"))?, "hello from unified exec\n" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_emits_exec_command_begin_event() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5").with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "uexec-begin-event"; let args = json!({ "shell": "bash".to_string(), "cmd": "/bin/echo hello unified exec".to_string(), "yield_time_ms": 250, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "emit begin event".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) .await; assert_command(&begin_event.command, "-lc", "/bin/echo hello unified exec"); assert_eq!(begin_event.cwd, cwd.path()); wait_for_event(&codex, |event| 
matches!(event, EventMsg::TaskComplete(_))).await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_resolves_relative_workdir() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5").with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let workdir_rel = std::path::PathBuf::from("uexec_relative_workdir"); std::fs::create_dir_all(cwd.path().join(&workdir_rel))?; let call_id = "uexec-workdir-relative"; let args = json!({ "cmd": "pwd", "yield_time_ms": 250, "workdir": workdir_rel.to_string_lossy().to_string(), }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run relative workdir test".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) .await; assert_eq!( begin_event.cwd, cwd.path().join(workdir_rel), "exec_command cwd should resolve relative workdir against turn cwd", ); wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; Ok(()) } #[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] #[ignore = "flaky"] async fn unified_exec_respects_workdir_override() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5").with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let workdir = cwd.path().join("uexec_workdir_test"); std::fs::create_dir_all(&workdir)?; let call_id = "uexec-workdir"; let args = json!({ "cmd": "pwd", "yield_time_ms": 250, "workdir": workdir.to_string_lossy().to_string(), }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run workdir test".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) .await; assert_eq!( begin_event.cwd, workdir, "exec_command cwd should reflect the requested workdir override" ); wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let requests = server.received_requests().await.expect("recorded requests"); assert!(!requests.is_empty(), "expected at least one POST request"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 
2)] async fn unified_exec_emits_exec_command_end_event() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "uexec-end-event"; let args = json!({ "cmd": "/bin/echo END-EVENT".to_string(), "yield_time_ms": 250, }); let poll_call_id = "uexec-end-event-poll"; let poll_args = json!({ "chars": "", "session_id": 1000, "yield_time_ms": 250, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_function_call( poll_call_id, "write_stdin", &serde_json::to_string(&poll_args)?, ), ev_completed("resp-2"), ]), sse(vec![ ev_response_created("resp-3"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-3"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "emit end event".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let end_event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; assert_eq!(end_event.exit_code, 0); assert!( end_event.aggregated_output.contains("END-EVENT"), "expected aggregated output to contain marker" ); wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 
2)] async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "uexec-delta-1"; let args = json!({ "cmd": "printf 'HELLO-UEXEC'", "yield_time_ms": 1000, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "emit delta".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let event = wait_for_event_match(&codex, |msg| match msg { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; let text = event.stdout; assert!( text.contains("HELLO-UEXEC"), "delta chunk missing expected text: {text:?}", ); wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; 
config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "uexec-full-lifecycle"; // This timing force the long-standing PTY let args = json!({ "cmd": "sleep 0.5; printf 'HELLO-FULL-LIFECYCLE'", "yield_time_ms": 1000, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "finished"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "exercise full unified exec lifecycle".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut begin_event = None; let mut end_event = None; let mut task_completed = false; loop { let msg = wait_for_event(&codex, |_| true).await; match msg { EventMsg::ExecCommandBegin(ev) if ev.call_id == call_id => begin_event = Some(ev), EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => { assert!( end_event.is_none(), "expected a single ExecCommandEnd event for this call id" ); end_event = Some(ev); if task_completed && end_event.is_some() { break; } } EventMsg::TaskComplete(_) => { task_completed = true; if task_completed && end_event.is_some() { break; } } _ => {} } } let begin_event = begin_event.expect("expected ExecCommandBegin event"); assert_eq!(begin_event.call_id, call_id); assert!( begin_event.process_id.is_some(), "begin event should include a process_id for a long-lived session" ); let end_event = end_event.expect("expected ExecCommandEnd event"); assert_eq!(end_event.call_id, call_id); assert_eq!(end_event.exit_code, 0); 
assert!( end_event.process_id.is_some(), "end event should include process_id emitted by background watcher" ); assert!( end_event.aggregated_output.contains("HELLO-FULL-LIFECYCLE"), "aggregated_output should contain the full PTY transcript; got {:?}", end_event.aggregated_output ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let open_call_id = "uexec-open"; let open_args = json!({ "cmd": "/bin/bash -i", "yield_time_ms": 200, }); let stdin_call_id = "uexec-stdin-delta"; let stdin_args = json!({ "chars": "echo WSTDIN-MARK\\n", "session_id": 1000, "yield_time_ms": 800, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call( open_call_id, "exec_command", &serde_json::to_string(&open_args)?, ), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_function_call( stdin_call_id, "write_stdin", &serde_json::to_string(&stdin_args)?, ), ev_completed("resp-2"), ]), sse(vec![ ev_response_created("resp-3"), ev_assistant_message("msg-1", "done"), ev_completed("resp-3"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "stdin delta".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut terminal_interaction = None; loop { let msg = wait_for_event(&codex, |_| 
true).await; match msg { EventMsg::TerminalInteraction(ev) if ev.call_id == open_call_id => { terminal_interaction = Some(ev); } EventMsg::TaskComplete(_) => break, _ => {} } } let delta = terminal_interaction.expect("expected TerminalInteraction event"); assert_eq!(delta.process_id, "1000"); let expected_stdin = stdin_args .get("chars") .and_then(Value::as_str) .expect("stdin chars"); assert_eq!(delta.stdin, expected_stdin); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let open_call_id = "uexec-delayed-open"; let open_args = json!({ "cmd": "sleep 3 && echo MARKER1 && sleep 3 && echo MARKER2", "yield_time_ms": 10, }); // Poll stdin three times: first for no output, second after the first marker, // and a final long poll to capture the second marker. 
let first_poll_call_id = "uexec-delayed-poll-1"; let first_poll_args = json!({ "chars": "", "session_id": 1000, "yield_time_ms": 10, }); let second_poll_call_id = "uexec-delayed-poll-2"; let second_poll_args = json!({ "chars": "", "session_id": 1000, "yield_time_ms": 4000, }); let third_poll_call_id = "uexec-delayed-poll-3"; let third_poll_args = json!({ "chars": "", "session_id": 1000, "yield_time_ms": 6000, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call( open_call_id, "exec_command", &serde_json::to_string(&open_args)?, ), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_function_call( first_poll_call_id, "write_stdin", &serde_json::to_string(&first_poll_args)?, ), ev_completed("resp-2"), ]), sse(vec![ ev_response_created("resp-3"), ev_function_call( second_poll_call_id, "write_stdin", &serde_json::to_string(&second_poll_args)?, ), ev_completed("resp-3"), ]), sse(vec![ ev_response_created("resp-4"), ev_function_call( third_poll_call_id, "write_stdin", &serde_json::to_string(&third_poll_args)?, ), ev_completed("resp-4"), ]), sse(vec![ ev_response_created("resp-5"), ev_assistant_message("msg-1", "complete"), ev_completed("resp-5"), ]), ]; mount_sse_sequence(&server, responses).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "delayed terminal interaction output".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut begin_event = None; let mut end_event = None; let mut task_completed = false; let mut terminal_events = Vec::new(); let mut delta_text = String::new(); // Consume all events for this turn so we can assert on each stage. 
loop { let msg = wait_for_event(&codex, |_| true).await; match msg { EventMsg::ExecCommandBegin(ev) if ev.call_id == open_call_id => { begin_event = Some(ev); } EventMsg::ExecCommandOutputDelta(ev) if ev.call_id == open_call_id => { delta_text.push_str(&String::from_utf8_lossy(&ev.chunk)); } EventMsg::TerminalInteraction(ev) if ev.call_id == open_call_id => { terminal_events.push(ev); } EventMsg::ExecCommandEnd(ev) if ev.call_id == open_call_id => { end_event = Some(ev); } EventMsg::TaskComplete(_) => { task_completed = true; } _ => {} }; if task_completed && end_event.is_some() { break; } } let begin_event = begin_event.expect("expected ExecCommandBegin event"); assert!( begin_event.process_id.is_some(), "begin event should include process_id for a live session" ); // We expect three terminal interactions matching the three write_stdin calls. assert_eq!( terminal_events.len(), 3, "expected three terminal interactions; got {terminal_events:?}" ); for event in &terminal_events { assert_eq!(event.call_id, open_call_id); assert_eq!(event.process_id, "1000"); } assert_eq!( terminal_events .iter()
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/compact_remote.rs
codex-rs/core/tests/suite/compact_remote.rs
#![allow(clippy::expect_used)] use std::fs; use anyhow::Result; use codex_core::CodexAuth; use codex_core::features::Feature; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::RolloutItem; use codex_core::protocol::RolloutLine; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::user_input::UserInput; use core_test_support::responses; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_compact_replaces_history_for_followups() -> Result<()> { skip_if_no_network!(Ok(())); let harness = TestCodexHarness::with_builder( test_codex() .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) .with_config(|config| { config.features.enable(Feature::RemoteCompaction); }), ) .await?; let codex = harness.test().codex.clone(); let responses_mock = responses::mount_sse_sequence( harness.server(), vec![ responses::sse(vec![ responses::ev_assistant_message("m1", "FIRST_REMOTE_REPLY"), responses::ev_completed("resp-1"), ]), responses::sse(vec![ responses::ev_assistant_message("m2", "AFTER_COMPACT_REPLY"), responses::ev_completed("resp-2"), ]), ], ) .await; let compacted_history = vec![ ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "REMOTE_COMPACTED_SUMMARY".to_string(), }], }, ResponseItem::Compaction { encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(), }, ]; let compact_mock = responses::mount_compact_json_once( harness.server(), serde_json::json!({ "output": compacted_history.clone() }), ) .await; codex .submit(Op::UserInput { items: 
vec![UserInput::Text { text: "hello remote compact".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex.submit(Op::Compact).await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "after compact".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let compact_request = compact_mock.single_request(); assert_eq!(compact_request.path(), "/v1/responses/compact"); assert_eq!( compact_request.header("chatgpt-account-id").as_deref(), Some("account_id") ); assert_eq!( compact_request.header("authorization").as_deref(), Some("Bearer Access Token") ); let compact_body = compact_request.body_json(); assert_eq!( compact_body.get("model").and_then(|v| v.as_str()), Some(harness.test().session_configured.model.as_str()) ); let compact_body_text = compact_body.to_string(); assert!( compact_body_text.contains("hello remote compact"), "expected compact request to include user history" ); assert!( compact_body_text.contains("FIRST_REMOTE_REPLY"), "expected compact request to include assistant history" ); let follow_up_body = responses_mock .requests() .last() .expect("follow-up request missing") .body_json() .to_string(); assert!( follow_up_body.contains("REMOTE_COMPACTED_SUMMARY"), "expected follow-up request to use compacted history" ); assert!( follow_up_body.contains("ENCRYPTED_COMPACTION_SUMMARY"), "expected follow-up request to include compaction summary item" ); assert!( !follow_up_body.contains("FIRST_REMOTE_REPLY"), "expected follow-up request to drop pre-compaction assistant messages" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_compact_runs_automatically() -> Result<()> { skip_if_no_network!(Ok(())); let harness = TestCodexHarness::with_builder( test_codex() .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) 
.with_config(|config| { config.features.enable(Feature::RemoteCompaction); }), ) .await?; let codex = harness.test().codex.clone(); mount_sse_once( harness.server(), sse(vec![ responses::ev_shell_command_call("m1", "echo 'hi'"), responses::ev_completed_with_tokens("resp-1", 100000000), // over token limit ]), ) .await; let responses_mock = mount_sse_once( harness.server(), responses::sse(vec![ responses::ev_assistant_message("m2", "AFTER_COMPACT_REPLY"), responses::ev_completed("resp-2"), ]), ) .await; let compacted_history = vec![ ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "REMOTE_COMPACTED_SUMMARY".to_string(), }], }, ResponseItem::Compaction { encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(), }, ]; let compact_mock = responses::mount_compact_json_once( harness.server(), serde_json::json!({ "output": compacted_history.clone() }), ) .await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello remote compact".into(), }], }) .await?; let message = wait_for_event_match(&codex, |ev| match ev { EventMsg::ContextCompacted(_) => Some(true), _ => None, }) .await; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; assert!(message); assert_eq!(compact_mock.requests().len(), 1); let follow_up_body = responses_mock.single_request().body_json().to_string(); assert!(follow_up_body.contains("REMOTE_COMPACTED_SUMMARY")); assert!(follow_up_body.contains("ENCRYPTED_COMPACTION_SUMMARY")); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn remote_compact_persists_replacement_history_in_rollout() -> Result<()> { skip_if_no_network!(Ok(())); let harness = TestCodexHarness::with_builder( test_codex() .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) .with_config(|config| { config.features.enable(Feature::RemoteCompaction); }), ) .await?; let codex = harness.test().codex.clone(); let rollout_path = 
harness.test().session_configured.rollout_path.clone(); let responses_mock = responses::mount_sse_once( harness.server(), responses::sse(vec![ responses::ev_assistant_message("m1", "COMPACT_BASELINE_REPLY"), responses::ev_completed("resp-1"), ]), ) .await; let compacted_history = vec![ ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "COMPACTED_USER_SUMMARY".to_string(), }], }, ResponseItem::Compaction { encrypted_content: "ENCRYPTED_COMPACTION_SUMMARY".to_string(), }, ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: "COMPACTED_ASSISTANT_NOTE".to_string(), }], }, ]; let compact_mock = responses::mount_compact_json_once( harness.server(), serde_json::json!({ "output": compacted_history.clone() }), ) .await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "needs compaction".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex.submit(Op::Compact).await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex.submit(Op::Shutdown).await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await; assert_eq!(responses_mock.requests().len(), 1); assert_eq!(compact_mock.requests().len(), 1); let rollout_text = fs::read_to_string(&rollout_path)?; let mut saw_compacted_history = false; for line in rollout_text .lines() .map(str::trim) .filter(|l| !l.is_empty()) { let Ok(entry) = serde_json::from_str::<RolloutLine>(line) else { continue; }; if let RolloutItem::Compacted(compacted) = entry.item && compacted.message.is_empty() && compacted.replacement_history.as_ref() == Some(&compacted_history) { saw_compacted_history = true; break; } } assert!( saw_compacted_history, "expected rollout to persist remote compaction history" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/tool_parallelism.rs
codex-rs/core/tests/suite/tool_parallelism.rs
#![cfg(not(target_os = "windows"))] #![allow(clippy::unwrap_used)] use std::fs; use std::time::Duration; use std::time::Instant; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::ev_shell_command_call_with_args; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::streaming_sse::StreamingSseChunk; use core_test_support::streaming_sse::start_streaming_sse_server; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use serde_json::Value; use serde_json::json; use tokio::sync::oneshot; async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> { let session_model = test.session_configured.model.clone(); test.codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: prompt.into(), }], final_output_json_schema: None, cwd: test.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; Ok(()) } async fn run_turn_and_measure(test: &TestCodex, prompt: &str) -> anyhow::Result<Duration> { let start = Instant::now(); run_turn(test, prompt).await?; Ok(start.elapsed()) } 
#[allow(clippy::expect_used)] async fn build_codex_with_test_tool(server: &wiremock::MockServer) -> anyhow::Result<TestCodex> { let mut builder = test_codex().with_model("test-gpt-5.1-codex"); builder.build(server).await } fn assert_parallel_duration(actual: Duration) { // Allow headroom for runtime overhead while still differentiating from serial execution. assert!( actual < Duration::from_millis(750), "expected parallel execution to finish quickly, got {actual:?}" ); } fn assert_serial_duration(actual: Duration) { assert!( actual >= Duration::from_millis(500), "expected serial execution to take longer, got {actual:?}" ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn read_file_tools_run_in_parallel() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = build_codex_with_test_tool(&server).await?; let warmup_args = json!({ "sleep_after_ms": 10, "barrier": { "id": "parallel-test-sync-warmup", "participants": 2, "timeout_ms": 1_000, } }) .to_string(); let parallel_args = json!({ "sleep_after_ms": 300, "barrier": { "id": "parallel-test-sync", "participants": 2, "timeout_ms": 1_000, } }) .to_string(); let warmup_first = sse(vec![ json!({"type": "response.created", "response": {"id": "resp-warm-1"}}), ev_function_call("warm-call-1", "test_sync_tool", &warmup_args), ev_function_call("warm-call-2", "test_sync_tool", &warmup_args), ev_completed("resp-warm-1"), ]); let warmup_second = sse(vec![ ev_assistant_message("warm-msg-1", "warmup complete"), ev_completed("resp-warm-2"), ]); let first_response = sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call("call-1", "test_sync_tool", &parallel_args), ev_function_call("call-2", "test_sync_tool", &parallel_args), ev_completed("resp-1"), ]); let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); mount_sse_sequence( &server, vec![warmup_first, warmup_second, first_response, 
second_response], ) .await; run_turn(&test, "warm up parallel tool").await?; let duration = run_turn_and_measure(&test, "exercise sync tool").await?; assert_parallel_duration(duration); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn non_parallel_tools_run_serially() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1"); let test = builder.build(&server).await?; let shell_args = json!({ "command": "sleep 0.3", "timeout_ms": 1_000, }); let args_one = serde_json::to_string(&shell_args)?; let args_two = serde_json::to_string(&shell_args)?; let first_response = sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call("call-1", "shell_command", &args_one), ev_function_call("call-2", "shell_command", &args_two), ev_completed("resp-1"), ]); let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); mount_sse_sequence(&server, vec![first_response, second_response]).await; let duration = run_turn_and_measure(&test, "run shell_command twice").await?; assert_serial_duration(duration); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn mixed_tools_fall_back_to_serial() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = build_codex_with_test_tool(&server).await?; let sync_args = json!({ "sleep_after_ms": 300 }) .to_string(); let shell_args = serde_json::to_string(&json!({ "command": "sleep 0.3", "timeout_ms": 1_000, }))?; let first_response = sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call("call-1", "test_sync_tool", &sync_args), ev_function_call("call-2", "shell_command", &shell_args), ev_completed("resp-1"), ]); let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); mount_sse_sequence(&server, vec![first_response, 
second_response]).await; let duration = run_turn_and_measure(&test, "mix tools").await?; assert_serial_duration(duration); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn tool_results_grouped() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = build_codex_with_test_tool(&server).await?; let shell_args = serde_json::to_string(&json!({ "command": "echo 'shell output'", "timeout_ms": 1_000, }))?; mount_sse_once( &server, sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call("call-1", "shell_command", &shell_args), ev_function_call("call-2", "shell_command", &shell_args), ev_function_call("call-3", "shell_command", &shell_args), ev_completed("resp-1"), ]), ) .await; let tool_output_request = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; run_turn(&test, "run shell three times").await?; let input = tool_output_request.single_request().input(); // find all function_call inputs with indexes let function_calls = input .iter() .enumerate() .filter(|(_, item)| item.get("type").and_then(Value::as_str) == Some("function_call")) .collect::<Vec<_>>(); let function_call_outputs = input .iter() .enumerate() .filter(|(_, item)| { item.get("type").and_then(Value::as_str) == Some("function_call_output") }) .collect::<Vec<_>>(); assert_eq!(function_calls.len(), 3); assert_eq!(function_call_outputs.len(), 3); for (index, _) in &function_calls { for (output_index, _) in &function_call_outputs { assert!( *index < *output_index, "all function calls must come before outputs" ); } } // output should come in the order of the function calls let zipped = function_calls .iter() .zip(function_call_outputs.iter()) .collect::<Vec<_>>(); for (call, output) in zipped { assert_eq!( call.1.get("call_id").and_then(Value::as_str), output.1.get("call_id").and_then(Value::as_str) ); } Ok(()) } #[tokio::test(flavor = 
"multi_thread", worker_threads = 2)] async fn shell_tools_start_before_response_completed_when_stream_delayed() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let output_file = tempfile::NamedTempFile::new()?; let output_path = output_file.path(); let first_response_id = "resp-1"; let second_response_id = "resp-2"; let command = format!( "perl -MTime::HiRes -e 'print int(Time::HiRes::time()*1000), \"\\n\"' >> \"{}\"", output_path.display() ); let args = json!({ "command": command, "timeout_ms": 1_000, }); let first_chunk = sse(vec![ ev_response_created(first_response_id), ev_shell_command_call_with_args("call-1", &args), ev_shell_command_call_with_args("call-2", &args), ev_shell_command_call_with_args("call-3", &args), ev_shell_command_call_with_args("call-4", &args), ]); let second_chunk = sse(vec![ev_completed(first_response_id)]); let follow_up = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed(second_response_id), ]); let (first_gate_tx, first_gate_rx) = oneshot::channel(); let (completion_gate_tx, completion_gate_rx) = oneshot::channel(); let (follow_up_gate_tx, follow_up_gate_rx) = oneshot::channel(); let (streaming_server, completion_receivers) = start_streaming_sse_server(vec![ vec![ StreamingSseChunk { gate: Some(first_gate_rx), body: first_chunk, }, StreamingSseChunk { gate: Some(completion_gate_rx), body: second_chunk, }, ], vec![StreamingSseChunk { gate: Some(follow_up_gate_rx), body: follow_up, }], ]) .await; let mut builder = test_codex().with_model("gpt-5.1"); let test = builder .build_with_streaming_server(&streaming_server) .await?; let session_model = test.session_configured.model.clone(); test.codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "stream delayed completion".into(), }], final_output_json_schema: None, cwd: test.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) 
.await?; let _ = first_gate_tx.send(()); let _ = follow_up_gate_tx.send(()); let timestamps = tokio::time::timeout(Duration::from_secs(1), async { loop { let contents = fs::read_to_string(output_path)?; let timestamps = contents .lines() .filter(|line| !line.trim().is_empty()) .map(|line| { line.trim() .parse::<i64>() .map_err(|err| anyhow::anyhow!("invalid timestamp {line:?}: {err}")) }) .collect::<Result<Vec<_>, _>>()?; if timestamps.len() == 4 { return Ok::<_, anyhow::Error>(timestamps); } tokio::time::sleep(Duration::from_millis(10)).await; } }) .await??; let _ = completion_gate_tx.send(()); wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let mut completion_iter = completion_receivers.into_iter(); let completed_at = completion_iter .next() .expect("completion receiver missing") .await .expect("completion timestamp missing"); let count = i64::try_from(timestamps.len()).expect("timestamp count fits in i64"); assert_eq!(count, 4); for timestamp in timestamps { assert!( timestamp <= completed_at, "timestamp {timestamp} should be before or equal to completed {completed_at}" ); } streaming_server.shutdown().await; Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/list_dir.rs
codex-rs/core/tests/suite/list_dir.rs
#![cfg(not(target_os = "windows"))] use core_test_support::responses::mount_function_call_agent_response; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use pretty_assertions::assert_eq; use serde_json::json; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "disabled until we enable list_dir tool"] async fn list_dir_tool_returns_entries() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = test_codex().build(&server).await?; let dir_path = test.cwd.path().join("sample_dir"); std::fs::create_dir(&dir_path)?; std::fs::write(dir_path.join("alpha.txt"), "first file")?; std::fs::create_dir(dir_path.join("nested"))?; let dir_path = dir_path.to_string_lossy().to_string(); let call_id = "list-dir-call"; let arguments = json!({ "dir_path": dir_path, "offset": 1, "limit": 2, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "list_dir").await; test.submit_turn("list directory contents").await?; let req = mocks.completion.single_request(); let (content_opt, _) = req .function_call_output_content_and_success(call_id) .expect("function_call_output present"); let output = content_opt.expect("output content present in tool output"); assert_eq!(output, "E1: [file] alpha.txt\nE2: [dir] nested"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "disabled until we enable list_dir tool"] async fn list_dir_tool_depth_one_omits_children() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = test_codex().build(&server).await?; let dir_path = test.cwd.path().join("depth_one"); std::fs::create_dir(&dir_path)?; std::fs::write(dir_path.join("alpha.txt"), "alpha")?; std::fs::create_dir(dir_path.join("nested"))?; std::fs::write(dir_path.join("nested").join("beta.txt"), "beta")?; let dir_path = 
dir_path.to_string_lossy().to_string(); let call_id = "list-dir-depth1"; let arguments = json!({ "dir_path": dir_path, "offset": 1, "limit": 10, "depth": 1, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "list_dir").await; test.submit_turn("list directory contents depth one") .await?; let req = mocks.completion.single_request(); let (content_opt, _) = req .function_call_output_content_and_success(call_id) .expect("function_call_output present"); let output = content_opt.expect("output content present in tool output"); assert_eq!(output, "E1: [file] alpha.txt\nE2: [dir] nested"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "disabled until we enable list_dir tool"] async fn list_dir_tool_depth_two_includes_children_only() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = test_codex().build(&server).await?; let dir_path = test.cwd.path().join("depth_two"); std::fs::create_dir(&dir_path)?; std::fs::write(dir_path.join("alpha.txt"), "alpha")?; let nested = dir_path.join("nested"); std::fs::create_dir(&nested)?; std::fs::write(nested.join("beta.txt"), "beta")?; let deeper = nested.join("grand"); std::fs::create_dir(&deeper)?; std::fs::write(deeper.join("gamma.txt"), "gamma")?; let dir_path_string = dir_path.to_string_lossy().to_string(); let call_id = "list-dir-depth2"; let arguments = json!({ "dir_path": dir_path_string, "offset": 1, "limit": 10, "depth": 2, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "list_dir").await; test.submit_turn("list directory contents depth two") .await?; let req = mocks.completion.single_request(); let (content_opt, _) = req .function_call_output_content_and_success(call_id) .expect("function_call_output present"); let output = content_opt.expect("output content present in tool output"); assert_eq!( output, "E1: [file] alpha.txt\nE2: [dir] nested\nE3: [file] 
nested/beta.txt\nE4: [dir] nested/grand" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "disabled until we enable list_dir tool"] async fn list_dir_tool_depth_three_includes_grandchildren() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = test_codex().build(&server).await?; let dir_path = test.cwd.path().join("depth_three"); std::fs::create_dir(&dir_path)?; std::fs::write(dir_path.join("alpha.txt"), "alpha")?; let nested = dir_path.join("nested"); std::fs::create_dir(&nested)?; std::fs::write(nested.join("beta.txt"), "beta")?; let deeper = nested.join("grand"); std::fs::create_dir(&deeper)?; std::fs::write(deeper.join("gamma.txt"), "gamma")?; let dir_path_string = dir_path.to_string_lossy().to_string(); let call_id = "list-dir-depth3"; let arguments = json!({ "dir_path": dir_path_string, "offset": 1, "limit": 10, "depth": 3, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "list_dir").await; test.submit_turn("list directory contents depth three") .await?; let req = mocks.completion.single_request(); let (content_opt, _) = req .function_call_output_content_and_success(call_id) .expect("function_call_output present"); let output = content_opt.expect("output content present in tool output"); assert_eq!( output, "E1: [file] alpha.txt\nE2: [dir] nested\nE3: [file] nested/beta.txt\nE4: [dir] nested/grand\nE5: [file] nested/grand/gamma.txt" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/items.rs
codex-rs/core/tests/suite/items.rs
#![cfg(not(target_os = "windows"))] use anyhow::Ok; use codex_core::protocol::EventMsg; use codex_core::protocol::ItemCompletedEvent; use codex_core::protocol::ItemStartedEvent; use codex_core::protocol::Op; use codex_protocol::items::TurnItem; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_message_item_added; use core_test_support::responses::ev_output_text_delta; use core_test_support::responses::ev_reasoning_item; use core_test_support::responses::ev_reasoning_item_added; use core_test_support::responses::ev_reasoning_summary_text_delta; use core_test_support::responses::ev_reasoning_text_delta; use core_test_support::responses::ev_response_created; use core_test_support::responses::ev_web_search_call_added; use core_test_support::responses::ev_web_search_call_done; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn user_message_item_is_emitted() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. } = test_codex().build(&server).await?; let first_response = sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]); mount_sse_once(&server, first_response).await; codex .submit(Op::UserInput { items: (vec![UserInput::Text { text: "please inspect sample.txt".into(), }]), }) .await?; let started_item = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::UserMessage(item), .. 
}) => Some(item.clone()), _ => None, }) .await; let completed_item = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::UserMessage(item), .. }) => Some(item.clone()), _ => None, }) .await; assert_eq!(started_item.id, completed_item.id); assert_eq!( started_item.content, vec![UserInput::Text { text: "please inspect sample.txt".into(), }] ); assert_eq!( completed_item.content, vec![UserInput::Text { text: "please inspect sample.txt".into(), }] ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn assistant_message_item_is_emitted() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. } = test_codex().build(&server).await?; let first_response = sse(vec![ ev_response_created("resp-1"), ev_assistant_message("msg-1", "all done"), ev_completed("resp-1"), ]); mount_sse_once(&server, first_response).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "please summarize results".into(), }], }) .await?; let started = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::AgentMessage(item), .. }) => Some(item.clone()), _ => None, }) .await; let completed = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::AgentMessage(item), .. }) => Some(item.clone()), _ => None, }) .await; assert_eq!(started.id, completed.id); let Some(codex_protocol::items::AgentMessageContent::Text { text }) = completed.content.first() else { panic!("expected agent message text content"); }; assert_eq!(text, "all done"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn reasoning_item_is_emitted() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. 
} = test_codex().build(&server).await?; let reasoning_item = ev_reasoning_item( "reasoning-1", &["Consider inputs", "Compute output"], &["Detailed reasoning trace"], ); let first_response = sse(vec![ ev_response_created("resp-1"), reasoning_item, ev_completed("resp-1"), ]); mount_sse_once(&server, first_response).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "explain your reasoning".into(), }], }) .await?; let started = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::Reasoning(item), .. }) => Some(item.clone()), _ => None, }) .await; let completed = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::Reasoning(item), .. }) => Some(item.clone()), _ => None, }) .await; assert_eq!(started.id, completed.id); assert_eq!( completed.summary_text, vec!["Consider inputs".to_string(), "Compute output".to_string()] ); assert_eq!( completed.raw_content, vec!["Detailed reasoning trace".to_string()] ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn web_search_item_is_emitted() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. } = test_codex().build(&server).await?; let web_search_added = ev_web_search_call_added("web-search-1", "in_progress", "weather seattle"); let web_search_done = ev_web_search_call_done("web-search-1", "completed", "weather seattle"); let first_response = sse(vec![ ev_response_created("resp-1"), web_search_added, web_search_done, ev_completed("resp-1"), ]); mount_sse_once(&server, first_response).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "find the weather".into(), }], }) .await?; let started = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::WebSearch(item), .. 
}) => Some(item.clone()), _ => None, }) .await; let completed = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::WebSearch(item), .. }) => Some(item.clone()), _ => None, }) .await; assert_eq!(started.id, completed.id); assert_eq!(completed.query, "weather seattle"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn agent_message_content_delta_has_item_metadata() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, session_configured, .. } = test_codex().build(&server).await?; let stream = sse(vec![ ev_response_created("resp-1"), ev_message_item_added("msg-1", ""), ev_output_text_delta("streamed response"), ev_assistant_message("msg-1", "streamed response"), ev_completed("resp-1"), ]); mount_sse_once(&server, stream).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "please stream text".into(), }], }) .await?; let (started_turn_id, started_item) = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { turn_id, item: TurnItem::AgentMessage(item), .. }) => Some((turn_id.clone(), item.clone())), _ => None, }) .await; let delta_event = wait_for_event_match(&codex, |ev| match ev { EventMsg::AgentMessageContentDelta(event) => Some(event.clone()), _ => None, }) .await; let legacy_delta = wait_for_event_match(&codex, |ev| match ev { EventMsg::AgentMessageDelta(event) => Some(event.clone()), _ => None, }) .await; let completed_item = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::AgentMessage(item), .. 
}) => Some(item.clone()), _ => None, }) .await; let session_id = session_configured.session_id.to_string(); assert_eq!(delta_event.thread_id, session_id); assert_eq!(delta_event.turn_id, started_turn_id); assert_eq!(delta_event.item_id, started_item.id); assert_eq!(delta_event.delta, "streamed response"); assert_eq!(legacy_delta.delta, "streamed response"); assert_eq!(completed_item.id, started_item.id); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn reasoning_content_delta_has_item_metadata() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. } = test_codex().build(&server).await?; let stream = sse(vec![ ev_response_created("resp-1"), ev_reasoning_item_added("reasoning-1", &[""]), ev_reasoning_summary_text_delta("step one"), ev_reasoning_item("reasoning-1", &["step one"], &[]), ev_completed("resp-1"), ]); mount_sse_once(&server, stream).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "reason through it".into(), }], }) .await?; let reasoning_item = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::Reasoning(item), .. }) => Some(item.clone()), _ => None, }) .await; let delta_event = wait_for_event_match(&codex, |ev| match ev { EventMsg::ReasoningContentDelta(event) => Some(event.clone()), _ => None, }) .await; let legacy_delta = wait_for_event_match(&codex, |ev| match ev { EventMsg::AgentReasoningDelta(event) => Some(event.clone()), _ => None, }) .await; assert_eq!(delta_event.item_id, reasoning_item.id); assert_eq!(delta_event.delta, "step one"); assert_eq!(legacy_delta.delta, "step one"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn reasoning_raw_content_delta_respects_flag() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.show_raw_agent_reasoning = true; }) .build(&server) .await?; let stream = sse(vec![ ev_response_created("resp-1"), ev_reasoning_item_added("reasoning-raw", &[""]), ev_reasoning_text_delta("raw detail"), ev_reasoning_item("reasoning-raw", &["complete"], &["raw detail"]), ev_completed("resp-1"), ]); mount_sse_once(&server, stream).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "show raw reasoning".into(), }], }) .await?; let reasoning_item = wait_for_event_match(&codex, |ev| match ev { EventMsg::ItemStarted(ItemStartedEvent { item: TurnItem::Reasoning(item), .. }) => Some(item.clone()), _ => None, }) .await; let delta_event = wait_for_event_match(&codex, |ev| match ev { EventMsg::ReasoningRawContentDelta(event) => Some(event.clone()), _ => None, }) .await; let legacy_delta = wait_for_event_match(&codex, |ev| match ev { EventMsg::AgentReasoningRawContentDelta(event) => Some(event.clone()), _ => None, }) .await; assert_eq!(delta_event.item_id, reasoning_item.id); assert_eq!(delta_event.delta, "raw detail"); assert_eq!(legacy_delta.delta, "raw detail"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/model_overrides.rs
codex-rs/core/tests/suite/model_overrides.rs
use codex_core::CodexAuth; use codex_core::ConversationManager; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_protocol::openai_models::ReasoningEffort; use core_test_support::load_default_config_for_test; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use tempfile::TempDir; const CONFIG_TOML: &str = "config.toml"; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn override_turn_context_does_not_persist_when_config_exists() { let codex_home = TempDir::new().unwrap(); let config_path = codex_home.path().join(CONFIG_TOML); let initial_contents = "model = \"gpt-4o\"\n"; tokio::fs::write(&config_path, initial_contents) .await .expect("seed config.toml"); let mut config = load_default_config_for_test(&codex_home).await; config.model = Some("gpt-4o".to_string()); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create conversation") .conversation; codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: None, sandbox_policy: None, model: Some("o3".to_string()), effort: Some(Some(ReasoningEffort::High)), summary: None, }) .await .expect("submit override"); codex.submit(Op::Shutdown).await.expect("request shutdown"); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await; let contents = tokio::fs::read_to_string(&config_path) .await .expect("read config.toml after override"); assert_eq!(contents, initial_contents); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn override_turn_context_does_not_create_config_file() { let codex_home = TempDir::new().unwrap(); let config_path = codex_home.path().join(CONFIG_TOML); assert!( !config_path.exists(), "test setup should start without config" ); let config = load_default_config_for_test(&codex_home).await; let conversation_manager = 
ConversationManager::with_models_provider( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create conversation") .conversation; codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: None, sandbox_policy: None, model: Some("o3".to_string()), effort: Some(Some(ReasoningEffort::Medium)), summary: None, }) .await .expect("submit override"); codex.submit(Op::Shutdown).await.expect("request shutdown"); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await; assert!( !config_path.exists(), "override should not create config.toml" ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/text_encoding_fix.rs
codex-rs/core/tests/suite/text_encoding_fix.rs
//! Integration test for the text encoding fix for issue #6178. //! //! These tests simulate VSCode's shell preview on Windows/WSL where the output //! may be encoded with a legacy code page before it reaches Codex. use codex_core::exec::StreamOutput; use pretty_assertions::assert_eq; #[test] fn test_utf8_shell_output() { // Baseline: UTF-8 output should bypass the detector and remain unchanged. assert_eq!(decode_shell_output("пример".as_bytes()), "пример"); } #[test] fn test_cp1251_shell_output() { // VS Code shells on Windows frequently surface CP1251 bytes for Cyrillic text. assert_eq!(decode_shell_output(b"\xEF\xF0\xE8\xEC\xE5\xF0"), "пример"); } #[test] fn test_cp866_shell_output() { // Native cmd.exe still defaults to CP866; make sure we recognize that too. assert_eq!(decode_shell_output(b"\xAF\xE0\xA8\xAC\xA5\xE0"), "пример"); } #[test] fn test_windows_1252_smart_decoding() { // Smart detection should turn fancy quotes/dashes into the proper Unicode glyphs. assert_eq!( decode_shell_output(b"\x93\x94 test \x96 dash"), "\u{201C}\u{201D} test \u{2013} dash" ); } #[test] fn test_smart_decoding_improves_over_lossy_utf8() { // Regression guard: String::from_utf8_lossy() alone used to emit replacement chars here. let bytes = b"\x93\x94 test \x96 dash"; assert!( String::from_utf8_lossy(bytes).contains('\u{FFFD}'), "lossy UTF-8 should inject replacement chars" ); assert_eq!( decode_shell_output(bytes), "\u{201C}\u{201D} test \u{2013} dash", "smart decoding should keep curly quotes intact" ); } #[test] fn test_mixed_ascii_and_legacy_encoding() { // Commands tend to mix ASCII status text with Latin-1 bytes (e.g. café). assert_eq!(decode_shell_output(b"Output: caf\xE9"), "Output: café"); // codespell:ignore caf } #[test] fn test_pure_latin1_shell_output() { // Latin-1 by itself should still decode correctly (regression coverage for the older tests). 
assert_eq!(decode_shell_output(b"caf\xE9"), "café"); // codespell:ignore caf } #[test] fn test_invalid_bytes_still_fall_back_to_lossy() { // If detection fails, we still want the user to see replacement characters. let bytes = b"\xFF\xFE\xFD"; assert_eq!(decode_shell_output(bytes), String::from_utf8_lossy(bytes)); } fn decode_shell_output(bytes: &[u8]) -> String { StreamOutput { text: bytes.to_vec(), truncated_after_lines: None, } .from_utf8_lossy() .text }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/shell_snapshot.rs
codex-rs/core/tests/suite/shell_snapshot.rs
use anyhow::Result; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; use serde_json::json; use std::path::PathBuf; use tokio::fs; use tokio::time::Duration; use tokio::time::sleep; #[derive(Debug)] struct SnapshotRun { begin: ExecCommandBeginEvent, end: ExecCommandEndEvent, snapshot_path: PathBuf, snapshot_content: String, codex_home: PathBuf, } #[allow(clippy::expect_used)] async fn run_snapshot_command(command: &str) -> Result<SnapshotRun> { let builder = test_codex().with_config(|config| { config.use_experimental_unified_exec_tool = true; config.features.enable(Feature::UnifiedExec); config.features.enable(Feature::ShellSnapshot); }); let harness = TestCodexHarness::with_builder(builder).await?; let args = json!({ "cmd": command, "yield_time_ms": 1000, }); let call_id = "shell-snapshot-exec"; let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "exec_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), responses).await; let test = 
harness.test(); let codex = test.codex.clone(); let codex_home = test.home.path().to_path_buf(); let session_model = test.session_configured.model.clone(); let cwd = test.cwd_path().to_path_buf(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run unified exec with shell snapshot".into(), }], final_output_json_schema: None, cwd, approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin = wait_for_event_match(&codex, |ev| match ev { EventMsg::ExecCommandBegin(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; let mut entries = fs::read_dir(codex_home.join("shell_snapshots")).await?; let snapshot_path = entries .next_entry() .await? .map(|entry| entry.path()) .expect("shell snapshot created"); let snapshot_content = fs::read_to_string(&snapshot_path).await?; let end = wait_for_event_match(&codex, |ev| match ev { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; Ok(SnapshotRun { begin, end, snapshot_path, snapshot_content, codex_home, }) } #[allow(clippy::expect_used)] async fn run_shell_command_snapshot(command: &str) -> Result<SnapshotRun> { let builder = test_codex().with_config(|config| { config.features.enable(Feature::ShellSnapshot); }); let harness = TestCodexHarness::with_builder(builder).await?; let args = json!({ "command": command, "timeout_ms": 1000, }); let call_id = "shell-snapshot-command"; let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), responses).await; let test = harness.test(); let codex = 
test.codex.clone(); let codex_home = test.home.path().to_path_buf(); let session_model = test.session_configured.model.clone(); let cwd = test.cwd_path().to_path_buf(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run shell_command with shell snapshot".into(), }], final_output_json_schema: None, cwd, approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin = wait_for_event_match(&codex, |ev| match ev { EventMsg::ExecCommandBegin(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; let mut entries = fs::read_dir(codex_home.join("shell_snapshots")).await?; let snapshot_path = entries .next_entry() .await? .map(|entry| entry.path()) .expect("shell snapshot created"); let snapshot_content = fs::read_to_string(&snapshot_path).await?; let end = wait_for_event_match(&codex, |ev| match ev { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) .await; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; Ok(SnapshotRun { begin, end, snapshot_path, snapshot_content, codex_home, }) } fn normalize_newlines(text: &str) -> String { text.replace("\r\n", "\n") } fn assert_posix_snapshot_sections(snapshot: &str) { assert!(snapshot.contains("# Snapshot file")); assert!(snapshot.contains("aliases ")); assert!(snapshot.contains("exports ")); assert!(snapshot.contains("setopts ")); assert!( snapshot.contains("PATH"), "snapshot should include PATH exports; snapshot={snapshot:?}" ); } #[cfg_attr(not(target_os = "linux"), ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn linux_unified_exec_uses_shell_snapshot() -> Result<()> { let command = "echo snapshot-linux"; let run = run_snapshot_command(command).await?; let stdout = normalize_newlines(&run.end.stdout); assert_eq!(run.begin.command.get(1).map(String::as_str), Some("-lc")); 
assert_eq!(run.begin.command.get(2).map(String::as_str), Some(command)); assert_eq!(run.begin.command.len(), 3); assert!(run.snapshot_path.starts_with(&run.codex_home)); assert_posix_snapshot_sections(&run.snapshot_content); assert_eq!(run.end.exit_code, 0); assert!( stdout.contains("snapshot-linux"), "stdout should contain snapshot marker; stdout={stdout:?}" ); Ok(()) } #[cfg_attr(target_os = "windows", ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn linux_shell_command_uses_shell_snapshot() -> Result<()> { let command = "echo shell-command-snapshot-linux"; let run = run_shell_command_snapshot(command).await?; assert_eq!(run.begin.command.get(1).map(String::as_str), Some("-lc")); assert_eq!(run.begin.command.get(2).map(String::as_str), Some(command)); assert_eq!(run.begin.command.len(), 3); assert!(run.snapshot_path.starts_with(&run.codex_home)); assert_posix_snapshot_sections(&run.snapshot_content); assert_eq!( normalize_newlines(&run.end.stdout).trim(), "shell-command-snapshot-linux" ); assert_eq!(run.end.exit_code, 0); Ok(()) } #[cfg_attr(target_os = "windows", ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> { let builder = test_codex().with_config(|config| { config.features.enable(Feature::ShellSnapshot); config.include_apply_patch_tool = true; }); let harness = TestCodexHarness::with_builder(builder).await?; let test = harness.test(); let codex = test.codex.clone(); let cwd = test.cwd_path().to_path_buf(); let codex_home = test.home.path().to_path_buf(); let target = cwd.join("snapshot-apply.txt"); let script = "apply_patch <<'EOF'\n*** Begin Patch\n*** Add File: snapshot-apply.txt\n+hello from snapshot\n*** End Patch\nEOF\n"; let args = json!({ "command": script, "timeout_ms": 1_000, }); let call_id = "shell-snapshot-apply-patch"; let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, 
"shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; mount_sse_sequence(harness.server(), responses).await; let model = test.session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "apply patch via shell_command with snapshot".into(), }], final_output_json_schema: None, cwd: cwd.clone(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; assert_eq!(fs::read_to_string(&target).await?, "hello from snapshot\n"); let mut entries = fs::read_dir(codex_home.join("shell_snapshots")).await?; let snapshot_path = entries .next_entry() .await? .map(|entry| entry.path()) .expect("shell snapshot created"); let snapshot_content = fs::read_to_string(&snapshot_path).await?; assert_posix_snapshot_sections(&snapshot_content); Ok(()) } #[cfg_attr(target_os = "windows", ignore)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_snapshot_deleted_after_shutdown_with_skills() -> Result<()> { let builder = test_codex().with_config(|config| { config.features.enable(Feature::ShellSnapshot); }); let harness = TestCodexHarness::with_builder(builder).await?; let home = harness.test().home.clone(); let codex_home = home.path().to_path_buf(); let codex = harness.test().codex.clone(); let mut entries = fs::read_dir(codex_home.join("shell_snapshots")).await?; let snapshot_path = entries .next_entry() .await? 
.map(|entry| entry.path()) .expect("shell snapshot created"); assert!(snapshot_path.exists()); codex.submit(Op::Shutdown {}).await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await; drop(codex); drop(harness); sleep(Duration::from_millis(150)).await; assert_eq!( snapshot_path.exists(), false, "snapshot should be removed after shutdown" ); Ok(()) } #[cfg_attr(not(target_os = "macos"), ignore)] #[cfg_attr( target_os = "macos", ignore = "requires unrestricted networking on macOS" )] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn macos_unified_exec_uses_shell_snapshot() -> Result<()> { let command = "echo snapshot-macos"; let run = run_snapshot_command(command).await?; let shell_path = run .begin .command .first() .expect("shell path recorded") .clone(); assert_eq!(run.begin.command.get(1).map(String::as_str), Some("-c")); assert_eq!( run.begin.command.get(2).map(String::as_str), Some(". \"$0\" && exec \"$@\"") ); assert_eq!(run.begin.command.get(4), Some(&shell_path)); assert_eq!(run.begin.command.get(5).map(String::as_str), Some("-c")); assert_eq!(run.begin.command.last(), Some(&command.to_string())); assert!(run.snapshot_path.starts_with(&run.codex_home)); assert_posix_snapshot_sections(&run.snapshot_content); assert_eq!(normalize_newlines(&run.end.stdout).trim(), "snapshot-macos"); assert_eq!(run.end.exit_code, 0); Ok(()) } // #[cfg_attr(not(target_os = "windows"), ignore)] #[ignore] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn windows_unified_exec_uses_shell_snapshot() -> Result<()> { let command = "Write-Output snapshot-windows"; let run = run_snapshot_command(command).await?; let snapshot_index = run .begin .command .iter() .position(|arg| arg.contains("shell_snapshots")) .expect("snapshot argument exists"); assert!(run.begin.command.iter().any(|arg| arg == "-NoProfile")); assert!( run.begin .command .iter() .any(|arg| arg == "param($snapshot) . 
$snapshot; & @args") ); assert!(snapshot_index > 0); assert_eq!(run.begin.command.last(), Some(&command.to_string())); assert!(run.snapshot_path.starts_with(&run.codex_home)); assert!(run.snapshot_content.contains("# Snapshot file")); assert!(run.snapshot_content.contains("# aliases ")); assert!(run.snapshot_content.contains("# exports ")); assert_eq!( normalize_newlines(&run.end.stdout).trim(), "snapshot-windows" ); assert_eq!(run.end.exit_code, 0); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/compact.rs
codex-rs/core/tests/suite/compact.rs
#![allow(clippy::expect_used)] use codex_core::CodexAuth; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::NewConversation; use codex_core::built_in_model_providers; use codex_core::compact::SUMMARIZATION_PROMPT; use codex_core::compact::SUMMARY_PREFIX; use codex_core::config::Config; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::RolloutItem; use codex_core::protocol::RolloutLine; use codex_core::protocol::SandboxPolicy; use codex_core::protocol::WarningEvent; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::responses::ev_local_shell_call; use core_test_support::responses::ev_reasoning_item; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use std::collections::VecDeque; use tempfile::TempDir; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_completed_with_tokens; use core_test_support::responses::ev_function_call; use core_test_support::responses::get_responses_requests; use core_test_support::responses::mount_compact_json_once; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_once_match; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::sse_failed; use core_test_support::responses::start_mock_server; use pretty_assertions::assert_eq; use serde_json::json; use wiremock::MockServer; // --- Test helpers ----------------------------------------------------------- pub(super) const FIRST_REPLY: &str = "FIRST_REPLY"; pub(super) const SUMMARY_TEXT: &str = 
"SUMMARY_ONLY_CONTEXT"; const THIRD_USER_MSG: &str = "next turn"; const AUTO_SUMMARY_TEXT: &str = "AUTO_SUMMARY"; const FIRST_AUTO_MSG: &str = "token limit start"; const SECOND_AUTO_MSG: &str = "token limit push"; const MULTI_AUTO_MSG: &str = "multi auto"; const SECOND_LARGE_REPLY: &str = "SECOND_LARGE_REPLY"; const FIRST_AUTO_SUMMARY: &str = "FIRST_AUTO_SUMMARY"; const SECOND_AUTO_SUMMARY: &str = "SECOND_AUTO_SUMMARY"; const FINAL_REPLY: &str = "FINAL_REPLY"; const CONTEXT_LIMIT_MESSAGE: &str = "Your input exceeds the context window of this model. Please adjust your input and try again."; const DUMMY_FUNCTION_NAME: &str = "unsupported_tool"; const DUMMY_CALL_ID: &str = "call-multi-auto"; const FUNCTION_CALL_LIMIT_MSG: &str = "function call limit push"; const POST_AUTO_USER_MSG: &str = "post auto follow-up"; pub(super) const COMPACT_WARNING_MESSAGE: &str = "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. Start a new conversation when possible to keep conversations small and targeted."; fn auto_summary(summary: &str) -> String { summary.to_string() } fn summary_with_prefix(summary: &str) -> String { format!("{SUMMARY_PREFIX}\n{summary}") } fn drop_call_id(value: &mut serde_json::Value) { match value { serde_json::Value::Object(obj) => { obj.retain(|k, _| k != "call_id"); for v in obj.values_mut() { drop_call_id(v); } } serde_json::Value::Array(arr) => { for v in arr { drop_call_id(v); } } _ => {} } } fn set_test_compact_prompt(config: &mut Config) { config.compact_prompt = Some(SUMMARIZATION_PROMPT.to_string()); } fn body_contains_text(body: &str, text: &str) -> bool { body.contains(&json_fragment(text)) } fn json_fragment(text: &str) -> String { serde_json::to_string(text) .expect("serialize text to JSON") .trim_matches('"') .to_string() } fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo { let mut provider = built_in_model_providers()["openai"].clone(); provider.name = "OpenAI (test)".into(); 
provider.base_url = Some(format!("{}/v1", server.uri())); provider } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn summarize_context_three_requests_and_instructions() { skip_if_no_network!(); // Set up a mock server that we can inspect after the run. let server = start_mock_server().await; // SSE 1: assistant replies normally so it is recorded in history. let sse1 = sse(vec![ ev_assistant_message("m1", FIRST_REPLY), ev_completed("r1"), ]); // SSE 2: summarizer returns a summary message. let sse2 = sse(vec![ ev_assistant_message("m2", SUMMARY_TEXT), ev_completed("r2"), ]); // SSE 3: minimal completed; we only need to capture the request body. let sse3 = sse(vec![ev_completed("r3")]); // Mount the three expected requests in sequence so the assertions below can // inspect them without relying on specific prompt markers. let request_log = mount_sse_sequence(&server, vec![sse1, sse2, sse3]).await; // Build config pointing to the mock server and spawn Codex. let model_provider = non_openai_model_provider(&server); let home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&home).await; config.model_provider = model_provider; set_test_compact_prompt(&mut config); config.model_auto_compact_token_limit = Some(200_000); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), ); let NewConversation { conversation: codex, session_configured, .. } = conversation_manager.new_conversation(config).await.unwrap(); let rollout_path = session_configured.rollout_path; // 1) Normal user input – should hit server once. codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello world".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // 2) Summarize – second hit should include the summarization prompt. 
codex.submit(Op::Compact).await.unwrap(); let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await; let EventMsg::Warning(WarningEvent { message }) = warning_event else { panic!("expected warning event after compact"); }; assert_eq!(message, COMPACT_WARNING_MESSAGE); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // 3) Next user input – third hit; history should include only the summary. codex .submit(Op::UserInput { items: vec![UserInput::Text { text: THIRD_USER_MSG.into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Inspect the three captured requests. let requests = request_log.requests(); assert_eq!(requests.len(), 3, "expected exactly three requests"); let body1 = requests[0].body_json(); let body2 = requests[1].body_json(); let body3 = requests[2].body_json(); // Manual compact should keep the baseline developer instructions. let instr1 = body1.get("instructions").and_then(|v| v.as_str()).unwrap(); let instr2 = body2.get("instructions").and_then(|v| v.as_str()).unwrap(); assert_eq!( instr1, instr2, "manual compact should keep the standard developer instructions" ); // The summarization request should include the injected user input marker. let body2_str = body2.to_string(); let input2 = body2.get("input").and_then(|v| v.as_array()).unwrap(); let has_compact_prompt = body_contains_text(&body2_str, SUMMARIZATION_PROMPT); assert!( has_compact_prompt, "compaction request should include the summarize trigger" ); // The last item is the user message created from the injected input. 
let last2 = input2.last().unwrap(); assert_eq!(last2.get("type").unwrap().as_str().unwrap(), "message"); assert_eq!(last2.get("role").unwrap().as_str().unwrap(), "user"); let text2 = last2["content"][0]["text"].as_str().unwrap(); assert_eq!( text2, SUMMARIZATION_PROMPT, "expected summarize trigger, got `{text2}`" ); // Third request must contain the refreshed instructions, compacted user history, and new user message. let input3 = body3.get("input").and_then(|v| v.as_array()).unwrap(); assert!( input3.len() >= 3, "expected refreshed context and new user message in third request" ); let mut messages: Vec<(String, String)> = Vec::new(); let expected_summary_message = summary_with_prefix(SUMMARY_TEXT); for item in input3 { if let Some("message") = item.get("type").and_then(|v| v.as_str()) { let role = item .get("role") .and_then(|v| v.as_str()) .unwrap_or_default() .to_string(); let text = item .get("content") .and_then(|v| v.as_array()) .and_then(|arr| arr.first()) .and_then(|entry| entry.get("text")) .and_then(|v| v.as_str()) .unwrap_or_default() .to_string(); messages.push((role, text)); } } // No previous assistant messages should remain and the new user message is present. let assistant_count = messages.iter().filter(|(r, _)| r == "assistant").count(); assert_eq!(assistant_count, 0, "assistant history should be cleared"); assert!( messages .iter() .any(|(r, t)| r == "user" && t == THIRD_USER_MSG), "third request should include the new user message" ); assert!( messages .iter() .any(|(r, t)| r == "user" && t == "hello world"), "third request should include the original user message" ); assert!( messages .iter() .any(|(r, t)| r == "user" && t == &expected_summary_message), "third request should include the summary message" ); assert!( !messages .iter() .any(|(_, text)| text.contains(SUMMARIZATION_PROMPT)), "third request should not include the summarize trigger" ); // Shut down Codex to flush rollout entries before inspecting the file. 
codex.submit(Op::Shutdown).await.unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await; // Verify rollout contains APITurn entries for each API call and a Compacted entry. println!("rollout path: {}", rollout_path.display()); let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| { panic!( "failed to read rollout file {}: {e}", rollout_path.display() ) }); let mut api_turn_count = 0usize; let mut saw_compacted_summary = false; for line in text.lines() { let trimmed = line.trim(); if trimmed.is_empty() { continue; } let Ok(entry): Result<RolloutLine, _> = serde_json::from_str(trimmed) else { continue; }; match entry.item { RolloutItem::TurnContext(_) => { api_turn_count += 1; } RolloutItem::Compacted(ci) => { if ci.message == expected_summary_message { saw_compacted_summary = true; } } _ => {} } } assert!( api_turn_count == 3, "expected three APITurn entries in rollout" ); assert!( saw_compacted_summary, "expected a Compacted entry containing the summarizer output" ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn manual_compact_uses_custom_prompt() { skip_if_no_network!(); let server = start_mock_server().await; let sse_stream = sse(vec![ev_completed("r1")]); mount_sse_once(&server, sse_stream).await; let custom_prompt = "Use this compact prompt instead"; let model_provider = non_openai_model_provider(&server); let home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&home).await; config.model_provider = model_provider; config.compact_prompt = Some(custom_prompt.to_string()); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create conversation") .conversation; codex.submit(Op::Compact).await.expect("trigger compact"); let warning_event = wait_for_event(&codex, |ev| matches!(ev, EventMsg::Warning(_))).await; let 
EventMsg::Warning(WarningEvent { message }) = warning_event else { panic!("expected warning event after compact"); }; assert_eq!(message, COMPACT_WARNING_MESSAGE); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let requests = get_responses_requests(&server).await; let body = requests .iter() .find_map(|req| req.body_json::<serde_json::Value>().ok()) .expect("summary request body"); let input = body .get("input") .and_then(|v| v.as_array()) .expect("input array"); let mut found_custom_prompt = false; let mut found_default_prompt = false; for item in input { if item["type"].as_str() != Some("message") { continue; } let text = item["content"][0]["text"].as_str().unwrap_or_default(); if text == custom_prompt { found_custom_prompt = true; } if text == SUMMARIZATION_PROMPT { found_default_prompt = true; } } let used_prompt = found_custom_prompt || found_default_prompt; if used_prompt { assert!(found_custom_prompt, "custom prompt should be injected"); assert!( !found_default_prompt, "default prompt should be replaced when a compact prompt is used" ); } else { assert!( !found_default_prompt, "summarization prompt should not appear if compaction omits a prompt" ); } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn manual_compact_emits_api_and_local_token_usage_events() { skip_if_no_network!(); let server = start_mock_server().await; // Compact run where the API reports zero tokens in usage. Our local // estimator should still compute a non-zero context size for the compacted // history. 
let sse_compact = sse(vec![ ev_assistant_message("m1", SUMMARY_TEXT), ev_completed_with_tokens("r1", 0), ]); mount_sse_once(&server, sse_compact).await; let model_provider = non_openai_model_provider(&server); let home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&home).await; config.model_provider = model_provider; set_test_compact_prompt(&mut config); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), ); let NewConversation { conversation: codex, .. } = conversation_manager.new_conversation(config).await.unwrap(); // Trigger manual compact and collect TokenCount events for the compact turn. codex.submit(Op::Compact).await.unwrap(); // First TokenCount: from the compact API call (usage.total_tokens = 0). let first = wait_for_event_match(&codex, |ev| match ev { EventMsg::TokenCount(tc) => tc .info .as_ref() .map(|info| info.last_token_usage.total_tokens), _ => None, }) .await; // Second TokenCount: from the local post-compaction estimate. let last = wait_for_event_match(&codex, |ev| match ev { EventMsg::TokenCount(tc) => tc .info .as_ref() .map(|info| info.last_token_usage.total_tokens), _ => None, }) .await; // Ensure the compact task itself completes. 
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; assert_eq!( first, 0, "expected first TokenCount from compact API usage to be zero" ); assert!( last > 0, "second TokenCount should reflect a non-zero estimated context size after compaction" ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn multiple_auto_compact_per_task_runs_after_token_limit_hit() { skip_if_no_network!(); let server = start_mock_server().await; let non_openai_provider_name = non_openai_model_provider(&server).name; let codex = test_codex() .with_config(move |config| { config.model_provider.name = non_openai_provider_name; }) .build(&server) .await .expect("build codex") .codex; // user message let user_message = "create an app"; // Prepare the mock responses from the model // summary texts from model let first_summary_text = "The task is to create an app. I started to create a react app."; let second_summary_text = "The task is to create an app. I started to create a react app. then I realized that I need to create a node app."; let third_summary_text = "The task is to create an app. I started to create a react app. then I realized that I need to create a node app. 
then I realized that I need to create a python app."; // summary texts with prefix let prefixed_first_summary = summary_with_prefix(first_summary_text); let prefixed_second_summary = summary_with_prefix(second_summary_text); let prefixed_third_summary = summary_with_prefix(third_summary_text); // token used count after long work let token_count_used = 270_000; // token used count after compaction let token_count_used_after_compaction = 80000; // mock responses from the model let reasoning_response_1 = ev_reasoning_item("m1", &["I will create a react app"], &[]); let encrypted_content_1 = reasoning_response_1["item"]["encrypted_content"] .as_str() .unwrap(); // first chunk of work let model_reasoning_response_1_sse = sse(vec![ reasoning_response_1.clone(), ev_local_shell_call("r1-shell", "completed", vec!["echo", "make-react"]), ev_completed_with_tokens("r1", token_count_used), ]); // first compaction response let model_compact_response_1_sse = sse(vec![ ev_assistant_message("m2", first_summary_text), ev_completed_with_tokens("r2", token_count_used_after_compaction), ]); let reasoning_response_2 = ev_reasoning_item("m3", &["I will create a node app"], &[]); let encrypted_content_2 = reasoning_response_2["item"]["encrypted_content"] .as_str() .unwrap(); // second chunk of work let model_reasoning_response_2_sse = sse(vec![ reasoning_response_2.clone(), ev_local_shell_call("r3-shell", "completed", vec!["echo", "make-node"]), ev_completed_with_tokens("r3", token_count_used), ]); // second compaction response let model_compact_response_2_sse = sse(vec![ ev_assistant_message("m4", second_summary_text), ev_completed_with_tokens("r4", token_count_used_after_compaction), ]); let reasoning_response_3 = ev_reasoning_item("m6", &["I will create a python app"], &[]); let encrypted_content_3 = reasoning_response_3["item"]["encrypted_content"] .as_str() .unwrap(); // third chunk of work let model_reasoning_response_3_sse = sse(vec![ ev_reasoning_item("m6", &["I will create a 
python app"], &[]), ev_local_shell_call("r6-shell", "completed", vec!["echo", "make-python"]), ev_completed_with_tokens("r6", token_count_used), ]); // third compaction response let model_compact_response_3_sse = sse(vec![ ev_assistant_message("m7", third_summary_text), ev_completed_with_tokens("r7", token_count_used_after_compaction), ]); // final response let model_final_response_sse = sse(vec![ ev_assistant_message( "m8", "The task is to create an app. I started to create a react app. then I realized that I need to create a node app. then I realized that I need to create a python app.", ), ev_completed_with_tokens("r8", token_count_used_after_compaction + 1000), ]); // mount the mock responses from the model let bodies = vec![ model_reasoning_response_1_sse, model_compact_response_1_sse, model_reasoning_response_2_sse, model_compact_response_2_sse, model_reasoning_response_3_sse, model_compact_response_3_sse, model_final_response_sse, ]; mount_sse_sequence(&server, bodies).await; // Start the conversation with the user message codex .submit(Op::UserInput { items: vec![UserInput::Text { text: user_message.into(), }], }) .await .expect("submit user input"); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // collect the requests payloads from the model let requests_payloads = get_responses_requests(&server).await; let body = requests_payloads[0] .body_json::<serde_json::Value>() .unwrap(); let input = body.get("input").and_then(|v| v.as_array()).unwrap(); fn normalize_inputs(values: &[serde_json::Value]) -> Vec<serde_json::Value> { values .iter() .filter(|value| { if value .get("type") .and_then(|ty| ty.as_str()) .is_some_and(|ty| ty == "function_call_output") { return false; } let text = value .get("content") .and_then(|content| content.as_array()) .and_then(|content| content.first()) .and_then(|item| item.get("text")) .and_then(|text| text.as_str()); // Ignore the cached UI prefix (project docs + skills) since it is not relevant to // 
compaction behavior and can change as bundled skills evolve. !text.is_some_and(|text| text.starts_with("# AGENTS.md instructions for ")) }) .cloned() .collect() } let initial_input = normalize_inputs(input); let environment_message = initial_input[0]["content"][0]["text"].as_str().unwrap(); // test 1: after compaction, we should have one environment message, one user message, and one user message with summary prefix let compaction_indices = [2, 4, 6]; let expected_summaries = [ prefixed_first_summary.as_str(), prefixed_second_summary.as_str(), prefixed_third_summary.as_str(), ]; for (i, expected_summary) in compaction_indices.into_iter().zip(expected_summaries) { let body = requests_payloads.clone()[i] .body_json::<serde_json::Value>() .unwrap(); let input = body.get("input").and_then(|v| v.as_array()).unwrap(); let input = normalize_inputs(input); assert_eq!(input.len(), 3); let environment_message = input[0]["content"][0]["text"].as_str().unwrap(); let user_message_received = input[1]["content"][0]["text"].as_str().unwrap(); let summary_message = input[2]["content"][0]["text"].as_str().unwrap(); assert_eq!(environment_message, environment_message); assert_eq!(user_message_received, user_message); assert_eq!( summary_message, expected_summary, "compaction request at index {i} should include the prefixed summary" ); } // test 2: the expected requests inputs should be as follows: let expected_requests_inputs = json!([ [ // 0: first request of the user message. { "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" } ] , [ // 1: first automatic compaction request. 
{ "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": null, "encrypted_content": encrypted_content_1, "summary": [ { "text": "I will create a react app", "type": "summary_text" } ], "type": "reasoning" }, { "action": { "command": [ "echo", "make-react" ], "env": null, "timeout_ms": null, "type": "exec", "user": null, "working_directory": null }, "call_id": "r1-shell", "status": "completed", "type": "local_shell_call" }, { "call_id": "r1-shell", "output": "execution error: Io(Os { code: 2, kind: NotFound, message: \"No such file or directory\" })", "type": "function_call_output" }, { "content": [ { "text": SUMMARIZATION_PROMPT, "type": "input_text" } ], "role": "user", "type": "message" } ] , [ // 2: request after first automatic compaction. { "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": prefixed_first_summary.clone(), "type": "input_text" } ], "role": "user", "type": "message" } ] , [ // 3: request for second automatic compaction. 
{ "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": prefixed_first_summary.clone(), "type": "input_text" } ], "role": "user", "type": "message" }, { "content": null, "encrypted_content": encrypted_content_2, "summary": [ { "text": "I will create a node app", "type": "summary_text" } ], "type": "reasoning" }, { "action": { "command": [ "echo", "make-node" ], "env": null, "timeout_ms": null, "type": "exec", "user": null, "working_directory": null }, "call_id": "r3-shell", "status": "completed", "type": "local_shell_call" }, { "call_id": "r3-shell", "output": "execution error: Io(Os { code: 2, kind: NotFound, message: \"No such file or directory\" })", "type": "function_call_output" }, { "content": [ { "text": SUMMARIZATION_PROMPT, "type": "input_text" } ], "role": "user", "type": "message" } ] , // 4: request after second automatic compaction. [ { "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": prefixed_second_summary.clone(), "type": "input_text" } ], "role": "user", "type": "message" } ] , [ // 5: request for third automatic compaction. 
{ "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": prefixed_second_summary.clone(), "type": "input_text" } ], "role": "user", "type": "message" }, { "content": null, "encrypted_content": encrypted_content_3, "summary": [ { "text": "I will create a python app", "type": "summary_text" } ], "type": "reasoning" }, { "action": { "command": [ "echo", "make-python" ], "env": null, "timeout_ms": null, "type": "exec", "user": null, "working_directory": null }, "call_id": "r6-shell", "status": "completed", "type": "local_shell_call" }, { "call_id": "r6-shell", "output": "execution error: Io(Os { code: 2, kind: NotFound, message: \"No such file or directory\" })", "type": "function_call_output" }, { "content": [ { "text": SUMMARIZATION_PROMPT, "type": "input_text" } ], "role": "user", "type": "message" } ] , [ { // 6: request after third automatic compaction. "content": [ { "text": environment_message, "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": "create an app", "type": "input_text" } ], "role": "user", "type": "message" }, { "content": [ { "text": prefixed_third_summary.clone(), "type": "input_text" } ], "role": "user", "type": "message" } ] ]); for (i, request) in requests_payloads.iter().enumerate() { let body = request.body_json::<serde_json::Value>().unwrap(); let input = body.get("input").and_then(|v| v.as_array()).unwrap(); let expected_input = expected_requests_inputs[i].as_array().unwrap(); assert_eq!(normalize_inputs(input), normalize_inputs(expected_input)); } // test 3: the number of requests should be 7 assert_eq!(requests_payloads.len(), 7); } // Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts. 
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))] #[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))] async fn auto_compact_runs_after_token_limit_hit() { skip_if_no_network!(); let server = start_mock_server().await; let sse1 = sse(vec![
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/exec_policy.rs
codex-rs/core/tests/suite/exec_policy.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] use anyhow::Result; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use serde_json::json; use std::fs; #[tokio::test] async fn execpolicy_blocks_shell_invocation() -> Result<()> { // TODO execpolicy doesn't parse powershell commands yet if cfg!(windows) { return Ok(()); } let mut builder = test_codex().with_config(|config| { let policy_path = config.codex_home.join("rules").join("policy.rules"); fs::create_dir_all( policy_path .parent() .expect("policy directory must have a parent"), ) .expect("create policy directory"); fs::write( &policy_path, r#"prefix_rule(pattern=["echo"], decision="forbidden")"#, ) .expect("write policy file"); }); let server = start_mock_server().await; let test = builder.build(&server).await?; let call_id = "shell-forbidden"; let args = json!({ "command": "echo blocked", "timeout_ms": 1_000, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; let session_model = test.session_configured.model.clone(); test.codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "run shell command".into(), }], final_output_json_schema: 
None, cwd: test.cwd_path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let EventMsg::ExecCommandEnd(end) = wait_for_event(&test.codex, |event| { matches!(event, EventMsg::ExecCommandEnd(_)) }) .await else { unreachable!() }; wait_for_event(&test.codex, |event| { matches!(event, EventMsg::TaskComplete(_)) }) .await; assert!( end.aggregated_output .contains("execpolicy forbids this command"), "unexpected output: {}", end.aggregated_output ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/json_result.rs
codex-rs/core/tests/suite/json_result.rs
#![cfg(not(target_os = "windows"))] use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use responses::ev_assistant_message; use responses::ev_completed; use responses::sse; use responses::start_mock_server; const SCHEMA: &str = r#" { "type": "object", "properties": { "explanation": { "type": "string" }, "final_answer": { "type": "string" } }, "required": ["explanation", "final_answer"], "additionalProperties": false } "#; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn codex_returns_json_result_for_gpt5() -> anyhow::Result<()> { codex_returns_json_result("gpt-5.1".to_string()).await } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn codex_returns_json_result_for_gpt5_codex() -> anyhow::Result<()> { codex_returns_json_result("gpt-5.1-codex".to_string()).await } async fn codex_returns_json_result(model: String) -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let sse1 = sse(vec![ ev_assistant_message( "m2", r#"{"explanation": "explanation", "final_answer": "final_answer"}"#, ), ev_completed("r1"), ]); let expected_schema: serde_json::Value = serde_json::from_str(SCHEMA)?; let match_json_text_param = move |req: &wiremock::Request| { let body: serde_json::Value = serde_json::from_slice(&req.body).unwrap_or_default(); let Some(text) = body.get("text") else { return false; }; let Some(format) = text.get("format") else { return false; }; format.get("name") == Some(&serde_json::Value::String("codex_output_schema".into())) && format.get("type") == 
Some(&serde_json::Value::String("json_schema".into())) && format.get("strict") == Some(&serde_json::Value::Bool(true)) && format.get("schema") == Some(&expected_schema) }; responses::mount_sse_once_match(&server, match_json_text_param, sse1).await; let TestCodex { codex, cwd, .. } = test_codex().build(&server).await?; // 1) Normal user input – should hit server once. codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello world".into(), }], final_output_json_schema: Some(serde_json::from_str(SCHEMA)?), cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let message = wait_for_event(&codex, |ev| matches!(ev, EventMsg::AgentMessage(_))).await; if let EventMsg::AgentMessage(message) = message { let json: serde_json::Value = serde_json::from_str(&message.message)?; assert_eq!( json.get("explanation"), Some(&serde_json::Value::String("explanation".into())) ); assert_eq!( json.get("final_answer"), Some(&serde_json::Value::String("final_answer".into())) ); } else { anyhow::bail!("expected agent message event"); } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/rollout_list_find.rs
codex-rs/core/tests/suite/rollout_list_find.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] use std::io::Write; use std::path::Path; use std::path::PathBuf; use codex_core::find_conversation_path_by_id_str; use tempfile::TempDir; use uuid::Uuid; /// Create sessions/YYYY/MM/DD and write a minimal rollout file containing the /// provided conversation id in the SessionMeta line. Returns the absolute path. fn write_minimal_rollout_with_id(codex_home: &Path, id: Uuid) -> PathBuf { let sessions = codex_home.join("sessions/2024/01/01"); std::fs::create_dir_all(&sessions).unwrap(); let file = sessions.join(format!("rollout-2024-01-01T00-00-00-{id}.jsonl")); let mut f = std::fs::File::create(&file).unwrap(); // Minimal first line: session_meta with the id so content search can find it writeln!( f, "{}", serde_json::json!({ "timestamp": "2024-01-01T00:00:00.000Z", "type": "session_meta", "payload": { "id": id, "timestamp": "2024-01-01T00:00:00Z", "instructions": null, "cwd": ".", "originator": "test", "cli_version": "test", "model_provider": "test-provider" } }) ) .unwrap(); file } #[tokio::test] async fn find_locates_rollout_file_by_id() { let home = TempDir::new().unwrap(); let id = Uuid::new_v4(); let expected = write_minimal_rollout_with_id(home.path(), id); let found = find_conversation_path_by_id_str(home.path(), &id.to_string()) .await .unwrap(); assert_eq!(found.unwrap(), expected); } #[tokio::test] async fn find_handles_gitignore_covering_codex_home_directory() { let repo = TempDir::new().unwrap(); let codex_home = repo.path().join(".codex"); std::fs::create_dir_all(&codex_home).unwrap(); std::fs::write(repo.path().join(".gitignore"), ".codex/**\n").unwrap(); let id = Uuid::new_v4(); let expected = write_minimal_rollout_with_id(&codex_home, id); let found = find_conversation_path_by_id_str(&codex_home, &id.to_string()) .await .unwrap(); assert_eq!(found, Some(expected)); } #[tokio::test] async fn find_ignores_granular_gitignore_rules() { let home = TempDir::new().unwrap(); let id = Uuid::new_v4(); let 
expected = write_minimal_rollout_with_id(home.path(), id); std::fs::write(home.path().join("sessions/.gitignore"), "*.jsonl\n").unwrap(); let found = find_conversation_path_by_id_str(home.path(), &id.to_string()) .await .unwrap(); assert_eq!(found, Some(expected)); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/seatbelt.rs
codex-rs/core/tests/suite/seatbelt.rs
#![cfg(target_os = "macos")] //! Tests for the macOS sandboxing that are specific to Seatbelt. //! Tests that apply to both Mac and Linux sandboxing should go in sandbox.rs. use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; use codex_core::protocol::SandboxPolicy; use codex_core::seatbelt::spawn_command_under_seatbelt; use codex_core::spawn::CODEX_SANDBOX_ENV_VAR; use codex_core::spawn::StdioPolicy; use tempfile::TempDir; struct TestScenario { repo_parent: PathBuf, file_outside_repo: PathBuf, repo_root: PathBuf, file_in_repo_root: PathBuf, file_in_dot_git_dir: PathBuf, } struct TestExpectations { file_outside_repo_is_writable: bool, file_in_repo_root_is_writable: bool, file_in_dot_git_dir_is_writable: bool, } impl TestScenario { async fn run_test(&self, policy: &SandboxPolicy, expectations: TestExpectations) { if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) { eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test."); return; } assert_eq!( touch(&self.file_outside_repo, policy).await, expectations.file_outside_repo_is_writable ); assert_eq!( self.file_outside_repo.exists(), expectations.file_outside_repo_is_writable ); assert_eq!( touch(&self.file_in_repo_root, policy).await, expectations.file_in_repo_root_is_writable ); assert_eq!( self.file_in_repo_root.exists(), expectations.file_in_repo_root_is_writable ); assert_eq!( touch(&self.file_in_dot_git_dir, policy).await, expectations.file_in_dot_git_dir_is_writable ); assert_eq!( self.file_in_dot_git_dir.exists(), expectations.file_in_dot_git_dir_is_writable ); } } /// If the user has added a workspace root that is not a Git repo root, then /// the user has to specify `--skip-git-repo-check` or go through some /// interstitial that indicates they are taking on some risk because Git /// cannot be used to backup their work before the agent begins. 
/// /// Because the user has agreed to this risk, we do not try find all .git /// folders in the workspace and block them (though we could change our /// position on this in the future). #[tokio::test] async fn if_parent_of_repo_is_writable_then_dot_git_folder_is_writable() { let tmp = TempDir::new().expect("should be able to create temp dir"); let test_scenario = create_test_scenario(&tmp); let policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![test_scenario.repo_parent.as_path().try_into().unwrap()], network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }; test_scenario .run_test( &policy, TestExpectations { file_outside_repo_is_writable: true, file_in_repo_root_is_writable: true, file_in_dot_git_dir_is_writable: true, }, ) .await; } /// When the writable root is the root of a Git repository (as evidenced by the /// presence of a .git folder), then the .git folder should be read-only if /// the policy is `WorkspaceWrite`. #[tokio::test] async fn if_git_repo_is_writable_root_then_dot_git_folder_is_read_only() { let tmp = TempDir::new().expect("should be able to create temp dir"); let test_scenario = create_test_scenario(&tmp); let policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![test_scenario.repo_root.as_path().try_into().unwrap()], network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }; test_scenario .run_test( &policy, TestExpectations { file_outside_repo_is_writable: false, file_in_repo_root_is_writable: true, file_in_dot_git_dir_is_writable: false, }, ) .await; } /// Under DangerFullAccess, all writes should be permitted anywhere on disk, /// including inside the .git folder. 
#[tokio::test] async fn danger_full_access_allows_all_writes() { let tmp = TempDir::new().expect("should be able to create temp dir"); let test_scenario = create_test_scenario(&tmp); let policy = SandboxPolicy::DangerFullAccess; test_scenario .run_test( &policy, TestExpectations { file_outside_repo_is_writable: true, file_in_repo_root_is_writable: true, file_in_dot_git_dir_is_writable: true, }, ) .await; } /// Under ReadOnly, writes should not be permitted anywhere on disk. #[tokio::test] async fn read_only_forbids_all_writes() { let tmp = TempDir::new().expect("should be able to create temp dir"); let test_scenario = create_test_scenario(&tmp); let policy = SandboxPolicy::ReadOnly; test_scenario .run_test( &policy, TestExpectations { file_outside_repo_is_writable: false, file_in_repo_root_is_writable: false, file_in_dot_git_dir_is_writable: false, }, ) .await; } #[tokio::test] async fn openpty_works_under_seatbelt() { if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) { eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test."); return; } if which::which("python3").is_err() { eprintln!("python3 not found in PATH, skipping test."); return; } let policy = SandboxPolicy::ReadOnly; let command_cwd = std::env::current_dir().expect("getcwd"); let sandbox_cwd = command_cwd.clone(); let mut child = spawn_command_under_seatbelt( vec![ "python3".to_string(), "-c".to_string(), r#"import os master, slave = os.openpty() os.write(slave, b"ping") assert os.read(master, 4) == b"ping""# .to_string(), ], command_cwd, &policy, sandbox_cwd.as_path(), StdioPolicy::RedirectForShellTool, HashMap::new(), ) .await .expect("should be able to spawn python under seatbelt"); let status = child .wait() .await .expect("should be able to wait for child process"); assert!(status.success(), "python exited with {status:?}"); } #[tokio::test] async fn java_home_finds_runtime_under_seatbelt() { if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) { 
eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test."); return; } let java_home_path = Path::new("/usr/libexec/java_home"); if !java_home_path.exists() { eprintln!("/usr/libexec/java_home is not present, skipping test."); return; } let baseline_output = tokio::process::Command::new(java_home_path) .env_remove("JAVA_HOME") .output() .await .expect("should be able to invoke java_home outside seatbelt"); if !baseline_output.status.success() { eprintln!( "java_home exited with {:?} outside seatbelt, skipping test", baseline_output.status ); return; } let policy = SandboxPolicy::ReadOnly; let command_cwd = std::env::current_dir().expect("getcwd"); let sandbox_cwd = command_cwd.clone(); let mut env: HashMap<String, String> = std::env::vars().collect(); env.remove("JAVA_HOME"); env.remove(CODEX_SANDBOX_ENV_VAR); let child = spawn_command_under_seatbelt( vec![java_home_path.to_string_lossy().to_string()], command_cwd, &policy, sandbox_cwd.as_path(), StdioPolicy::RedirectForShellTool, env, ) .await .expect("should be able to spawn java_home under seatbelt"); let output = child .wait_with_output() .await .expect("should be able to wait for java_home child"); assert!( output.status.success(), "java_home under seatbelt exited with {:?}, stderr: {}", output.status, String::from_utf8_lossy(&output.stderr) ); let stdout = String::from_utf8_lossy(&output.stdout); assert!( !stdout.trim().is_empty(), "java_home stdout unexpectedly empty under seatbelt" ); } #[expect(clippy::expect_used)] fn create_test_scenario(tmp: &TempDir) -> TestScenario { let repo_parent = tmp.path().to_path_buf(); let repo_root = repo_parent.join("repo"); let dot_git_dir = repo_root.join(".git"); std::fs::create_dir(&repo_root).expect("should be able to create repo root"); std::fs::create_dir(&dot_git_dir).expect("should be able to create .git dir"); TestScenario { file_outside_repo: repo_parent.join("outside.txt"), repo_parent, file_in_repo_root: repo_root.join("repo_file.txt"), repo_root, 
file_in_dot_git_dir: dot_git_dir.join("dot_git_file.txt"), } } #[expect(clippy::expect_used)] /// Note that `path` must be absolute. async fn touch(path: &Path, policy: &SandboxPolicy) -> bool { assert!(path.is_absolute(), "Path must be absolute: {path:?}"); let command_cwd = std::env::current_dir().expect("getcwd"); let sandbox_cwd = command_cwd.clone(); let mut child = spawn_command_under_seatbelt( vec![ "/usr/bin/touch".to_string(), path.to_string_lossy().to_string(), ], command_cwd, policy, sandbox_cwd.as_path(), StdioPolicy::RedirectForShellTool, HashMap::new(), ) .await .expect("should be able to spawn command under seatbelt"); child .wait() .await .expect("should be able to wait for child process") .success() }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/user_notification.rs
codex-rs/core/tests/suite/user_notification.rs
#![cfg(not(target_os = "windows"))] use std::os::unix::fs::PermissionsExt; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_protocol::user_input::UserInput; use core_test_support::fs_wait; use core_test_support::responses; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use serde_json::Value; use serde_json::json; use tempfile::TempDir; use responses::ev_assistant_message; use responses::ev_completed; use responses::sse; use responses::start_mock_server; use std::time::Duration; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "flaky on ubuntu-24.04-arm - aarch64-unknown-linux-gnu"] // The notify script gets far enough to create (and therefore surface) the file, // but hasn’t flushed the JSON yet. Reading an empty file produces EOF while parsing // a value at line 1 column 0. May be caused by a slow runner. async fn summarize_context_three_requests_and_instructions() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let sse1 = sse(vec![ev_assistant_message("m1", "Done"), ev_completed("r1")]); responses::mount_sse_once(&server, sse1).await; let notify_dir = TempDir::new()?; // write a script to the notify that touches a file next to it let notify_script = notify_dir.path().join("notify.sh"); std::fs::write( &notify_script, r#"#!/bin/bash set -e echo -n "${@: -1}" > $(dirname "${0}")/notify.txt"#, )?; std::fs::set_permissions(&notify_script, std::fs::Permissions::from_mode(0o755))?; let notify_file = notify_dir.path().join("notify.txt"); let notify_script_str = notify_script.to_str().unwrap().to_string(); let TestCodex { codex, .. } = test_codex() .with_config(move |cfg| cfg.notify = Some(vec![notify_script_str])) .build(&server) .await?; // 1) Normal user input – should hit server once. 
codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello world".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // We fork the notify script, so we need to wait for it to write to the file. fs_wait::wait_for_path_exists(&notify_file, Duration::from_secs(5)).await?; let notify_payload_raw = tokio::fs::read_to_string(&notify_file).await?; let payload: Value = serde_json::from_str(&notify_payload_raw)?; assert_eq!(payload["type"], json!("agent-turn-complete")); assert_eq!(payload["input-messages"], json!(["hello world"])); assert_eq!(payload["last-assistant-message"], json!("Done")); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/auth_refresh.rs
codex-rs/core/tests/suite/auth_refresh.rs
use anyhow::Context; use anyhow::Result; use base64::Engine; use chrono::Duration; use chrono::Utc; use codex_core::CodexAuth; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::auth::AuthDotJson; use codex_core::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; use codex_core::auth::RefreshTokenError; use codex_core::auth::load_auth_dot_json; use codex_core::auth::save_auth; use codex_core::error::RefreshTokenFailedReason; use codex_core::token_data::IdTokenInfo; use codex_core::token_data::TokenData; use core_test_support::skip_if_no_network; use pretty_assertions::assert_eq; use serde::Serialize; use serde_json::json; use std::ffi::OsString; use tempfile::TempDir; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; const INITIAL_ACCESS_TOKEN: &str = "initial-access-token"; const INITIAL_REFRESH_TOKEN: &str = "initial-refresh-token"; #[serial_test::serial(auth_refresh)] #[tokio::test] async fn refresh_token_succeeds_updates_storage() -> Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; Mock::given(method("POST")) .and(path("/oauth/token")) .respond_with(ResponseTemplate::new(200).set_body_json(json!({ "access_token": "new-access-token", "refresh_token": "new-refresh-token" }))) .expect(1) .mount(&server) .await; let ctx = RefreshTokenTestContext::new(&server)?; let auth = ctx.auth.clone(); let access = auth .refresh_token() .await .context("refresh should succeed")?; assert_eq!(access, "new-access-token"); let stored = ctx.load_auth()?; let tokens = stored.tokens.as_ref().context("tokens should exist")?; assert_eq!(tokens.access_token, "new-access-token"); assert_eq!(tokens.refresh_token, "new-refresh-token"); let refreshed_at = stored .last_refresh .as_ref() .context("last_refresh should be recorded")?; assert!( *refreshed_at >= ctx.initial_last_refresh, "last_refresh should advance" ); let cached = auth .get_token_data() .await 
.context("token data should be cached")?; assert_eq!(cached.access_token, "new-access-token"); server.verify().await; Ok(()) } #[serial_test::serial(auth_refresh)] #[tokio::test] async fn refresh_token_returns_permanent_error_for_expired_refresh_token() -> Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; Mock::given(method("POST")) .and(path("/oauth/token")) .respond_with(ResponseTemplate::new(401).set_body_json(json!({ "error": { "code": "refresh_token_expired" } }))) .expect(1) .mount(&server) .await; let ctx = RefreshTokenTestContext::new(&server)?; let auth = ctx.auth.clone(); let err = auth .refresh_token() .await .err() .context("refresh should fail")?; assert_eq!(err.failed_reason(), Some(RefreshTokenFailedReason::Expired)); let stored = ctx.load_auth()?; let tokens = stored.tokens.as_ref().context("tokens should remain")?; assert_eq!(tokens.access_token, INITIAL_ACCESS_TOKEN); assert_eq!(tokens.refresh_token, INITIAL_REFRESH_TOKEN); assert_eq!( *stored .last_refresh .as_ref() .context("last_refresh should remain unchanged")?, ctx.initial_last_refresh, ); server.verify().await; Ok(()) } #[serial_test::serial(auth_refresh)] #[tokio::test] async fn refresh_token_returns_transient_error_on_server_failure() -> Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; Mock::given(method("POST")) .and(path("/oauth/token")) .respond_with(ResponseTemplate::new(500).set_body_json(json!({ "error": "temporary-failure" }))) .expect(1) .mount(&server) .await; let ctx = RefreshTokenTestContext::new(&server)?; let auth = ctx.auth.clone(); let err = auth .refresh_token() .await .err() .context("refresh should fail")?; assert!(matches!(err, RefreshTokenError::Transient(_))); assert_eq!(err.failed_reason(), None); let stored = ctx.load_auth()?; let tokens = stored.tokens.as_ref().context("tokens should remain")?; assert_eq!(tokens.access_token, INITIAL_ACCESS_TOKEN); assert_eq!(tokens.refresh_token, 
INITIAL_REFRESH_TOKEN); assert_eq!( *stored .last_refresh .as_ref() .context("last_refresh should remain unchanged")?, ctx.initial_last_refresh, ); server.verify().await; Ok(()) } struct RefreshTokenTestContext { codex_home: TempDir, auth: CodexAuth, initial_last_refresh: chrono::DateTime<Utc>, _env_guard: EnvGuard, } impl RefreshTokenTestContext { fn new(server: &MockServer) -> Result<Self> { let codex_home = TempDir::new()?; let initial_last_refresh = Utc::now() - Duration::days(1); let mut id_token = IdTokenInfo::default(); id_token.raw_jwt = minimal_jwt(); let tokens = TokenData { id_token, access_token: INITIAL_ACCESS_TOKEN.to_string(), refresh_token: INITIAL_REFRESH_TOKEN.to_string(), account_id: Some("account-id".to_string()), }; let auth_dot_json = AuthDotJson { openai_api_key: None, tokens: Some(tokens), last_refresh: Some(initial_last_refresh), }; save_auth( codex_home.path(), &auth_dot_json, AuthCredentialsStoreMode::File, )?; let endpoint = format!("{}/oauth/token", server.uri()); let env_guard = EnvGuard::set(REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR, endpoint); let auth = CodexAuth::from_auth_storage(codex_home.path(), AuthCredentialsStoreMode::File)? .context("auth should load from storage")?; Ok(Self { codex_home, auth, initial_last_refresh, _env_guard: env_guard, }) } fn load_auth(&self) -> Result<AuthDotJson> { load_auth_dot_json(self.codex_home.path(), AuthCredentialsStoreMode::File) .context("load auth.json")? .context("auth.json should exist") } } struct EnvGuard { key: &'static str, original: Option<OsString>, } impl EnvGuard { fn set(key: &'static str, value: String) -> Self { let original = std::env::var_os(key); // SAFETY: these tests execute serially, so updating the process environment is safe. unsafe { std::env::set_var(key, &value); } Self { key, original } } } impl Drop for EnvGuard { fn drop(&mut self) { // SAFETY: the guard restores the original environment value before other tests run. 
unsafe { match &self.original { Some(value) => std::env::set_var(self.key, value), None => std::env::remove_var(self.key), } } } } fn minimal_jwt() -> String { #[derive(Serialize)] struct Header { alg: &'static str, typ: &'static str, } let header = Header { alg: "none", typ: "JWT", }; let payload = json!({ "sub": "user-123" }); fn b64(data: &[u8]) -> String { base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(data) } let header_bytes = match serde_json::to_vec(&header) { Ok(bytes) => bytes, Err(err) => panic!("serialize header: {err}"), }; let payload_bytes = match serde_json::to_vec(&payload) { Ok(bytes) => bytes, Err(err) => panic!("serialize payload: {err}"), }; let header_b64 = b64(&header_bytes); let payload_b64 = b64(&payload_bytes); let signature_b64 = b64(b"sig"); format!("{header_b64}.{payload_b64}.{signature_b64}") }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/live_cli.rs
codex-rs/core/tests/suite/live_cli.rs
#![expect(clippy::expect_used)] //! Optional smoke tests that hit the real OpenAI /v1/responses endpoint. They are `#[ignore]` by //! default so CI stays deterministic and free. Developers can run them locally with //! `cargo test --test live_cli -- --ignored` provided they set a valid `OPENAI_API_KEY`. use assert_cmd::prelude::*; use predicates::prelude::*; use std::process::Command; use std::process::Stdio; use tempfile::TempDir; fn require_api_key() -> String { std::env::var("OPENAI_API_KEY") .expect("OPENAI_API_KEY env var not set — skip running live tests") } /// Helper that spawns the binary inside a TempDir with minimal flags. Returns (Assert, TempDir). fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) { #![expect(clippy::unwrap_used)] use std::io::Read; use std::io::Write; use std::thread; let dir = TempDir::new().unwrap(); // Build a plain `std::process::Command` so we have full control over the underlying stdio // handles. `assert_cmd`’s own `Command` wrapper always forces stdout/stderr to be piped // internally which prevents us from streaming them live to the terminal (see its `spawn` // implementation). Instead we configure the std `Command` ourselves, then later hand the // resulting `Output` to `assert_cmd` for the familiar assertions. let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("codex-rs").unwrap()); cmd.current_dir(dir.path()); cmd.env("OPENAI_API_KEY", require_api_key()); // We want three things at once: // 1. live streaming of the child’s stdout/stderr while the test is running // 2. captured output so we can keep using assert_cmd’s `Assert` helpers // 3. 
cross‑platform behavior (best effort) // // To get that we: // • set both stdout and stderr to `piped()` so we can read them programmatically // • spawn a thread for each stream that copies bytes into two sinks: // – the parent process’ stdout/stderr for live visibility // – an in‑memory buffer so we can pass it to `assert_cmd` later // Pass the prompt through the `--` separator so the CLI knows when user input ends. cmd.arg("--allow-no-git-exec") .arg("-v") .arg("--") .arg(prompt); cmd.stdin(Stdio::piped()); cmd.stdout(Stdio::piped()); cmd.stderr(Stdio::piped()); let mut child = cmd.spawn().expect("failed to spawn codex-rs"); // Send the terminating newline so Session::run exits after the first turn. child .stdin .as_mut() .expect("child stdin unavailable") .write_all(b"\n") .expect("failed to write to child stdin"); // Helper that tees a ChildStdout/ChildStderr into both the parent’s stdio and a Vec<u8>. fn tee<R: Read + Send + 'static>( mut reader: R, mut writer: impl Write + Send + 'static, ) -> thread::JoinHandle<Vec<u8>> { thread::spawn(move || { let mut buf = Vec::new(); let mut chunk = [0u8; 4096]; loop { match reader.read(&mut chunk) { Ok(0) => break, Ok(n) => { writer.write_all(&chunk[..n]).ok(); writer.flush().ok(); buf.extend_from_slice(&chunk[..n]); } Err(_) => break, } } buf }) } let stdout_handle = tee( child.stdout.take().expect("child stdout"), std::io::stdout(), ); let stderr_handle = tee( child.stderr.take().expect("child stderr"), std::io::stderr(), ); let status = child.wait().expect("failed to wait on child"); let stdout = stdout_handle.join().expect("stdout thread panicked"); let stderr = stderr_handle.join().expect("stderr thread panicked"); let output = std::process::Output { status, stdout, stderr, }; (output.assert(), dir) } #[ignore] #[test] fn live_create_file_hello_txt() { if std::env::var("OPENAI_API_KEY").is_err() { eprintln!("skipping live_create_file_hello_txt – OPENAI_API_KEY not set"); return; } let (assert, dir) = run_live( "Use 
the shell tool with the apply_patch command to create a file named hello.txt containing the text 'hello'.", ); assert.success(); let path = dir.path().join("hello.txt"); assert!(path.exists(), "hello.txt was not created by the model"); let contents = std::fs::read_to_string(path).unwrap(); assert_eq!(contents.trim(), "hello"); } #[ignore] #[test] fn live_print_working_directory() { if std::env::var("OPENAI_API_KEY").is_err() { eprintln!("skipping live_print_working_directory – OPENAI_API_KEY not set"); return; } let (assert, dir) = run_live("Print the current working directory using the shell function."); assert .success() .stdout(predicate::str::contains(dir.path().to_string_lossy())); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/deprecation_notice.rs
codex-rs/core/tests/suite/deprecation_notice.rs
#![cfg(not(target_os = "windows"))] use anyhow::Ok; use codex_core::features::Feature; use codex_core::protocol::DeprecationNoticeEvent; use codex_core::protocol::EventMsg; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn emits_deprecation_notice_for_legacy_feature_flag() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.features.enable(Feature::UnifiedExec); config .features .record_legacy_usage_force("use_experimental_unified_exec_tool", Feature::UnifiedExec); config.use_experimental_unified_exec_tool = true; }); let TestCodex { codex, .. } = builder.build(&server).await?; let notice = wait_for_event_match(&codex, |event| match event { EventMsg::DeprecationNotice(ev) => Some(ev.clone()), _ => None, }) .await; let DeprecationNoticeEvent { summary, details } = notice; assert_eq!( summary, "`use_experimental_unified_exec_tool` is deprecated. Use `[features].unified_exec` instead." .to_string(), ); assert_eq!( details.as_deref(), Some( "Enable it with `--enable unified_exec` or `[features].unified_exec` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details." ), ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/client.rs
codex-rs/core/tests/suite/client.rs
use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::ContentItem; use codex_core::ConversationManager; use codex_core::LocalShellAction; use codex_core::LocalShellExecAction; use codex_core::LocalShellStatus; use codex_core::ModelClient; use codex_core::ModelProviderInfo; use codex_core::NewConversation; use codex_core::Prompt; use codex_core::ResponseEvent; use codex_core::ResponseItem; use codex_core::WireApi; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::built_in_model_providers; use codex_core::error::CodexErr; use codex_core::features::Feature; use codex_core::models_manager::manager::ModelsManager; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SessionSource; use codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::Verbosity; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ReasoningItemReasoningSummary; use codex_protocol::models::WebSearchAction; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::load_sse_fixture_with_id; use core_test_support::responses::ev_completed_with_tokens; use core_test_support::responses::get_responses_requests; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_once_match; use core_test_support::responses::sse; use core_test_support::responses::sse_failed; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use dunce::canonicalize as normalize_path; use futures::StreamExt; use serde_json::json; use std::io::Write; use std::sync::Arc; use tempfile::TempDir; use uuid::Uuid; use wiremock::Mock; use wiremock::MockServer; use 
wiremock::ResponseTemplate; use wiremock::matchers::body_string_contains; use wiremock::matchers::header_regex; use wiremock::matchers::method; use wiremock::matchers::path; use wiremock::matchers::query_param; /// Build minimal SSE stream with completed marker using the JSON fixture. fn sse_completed(id: &str) -> String { load_sse_fixture_with_id("tests/fixtures/completed_template.json", id) } #[expect(clippy::unwrap_used)] fn assert_message_role(request_body: &serde_json::Value, role: &str) { assert_eq!(request_body["role"].as_str().unwrap(), role); } #[expect(clippy::expect_used)] fn assert_message_equals(request_body: &serde_json::Value, text: &str) { let content = request_body["content"][0]["text"] .as_str() .expect("invalid message content"); assert_eq!( content, text, "expected message content '{content}' to equal '{text}'" ); } #[expect(clippy::expect_used)] fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) { let content = request_body["content"][0]["text"] .as_str() .expect("invalid message content"); assert!( content.starts_with(text), "expected message content '{content}' to start with '{text}'" ); } #[expect(clippy::expect_used)] fn assert_message_ends_with(request_body: &serde_json::Value, text: &str) { let content = request_body["content"][0]["text"] .as_str() .expect("invalid message content"); assert!( content.ends_with(text), "expected message content '{content}' to end with '{text}'" ); } /// Writes an `auth.json` into the provided `codex_home` with the specified parameters. /// Returns the fake JWT string written to `tokens.id_token`. 
#[expect(clippy::unwrap_used)] fn write_auth_json( codex_home: &TempDir, openai_api_key: Option<&str>, chatgpt_plan_type: &str, access_token: &str, account_id: Option<&str>, ) -> String { use base64::Engine as _; let header = json!({ "alg": "none", "typ": "JWT" }); let payload = json!({ "email": "user@example.com", "https://api.openai.com/auth": { "chatgpt_plan_type": chatgpt_plan_type, "chatgpt_account_id": account_id.unwrap_or("acc-123") } }); let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b); let header_b64 = b64(&serde_json::to_vec(&header).unwrap()); let payload_b64 = b64(&serde_json::to_vec(&payload).unwrap()); let signature_b64 = b64(b"sig"); let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); let mut tokens = json!({ "id_token": fake_jwt, "access_token": access_token, "refresh_token": "refresh-test", }); if let Some(acc) = account_id { tokens["account_id"] = json!(acc); } let auth_json = json!({ "OPENAI_API_KEY": openai_api_key, "tokens": tokens, // RFC3339 datetime; value doesn't matter for these tests "last_refresh": chrono::Utc::now(), }); std::fs::write( codex_home.path().join("auth.json"), serde_json::to_string_pretty(&auth_json).unwrap(), ) .unwrap(); fake_jwt } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn resume_includes_initial_messages_and_sends_prior_items() { skip_if_no_network!(); // Create a fake rollout session file with prior user + system + assistant messages. 
let tmpdir = TempDir::new().unwrap(); let session_path = tmpdir.path().join("resume-session.jsonl"); let mut f = std::fs::File::create(&session_path).unwrap(); let convo_id = Uuid::new_v4(); writeln!( f, "{}", json!({ "timestamp": "2024-01-01T00:00:00.000Z", "type": "session_meta", "payload": { "id": convo_id, "timestamp": "2024-01-01T00:00:00Z", "instructions": "be nice", "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "model_provider": "test-provider" } }) ) .unwrap(); // Prior item: user message (should be delivered) let prior_user = codex_protocol::models::ResponseItem::Message { id: None, role: "user".to_string(), content: vec![codex_protocol::models::ContentItem::InputText { text: "resumed user message".to_string(), }], }; let prior_user_json = serde_json::to_value(&prior_user).unwrap(); writeln!( f, "{}", json!({ "timestamp": "2024-01-01T00:00:01.000Z", "type": "response_item", "payload": prior_user_json }) ) .unwrap(); // Prior item: system message (excluded from API history) let prior_system = codex_protocol::models::ResponseItem::Message { id: None, role: "system".to_string(), content: vec![codex_protocol::models::ContentItem::OutputText { text: "resumed system instruction".to_string(), }], }; let prior_system_json = serde_json::to_value(&prior_system).unwrap(); writeln!( f, "{}", json!({ "timestamp": "2024-01-01T00:00:02.000Z", "type": "response_item", "payload": prior_system_json }) ) .unwrap(); // Prior item: assistant message let prior_item = codex_protocol::models::ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![codex_protocol::models::ContentItem::OutputText { text: "resumed assistant message".to_string(), }], }; let prior_item_json = serde_json::to_value(&prior_item).unwrap(); writeln!( f, "{}", json!({ "timestamp": "2024-01-01T00:00:03.000Z", "type": "response_item", "payload": prior_item_json }) ) .unwrap(); drop(f); // Mock server that will receive the resumed request let server = 
MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; // Configure Codex to resume from our file let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; // Also configure user instructions to ensure they are NOT delivered on resume. config.user_instructions = Some("be nice".to_string()); let conversation_manager = ConversationManager::with_models_provider_and_home( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), config.codex_home.clone(), ); let auth_manager = codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let NewConversation { conversation: codex, session_configured, .. } = conversation_manager .resume_conversation_from_rollout(config, session_path.clone(), auth_manager) .await .expect("resume conversation"); // 1) Assert initial_messages only includes existing EventMsg entries; response items are not converted let initial_msgs = session_configured .initial_messages .clone() .expect("expected initial messages option for resumed session"); let initial_json = serde_json::to_value(&initial_msgs).unwrap(); let expected_initial_json = json!([]); assert_eq!(initial_json, expected_initial_json); // 2) Submit new input; the request body must include the prior item followed by the new user input. 
codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); let expected_input = json!([ { "type": "message", "role": "user", "content": [{ "type": "input_text", "text": "resumed user message" }] }, { "type": "message", "role": "assistant", "content": [{ "type": "output_text", "text": "resumed assistant message" }] }, { "type": "message", "role": "user", "content": [{ "type": "input_text", "text": "hello" }] } ]); assert_eq!(request_body["input"], expected_input); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_conversation_id_and_model_headers_in_request() { skip_if_no_network!(); // Mock server let server = MockServer::start().await; // First request – must NOT include `previous_response_id`. let first = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse_completed("resp1"), "text/event-stream"); Mock::given(method("POST")) .and(path("/v1/responses")) .respond_with(first) .expect(1) .mount(&server) .await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; // Init session let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; let conversation_manager = ConversationManager::with_models_provider_and_home( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), config.codex_home.clone(), ); let NewConversation { conversation: codex, conversation_id, session_configured: _, } = conversation_manager .new_conversation(config) .await .expect("create new conversation"); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| 
matches!(ev, EventMsg::TaskComplete(_))).await; // get request from the server let requests = get_responses_requests(&server).await; let request = requests .first() .expect("expected POST request to /responses"); let request_conversation_id = request.headers.get("conversation_id").unwrap(); let request_authorization = request.headers.get("authorization").unwrap(); let request_originator = request.headers.get("originator").unwrap(); assert_eq!( request_conversation_id.to_str().unwrap(), conversation_id.to_string() ); assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs"); assert_eq!( request_authorization.to_str().unwrap(), "Bearer Test API Key" ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_base_instructions_override_in_request() { skip_if_no_network!(); // Mock server let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.base_instructions = Some("test instructions".to_string()); config.model_provider = model_provider; let conversation_manager = ConversationManager::with_models_provider_and_home( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), config.codex_home.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create new conversation") .conversation; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert!( request_body["instructions"] .as_str() .unwrap() .contains("test instructions") ); } #[tokio::test(flavor = "multi_thread", worker_threads 
= 2)] async fn chatgpt_auth_sends_correct_request() { skip_if_no_network!(); // Mock server let server = MockServer::start().await; // First request – must NOT include `previous_response_id`. let first = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse_completed("resp1"), "text/event-stream"); Mock::given(method("POST")) .and(path("/api/codex/responses")) .respond_with(first) .expect(1) .mount(&server) .await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/api/codex", server.uri())), ..built_in_model_providers()["openai"].clone() }; // Init session let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; let conversation_manager = ConversationManager::with_models_provider_and_home( create_dummy_codex_auth(), config.model_provider.clone(), config.codex_home.clone(), ); let NewConversation { conversation: codex, conversation_id, session_configured: _, } = conversation_manager .new_conversation(config) .await .expect("create new conversation"); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // get request from the server let requests = get_responses_requests(&server).await; let request = requests .first() .expect("expected POST request to /responses"); let request_conversation_id = request.headers.get("conversation_id").unwrap(); let request_authorization = request.headers.get("authorization").unwrap(); let request_originator = request.headers.get("originator").unwrap(); let request_chatgpt_account_id = request.headers.get("chatgpt-account-id").unwrap(); let request_body = request.body_json::<serde_json::Value>().unwrap(); assert_eq!( request_conversation_id.to_str().unwrap(), conversation_id.to_string() ); assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs"); 
assert_eq!( request_authorization.to_str().unwrap(), "Bearer Access Token" ); assert_eq!(request_chatgpt_account_id.to_str().unwrap(), "account_id"); assert!(request_body["stream"].as_bool().unwrap()); assert_eq!( request_body["include"][0].as_str().unwrap(), "reasoning.encrypted_content" ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() { skip_if_no_network!(); // Mock server let server = MockServer::start().await; let first = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse_completed("resp1"), "text/event-stream"); // Expect API key header, no ChatGPT account header required. Mock::given(method("POST")) .and(path("/v1/responses")) .and(header_regex("Authorization", r"Bearer sk-test-key")) .respond_with(first) .expect(1) .mount(&server) .await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; // Init session let codex_home = TempDir::new().unwrap(); // Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT, // but config will force API key preference. let _jwt = write_auth_json( &codex_home, Some("sk-test-key"), "pro", "Access-123", Some("acc-123"), ); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; let auth_manager = match CodexAuth::from_auth_storage(codex_home.path(), AuthCredentialsStoreMode::File) { Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth), Ok(None) => panic!("No CodexAuth found in codex_home"), Err(e) => panic!("Failed to load CodexAuth: {e}"), }; let conversation_manager = ConversationManager::new(auth_manager, SessionSource::Exec); let NewConversation { conversation: codex, .. 
} = conversation_manager .new_conversation(config) .await .expect("create new conversation"); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_user_instructions_message_in_request() { skip_if_no_network!(); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; config.user_instructions = Some("be nice".to_string()); let conversation_manager = ConversationManager::with_models_provider_and_home( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), config.codex_home.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create new conversation") .conversation; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert!( !request_body["instructions"] .as_str() .unwrap() .contains("be nice") ); assert_message_role(&request_body["input"][0], "user"); assert_message_starts_with(&request_body["input"][0], "# AGENTS.md instructions for "); assert_message_ends_with(&request_body["input"][0], "</INSTRUCTIONS>"); let ui_text = request_body["input"][0]["content"][0]["text"] .as_str() .expect("invalid message content"); assert!(ui_text.contains("<INSTRUCTIONS>")); assert!(ui_text.contains("be nice")); assert_message_role(&request_body["input"][1], "user"); 
assert_message_starts_with(&request_body["input"][1], "<environment_context>"); assert_message_ends_with(&request_body["input"][1], "</environment_context>"); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn skills_append_to_instructions() { skip_if_no_network!(); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let codex_home = TempDir::new().unwrap(); let skill_dir = codex_home.path().join("skills/demo"); std::fs::create_dir_all(&skill_dir).expect("create skill dir"); std::fs::write( skill_dir.join("SKILL.md"), "---\nname: demo\ndescription: build charts\n---\n\n# body\n", ) .expect("write skill"); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = model_provider; config.cwd = codex_home.path().to_path_buf(); config.features.enable(Feature::Skills); let conversation_manager = ConversationManager::with_models_provider_and_home( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), config.codex_home.clone(), ); let codex = conversation_manager .new_conversation(config) .await .expect("create new conversation") .conversation; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_message_role(&request_body["input"][0], "user"); let instructions_text = request_body["input"][0]["content"][0]["text"] .as_str() .expect("instructions text"); assert!( instructions_text.contains("## Skills"), "expected skills section present" ); assert!( instructions_text.contains("demo: build charts"), "expected skill summary" ); let expected_path = normalize_path(skill_dir.join("SKILL.md")).unwrap(); 
let expected_path_str = expected_path.to_string_lossy().replace('\\', "/"); assert!( instructions_text.contains(&expected_path_str), "expected path {expected_path_str} in instructions" ); let _codex_home_guard = codex_home; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_configured_effort_in_request() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. } = test_codex() .with_model("gpt-5.1-codex") .with_config(|config| { config.model_reasoning_effort = Some(ReasoningEffort::Medium); }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_eq!( request_body .get("reasoning") .and_then(|t| t.get("effort")) .and_then(|v| v.as_str()), Some("medium") ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_no_effort_in_request() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. 
} = test_codex() .with_model("gpt-5.1-codex") .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_eq!( request_body .get("reasoning") .and_then(|t| t.get("effort")) .and_then(|v| v.as_str()), None ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_default_reasoning_effort_in_request_when_defined_by_model_family() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. } = test_codex().with_model("gpt-5.1").build(&server).await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_eq!( request_body .get("reasoning") .and_then(|t| t.get("effort")) .and_then(|v| v.as_str()), Some("medium") ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn configured_reasoning_summary_is_sent() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.model_reasoning_summary = ReasoningSummary::Concise; }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); pretty_assertions::assert_eq!( request_body .get("reasoning") .and_then(|reasoning| reasoning.get("summary")) .and_then(|value| value.as_str()), Some("concise") ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn reasoning_summary_is_omitted_when_disabled() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.model_reasoning_summary = ReasoningSummary::None; }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); pretty_assertions::assert_eq!( request_body .get("reasoning") .and_then(|reasoning| reasoning.get("summary")), None ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_default_verbosity_in_request() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. 
} = test_codex().with_model("gpt-5.1").build(&server).await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_eq!( request_body .get("text") .and_then(|t| t.get("verbosity")) .and_then(|v| v.as_str()), Some("low") ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn configured_verbosity_not_sent_for_models_without_support() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. } = test_codex() .with_model("gpt-5.1-codex") .with_config(|config| { config.model_verbosity = Some(Verbosity::High); }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert!( request_body .get("text") .and_then(|t| t.get("verbosity")) .is_none() ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn configured_verbosity_is_sent() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await; let TestCodex { codex, .. 
} = test_codex() .with_model("gpt-5.1") .with_config(|config| { config.model_verbosity = Some(Verbosity::High); }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let request = resp_mock.single_request(); let request_body = request.body_json(); assert_eq!( request_body .get("text") .and_then(|t| t.get("verbosity")) .and_then(|v| v.as_str()), Some("high") ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn includes_developer_instructions_message_in_request() { skip_if_no_network!(); let server = MockServer::start().await; let resp_mock = mount_sse_once(&server, sse_completed("resp1")).await;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/grep_files.rs
codex-rs/core/tests/suite/grep_files.rs
#![cfg(not(target_os = "windows"))] use anyhow::Result; use core_test_support::responses::mount_function_call_agent_response; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use std::collections::HashSet; use std::path::Path; use std::process::Command as StdCommand; const MODEL_WITH_TOOL: &str = "test-gpt-5.1-codex"; fn ripgrep_available() -> bool { StdCommand::new("rg") .arg("--version") .output() .map(|output| output.status.success()) .unwrap_or(false) } macro_rules! skip_if_ripgrep_missing { ($ret:expr $(,)?) => {{ if !ripgrep_available() { eprintln!("rg not available in PATH; skipping test"); return $ret; } }}; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn grep_files_tool_collects_matches() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_ripgrep_missing!(Ok(())); let server = start_mock_server().await; let test = build_test_codex(&server).await?; let search_dir = test.cwd.path().join("src"); std::fs::create_dir_all(&search_dir)?; let alpha = search_dir.join("alpha.rs"); let beta = search_dir.join("beta.rs"); let gamma = search_dir.join("gamma.txt"); std::fs::write(&alpha, "alpha needle\n")?; std::fs::write(&beta, "beta needle\n")?; std::fs::write(&gamma, "needle in text but excluded\n")?; let call_id = "grep-files-collect"; let arguments = serde_json::json!({ "pattern": "needle", "path": search_dir.to_string_lossy(), "include": "*.rs", }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "grep_files").await; test.submit_turn("please find uses of needle").await?; let req = mocks.completion.single_request(); let (content_opt, success_opt) = req .function_call_output_content_and_success(call_id) .expect("tool output present"); let content = content_opt.expect("content present"); let success = success_opt.unwrap_or(true); assert!( success, "expected success 
for matches, got content={content}" ); let entries = collect_file_names(&content); assert_eq!(entries.len(), 2, "content: {content}"); assert!( entries.contains("alpha.rs"), "missing alpha.rs in {entries:?}" ); assert!( entries.contains("beta.rs"), "missing beta.rs in {entries:?}" ); assert!( !entries.contains("gamma.txt"), "txt file should be filtered out: {entries:?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn grep_files_tool_reports_empty_results() -> Result<()> { skip_if_no_network!(Ok(())); skip_if_ripgrep_missing!(Ok(())); let server = start_mock_server().await; let test = build_test_codex(&server).await?; let search_dir = test.cwd.path().join("logs"); std::fs::create_dir_all(&search_dir)?; std::fs::write(search_dir.join("output.txt"), "no hits here")?; let call_id = "grep-files-empty"; let arguments = serde_json::json!({ "pattern": "needle", "path": search_dir.to_string_lossy(), "limit": 5, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "grep_files").await; test.submit_turn("search again").await?; let req = mocks.completion.single_request(); let (content_opt, success_opt) = req .function_call_output_content_and_success(call_id) .expect("tool output present"); let content = content_opt.expect("content present"); if let Some(success) = success_opt { assert!(!success, "expected success=false content={content}"); } assert_eq!(content, "No matches found."); Ok(()) } #[allow(clippy::expect_used)] async fn build_test_codex(server: &wiremock::MockServer) -> Result<TestCodex> { let mut builder = test_codex().with_model(MODEL_WITH_TOOL); builder.build(server).await } fn collect_file_names(content: &str) -> HashSet<String> { content .lines() .filter_map(|line| { if line.trim().is_empty() { return None; } Path::new(line) .file_name() .map(|name| name.to_string_lossy().into_owned()) }) .collect() }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/approvals.rs
codex-rs/core/tests/suite/approvals.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] use anyhow::Result; use codex_core::config::Constrained; use codex_core::features::Feature; use codex_core::protocol::ApplyPatchApprovalRequestEvent; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::ExecPolicyAmendment; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_core::sandboxing::SandboxPermissions; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::protocol::ReviewDecision; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_apply_patch_function_call; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use regex_lite::Regex; use serde_json::Value; use serde_json::json; use std::env; use std::fs; use std::path::PathBuf; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; #[derive(Clone, Copy)] enum TargetPath { Workspace(&'static str), OutsideWorkspace(&'static str), } impl TargetPath { fn resolve_for_patch(self, test: &TestCodex) -> (PathBuf, String) { match self { TargetPath::Workspace(name) => { let path = test.cwd.path().join(name); (path, name.to_string()) } TargetPath::OutsideWorkspace(name) => { let path = env::current_dir() .expect("current dir should be available") .join(name); (path.clone(), path.display().to_string()) } } } } 
#[derive(Clone)] enum ActionKind { WriteFile { target: TargetPath, content: &'static str, }, FetchUrl { endpoint: &'static str, response_body: &'static str, }, RunCommand { command: &'static str, }, RunUnifiedExecCommand { command: &'static str, justification: Option<&'static str>, }, ApplyPatchFunction { target: TargetPath, content: &'static str, }, ApplyPatchShell { target: TargetPath, content: &'static str, }, } const DEFAULT_UNIFIED_EXEC_JUSTIFICATION: &str = "Requires escalated permissions to bypass the sandbox in tests."; impl ActionKind { async fn prepare( &self, test: &TestCodex, server: &MockServer, call_id: &str, sandbox_permissions: SandboxPermissions, ) -> Result<(Value, Option<String>)> { match self { ActionKind::WriteFile { target, content } => { let (path, _) = target.resolve_for_patch(test); let _ = fs::remove_file(&path); let command = format!("printf {content:?} > {path:?} && cat {path:?}"); let event = shell_event(call_id, &command, 1_000, sandbox_permissions)?; Ok((event, Some(command))) } ActionKind::FetchUrl { endpoint, response_body, } => { Mock::given(method("GET")) .and(path(*endpoint)) .respond_with( ResponseTemplate::new(200).set_body_string(response_body.to_string()), ) .mount(server) .await; let url = format!("{}{}", server.uri(), endpoint); let escaped_url = url.replace('\'', "\\'"); let script = format!( "import sys\nimport urllib.request\nurl = '{escaped_url}'\ntry:\n data = urllib.request.urlopen(url, timeout=2).read().decode()\n print('OK:' + data.strip())\nexcept Exception as exc:\n print('ERR:' + exc.__class__.__name__)\n sys.exit(1)", ); let command = format!("python3 -c \"{script}\""); let event = shell_event(call_id, &command, 5_000, sandbox_permissions)?; Ok((event, Some(command))) } ActionKind::RunCommand { command } => { let event = shell_event(call_id, command, 1_000, sandbox_permissions)?; Ok((event, Some(command.to_string()))) } ActionKind::RunUnifiedExecCommand { command, justification, } => { let event = 
exec_command_event( call_id, command, Some(1000), sandbox_permissions, *justification, )?; Ok((event, Some(command.to_string()))) } ActionKind::ApplyPatchFunction { target, content } => { let (path, patch_path) = target.resolve_for_patch(test); let _ = fs::remove_file(&path); let patch = build_add_file_patch(&patch_path, content); Ok((ev_apply_patch_function_call(call_id, &patch), None)) } ActionKind::ApplyPatchShell { target, content } => { let (path, patch_path) = target.resolve_for_patch(test); let _ = fs::remove_file(&path); let patch = build_add_file_patch(&patch_path, content); let command = shell_apply_patch_command(&patch); let event = shell_event(call_id, &command, 5_000, sandbox_permissions)?; Ok((event, Some(command))) } } } } fn build_add_file_patch(patch_path: &str, content: &str) -> String { format!("*** Begin Patch\n*** Add File: {patch_path}\n+{content}\n*** End Patch\n") } fn shell_apply_patch_command(patch: &str) -> String { let mut script = String::from("apply_patch <<'PATCH'\n"); script.push_str(patch); if !patch.ends_with('\n') { script.push('\n'); } script.push_str("PATCH\n"); script } fn shell_event( call_id: &str, command: &str, timeout_ms: u64, sandbox_permissions: SandboxPermissions, ) -> Result<Value> { let mut args = json!({ "command": command, "timeout_ms": timeout_ms, }); if sandbox_permissions.requires_escalated_permissions() { args["sandbox_permissions"] = json!(sandbox_permissions); } let args_str = serde_json::to_string(&args)?; Ok(ev_function_call(call_id, "shell_command", &args_str)) } fn exec_command_event( call_id: &str, cmd: &str, yield_time_ms: Option<u64>, sandbox_permissions: SandboxPermissions, justification: Option<&str>, ) -> Result<Value> { let mut args = json!({ "cmd": cmd.to_string(), }); if let Some(yield_time_ms) = yield_time_ms { args["yield_time_ms"] = json!(yield_time_ms); } if sandbox_permissions.requires_escalated_permissions() { args["sandbox_permissions"] = json!(sandbox_permissions); let reason = 
justification.unwrap_or(DEFAULT_UNIFIED_EXEC_JUSTIFICATION); args["justification"] = json!(reason); } let args_str = serde_json::to_string(&args)?; Ok(ev_function_call(call_id, "exec_command", &args_str)) } #[derive(Clone)] enum Expectation { FileCreated { target: TargetPath, content: &'static str, }, FileCreatedNoExitCode { target: TargetPath, content: &'static str, }, PatchApplied { target: TargetPath, content: &'static str, }, FileNotCreated { target: TargetPath, message_contains: &'static [&'static str], }, NetworkSuccess { body_contains: &'static str, }, NetworkSuccessNoExitCode { body_contains: &'static str, }, NetworkFailure { expect_tag: &'static str, }, CommandSuccess { stdout_contains: &'static str, }, CommandSuccessNoExitCode { stdout_contains: &'static str, }, CommandFailure { output_contains: &'static str, }, } impl Expectation { fn verify(&self, test: &TestCodex, result: &CommandResult) -> Result<()> { match self { Expectation::FileCreated { target, content } => { let (path, _) = target.resolve_for_patch(test); assert_eq!( result.exit_code, Some(0), "expected successful exit for {path:?}" ); assert!( result.stdout.contains(content), "stdout missing {content:?}: {}", result.stdout ); let file_contents = fs::read_to_string(&path)?; assert!( file_contents.contains(content), "file contents missing {content:?}: {file_contents}" ); let _ = fs::remove_file(path); } Expectation::FileCreatedNoExitCode { target, content } => { let (path, _) = target.resolve_for_patch(test); assert!( result.exit_code.is_none() || result.exit_code == Some(0), "expected no exit code for {path:?}", ); assert!( result.stdout.contains(content), "stdout missing {content:?}: {}", result.stdout ); let file_contents = fs::read_to_string(&path)?; assert!( file_contents.contains(content), "file contents missing {content:?}: {file_contents}" ); let _ = fs::remove_file(path); } Expectation::PatchApplied { target, content } => { let (path, _) = target.resolve_for_patch(test); match 
result.exit_code { Some(0) | None => { if result.exit_code.is_none() { assert!( result.stdout.contains("Success."), "patch output missing success indicator: {}", result.stdout ); } } Some(code) => panic!( "expected successful patch exit for {:?}, got {code} with stdout {}", path, result.stdout ), } let file_contents = fs::read_to_string(&path)?; assert!( file_contents.contains(content), "patched file missing {content:?}: {file_contents}" ); let _ = fs::remove_file(path); } Expectation::FileNotCreated { target, message_contains, } => { let (path, _) = target.resolve_for_patch(test); assert_ne!( result.exit_code, Some(0), "expected non-zero exit for {path:?}" ); for needle in *message_contains { if needle.contains('|') { let options: Vec<&str> = needle.split('|').collect(); let matches_any = options.iter().any(|option| result.stdout.contains(option)); assert!( matches_any, "stdout missing one of {options:?}: {}", result.stdout ); } else { assert!( result.stdout.contains(needle), "stdout missing {needle:?}: {}", result.stdout ); } } assert!( !path.exists(), "command should not create {path:?}, but file exists" ); } Expectation::NetworkSuccess { body_contains } => { assert_eq!( result.exit_code, Some(0), "expected successful network exit: {}", result.stdout ); assert!( result.stdout.contains("OK:"), "stdout missing OK prefix: {}", result.stdout ); assert!( result.stdout.contains(body_contains), "stdout missing body text {body_contains:?}: {}", result.stdout ); } Expectation::NetworkSuccessNoExitCode { body_contains } => { assert!( result.exit_code.is_none() || result.exit_code == Some(0), "expected no exit code for successful network call: {}", result.stdout ); assert!( result.stdout.contains("OK:"), "stdout missing OK prefix: {}", result.stdout ); assert!( result.stdout.contains(body_contains), "stdout missing body text {body_contains:?}: {}", result.stdout ); } Expectation::NetworkFailure { expect_tag } => { assert_ne!( result.exit_code, Some(0), "expected non-zero 
exit for network failure: {}", result.stdout ); assert!( result.stdout.contains("ERR:"), "stdout missing ERR prefix: {}", result.stdout ); assert!( result.stdout.contains(expect_tag), "stdout missing expected tag {expect_tag:?}: {}", result.stdout ); } Expectation::CommandSuccess { stdout_contains } => { assert_eq!( result.exit_code, Some(0), "expected successful trusted command exit: {}", result.stdout ); assert!( result.stdout.contains(stdout_contains), "trusted command stdout missing {stdout_contains:?}: {}", result.stdout ); } Expectation::CommandSuccessNoExitCode { stdout_contains } => { assert!( result.exit_code.is_none() || result.exit_code == Some(0), "expected no exit code for trusted command: {}", result.stdout ); assert!( result.stdout.contains(stdout_contains), "trusted command stdout missing {stdout_contains:?}: {}", result.stdout ); } Expectation::CommandFailure { output_contains } => { assert_ne!( result.exit_code, Some(0), "expected non-zero exit for command failure: {}", result.stdout ); assert!( result.stdout.contains(output_contains), "command failure stderr missing {output_contains:?}: {}", result.stdout ); } } Ok(()) } } #[derive(Clone)] enum Outcome { Auto, ExecApproval { decision: ReviewDecision, expected_reason: Option<&'static str>, }, PatchApproval { decision: ReviewDecision, expected_reason: Option<&'static str>, }, } #[derive(Clone)] struct ScenarioSpec { name: &'static str, approval_policy: AskForApproval, sandbox_policy: SandboxPolicy, action: ActionKind, sandbox_permissions: SandboxPermissions, features: Vec<Feature>, model_override: Option<&'static str>, outcome: Outcome, expectation: Expectation, } struct CommandResult { exit_code: Option<i64>, stdout: String, } async fn submit_turn( test: &TestCodex, prompt: &str, approval_policy: AskForApproval, sandbox_policy: SandboxPolicy, ) -> Result<()> { let session_model = test.session_configured.model.clone(); test.codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: 
prompt.into(), }], final_output_json_schema: None, cwd: test.cwd.path().to_path_buf(), approval_policy, sandbox_policy, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; Ok(()) } fn parse_result(item: &Value) -> CommandResult { let output_str = item .get("output") .and_then(Value::as_str) .expect("shell output payload"); match serde_json::from_str::<Value>(output_str) { Ok(parsed) => { let exit_code = parsed["metadata"]["exit_code"].as_i64(); let stdout = parsed["output"].as_str().unwrap_or_default().to_string(); CommandResult { exit_code, stdout } } Err(_) => { let structured = Regex::new(r"(?s)^Exit code:\s*(-?\d+).*?Output:\n(.*)$").unwrap(); let regex = Regex::new(r"(?s)^.*?Process exited with code (\d+)\n.*?Output:\n(.*)$").unwrap(); // parse freeform output if let Some(captures) = structured.captures(output_str) { let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap(); let output = captures.get(2).unwrap().as_str(); CommandResult { exit_code: Some(exit_code), stdout: output.to_string(), } } else if let Some(captures) = regex.captures(output_str) { let exit_code = captures.get(1).unwrap().as_str().parse::<i64>().unwrap(); let output = captures.get(2).unwrap().as_str(); CommandResult { exit_code: Some(exit_code), stdout: output.to_string(), } } else { CommandResult { exit_code: None, stdout: output_str.to_string(), } } } } } async fn expect_exec_approval( test: &TestCodex, expected_command: &str, ) -> ExecApprovalRequestEvent { let event = wait_for_event(&test.codex, |event| { matches!( event, EventMsg::ExecApprovalRequest(_) | EventMsg::TaskComplete(_) ) }) .await; match event { EventMsg::ExecApprovalRequest(approval) => { let last_arg = approval .command .last() .map(std::string::String::as_str) .unwrap_or_default(); assert_eq!(last_arg, expected_command); approval } EventMsg::TaskComplete(_) => panic!("expected approval request before completion"), other => panic!("unexpected event: {other:?}"), } } async fn 
expect_patch_approval( test: &TestCodex, expected_call_id: &str, ) -> ApplyPatchApprovalRequestEvent { let event = wait_for_event(&test.codex, |event| { matches!( event, EventMsg::ApplyPatchApprovalRequest(_) | EventMsg::TaskComplete(_) ) }) .await; match event { EventMsg::ApplyPatchApprovalRequest(approval) => { assert_eq!(approval.call_id, expected_call_id); approval } EventMsg::TaskComplete(_) => panic!("expected patch approval request before completion"), other => panic!("unexpected event: {other:?}"), } } async fn wait_for_completion_without_approval(test: &TestCodex) { let event = wait_for_event(&test.codex, |event| { matches!( event, EventMsg::ExecApprovalRequest(_) | EventMsg::TaskComplete(_) ) }) .await; match event { EventMsg::TaskComplete(_) => {} EventMsg::ExecApprovalRequest(event) => { panic!("unexpected approval request: {:?}", event.command) } other => panic!("unexpected event: {other:?}"), } } async fn wait_for_completion(test: &TestCodex) { wait_for_event(&test.codex, |event| { matches!(event, EventMsg::TaskComplete(_)) }) .await; } fn scenarios() -> Vec<ScenarioSpec> { use AskForApproval::*; let workspace_write = |network_access| SandboxPolicy::WorkspaceWrite { writable_roots: vec![], network_access, exclude_tmpdir_env_var: false, exclude_slash_tmp: false, }; vec![ ScenarioSpec { name: "danger_full_access_on_request_allows_outside_write", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_on_request.txt"), content: "danger-on-request", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::Auto, expectation: Expectation::FileCreated { target: TargetPath::OutsideWorkspace("dfa_on_request.txt"), content: "danger-on-request", }, }, ScenarioSpec { name: "danger_full_access_on_request_allows_outside_write_gpt_5_1_no_exit", approval_policy: OnRequest, sandbox_policy: 
SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_on_request_5_1.txt"), content: "danger-on-request", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::Auto, expectation: Expectation::FileCreated { target: TargetPath::OutsideWorkspace("dfa_on_request_5_1.txt"), content: "danger-on-request", }, }, ScenarioSpec { name: "danger_full_access_on_request_allows_network", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::FetchUrl { endpoint: "/dfa/network", response_body: "danger-network-ok", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::Auto, expectation: Expectation::NetworkSuccess { body_contains: "danger-network-ok", }, }, ScenarioSpec { name: "danger_full_access_on_request_allows_network_gpt_5_1_no_exit", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::FetchUrl { endpoint: "/dfa/network", response_body: "danger-network-ok", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::Auto, expectation: Expectation::NetworkSuccessNoExitCode { body_contains: "danger-network-ok", }, }, ScenarioSpec { name: "trusted_command_unless_trusted_runs_without_prompt", approval_policy: UnlessTrusted, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::RunCommand { command: "echo trusted-unless", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::Auto, expectation: Expectation::CommandSuccess { stdout_contains: "trusted-unless", }, }, ScenarioSpec { name: "trusted_command_unless_trusted_runs_without_prompt_gpt_5_1_no_exit", approval_policy: UnlessTrusted, sandbox_policy: SandboxPolicy::DangerFullAccess, action: 
ActionKind::RunCommand { command: "echo trusted-unless", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::Auto, expectation: Expectation::CommandSuccessNoExitCode { stdout_contains: "trusted-unless", }, }, ScenarioSpec { name: "danger_full_access_on_failure_allows_outside_write", approval_policy: OnFailure, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_on_failure.txt"), content: "danger-on-failure", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::Auto, expectation: Expectation::FileCreated { target: TargetPath::OutsideWorkspace("dfa_on_failure.txt"), content: "danger-on-failure", }, }, ScenarioSpec { name: "danger_full_access_on_failure_allows_outside_write_gpt_5_1_no_exit", approval_policy: OnFailure, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_on_failure_5_1.txt"), content: "danger-on-failure", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::Auto, expectation: Expectation::FileCreatedNoExitCode { target: TargetPath::OutsideWorkspace("dfa_on_failure_5_1.txt"), content: "danger-on-failure", }, }, ScenarioSpec { name: "danger_full_access_unless_trusted_requests_approval", approval_policy: UnlessTrusted, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_unless_trusted.txt"), content: "danger-unless-trusted", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::ExecApproval { decision: ReviewDecision::Approved, expected_reason: None, }, expectation: Expectation::FileCreated { target: TargetPath::OutsideWorkspace("dfa_unless_trusted.txt"), content: 
"danger-unless-trusted", }, }, ScenarioSpec { name: "danger_full_access_unless_trusted_requests_approval_gpt_5_1_no_exit", approval_policy: UnlessTrusted, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_unless_trusted_5_1.txt"), content: "danger-unless-trusted", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::ExecApproval { decision: ReviewDecision::Approved, expected_reason: None, }, expectation: Expectation::FileCreatedNoExitCode { target: TargetPath::OutsideWorkspace("dfa_unless_trusted_5_1.txt"), content: "danger-unless-trusted", }, }, ScenarioSpec { name: "danger_full_access_never_allows_outside_write", approval_policy: Never, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_never.txt"), content: "danger-never", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::Auto, expectation: Expectation::FileCreated { target: TargetPath::OutsideWorkspace("dfa_never.txt"), content: "danger-never", }, }, ScenarioSpec { name: "danger_full_access_never_allows_outside_write_gpt_5_1_no_exit", approval_policy: Never, sandbox_policy: SandboxPolicy::DangerFullAccess, action: ActionKind::WriteFile { target: TargetPath::OutsideWorkspace("dfa_never_5_1.txt"), content: "danger-never", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::Auto, expectation: Expectation::FileCreatedNoExitCode { target: TargetPath::OutsideWorkspace("dfa_never_5_1.txt"), content: "danger-never", }, }, ScenarioSpec { name: "read_only_on_request_requires_approval", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::ReadOnly, action: ActionKind::WriteFile { target: TargetPath::Workspace("ro_on_request.txt"), content: 
"read-only-approval", }, sandbox_permissions: SandboxPermissions::RequireEscalated, features: vec![], model_override: Some("gpt-5"), outcome: Outcome::ExecApproval { decision: ReviewDecision::Approved, expected_reason: None, }, expectation: Expectation::FileCreated { target: TargetPath::Workspace("ro_on_request.txt"), content: "read-only-approval", }, }, ScenarioSpec { name: "read_only_on_request_requires_approval_gpt_5_1_no_exit", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::ReadOnly, action: ActionKind::WriteFile { target: TargetPath::Workspace("ro_on_request_5_1.txt"), content: "read-only-approval", }, sandbox_permissions: SandboxPermissions::RequireEscalated, features: vec![], model_override: Some("gpt-5.1"), outcome: Outcome::ExecApproval { decision: ReviewDecision::Approved, expected_reason: None, }, expectation: Expectation::FileCreatedNoExitCode { target: TargetPath::Workspace("ro_on_request_5_1.txt"), content: "read-only-approval", }, }, ScenarioSpec { name: "trusted_command_on_request_read_only_runs_without_prompt", approval_policy: OnRequest, sandbox_policy: SandboxPolicy::ReadOnly, action: ActionKind::RunCommand { command: "echo trusted-read-only", }, sandbox_permissions: SandboxPermissions::UseDefault, features: vec![],
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/view_image.rs
codex-rs/core/tests/suite/view_image.rs
#![cfg(not(target_os = "windows"))] use base64::Engine; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use image::GenericImageView; use image::ImageBuffer; use image::Rgba; use image::load_from_memory; use serde_json::Value; fn find_image_message(body: &Value) -> Option<&Value> { body.get("input") .and_then(Value::as_array) .and_then(|items| { items.iter().find(|item| { item.get("type").and_then(Value::as_str) == Some("message") && item .get("content") .and_then(Value::as_array) .map(|content| { content.iter().any(|span| { span.get("type").and_then(Value::as_str) == Some("input_image") }) }) .unwrap_or(false) }) }) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn user_turn_with_local_image_attaches_image() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, cwd, session_configured, .. 
} = test_codex().build(&server).await?; let rel_path = "user-turn/example.png"; let abs_path = cwd.path().join(rel_path); if let Some(parent) = abs_path.parent() { std::fs::create_dir_all(parent)?; } let image = ImageBuffer::from_pixel(4096, 1024, Rgba([20u8, 40, 60, 255])); image.save(&abs_path)?; let response = sse(vec![ ev_response_created("resp-1"), ev_assistant_message("msg-1", "done"), ev_completed("resp-1"), ]); let mock = responses::mount_sse_once(&server, response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::LocalImage { path: abs_path.clone(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let body = mock.single_request().body_json(); let image_message = find_image_message(&body).expect("pending input image message not included in request"); let image_url = image_message .get("content") .and_then(Value::as_array) .and_then(|content| { content.iter().find_map(|span| { if span.get("type").and_then(Value::as_str) == Some("input_image") { span.get("image_url").and_then(Value::as_str) } else { None } }) }) .expect("image_url present"); let (prefix, encoded) = image_url .split_once(',') .expect("image url contains data prefix"); assert_eq!(prefix, "data:image/png;base64"); let decoded = BASE64_STANDARD .decode(encoded) .expect("image data decodes from base64 for request"); let resized = load_from_memory(&decoded).expect("load resized image"); let (width, height) = resized.dimensions(); assert!(width <= 2048); assert!(height <= 768); assert!(width < 4096); assert!(height < 1024); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> { 
skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, cwd, session_configured, .. } = test_codex().build(&server).await?; let rel_path = "assets/example.png"; let abs_path = cwd.path().join(rel_path); if let Some(parent) = abs_path.parent() { std::fs::create_dir_all(parent)?; } let image = ImageBuffer::from_pixel(4096, 1024, Rgba([255u8, 0, 0, 255])); image.save(&abs_path)?; let call_id = "view-image-call"; let arguments = serde_json::json!({ "path": rel_path }).to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "view_image", &arguments), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please add the screenshot".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut tool_event = None; wait_for_event(&codex, |event| match event { EventMsg::ViewImageToolCall(_) => { tool_event = Some(event.clone()); false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; let tool_event = match tool_event.expect("view image tool event emitted") { EventMsg::ViewImageToolCall(event) => event, _ => unreachable!("stored event must be ViewImageToolCall"), }; assert_eq!(tool_event.call_id, call_id); assert_eq!(tool_event.path, abs_path); let req = mock.single_request(); let body = req.body_json(); let output_text = req .function_call_output_content_and_success(call_id) .and_then(|(content, _)| content) .expect("output text present"); assert_eq!(output_text, "attached local image 
path"); let image_message = find_image_message(&body).expect("pending input image message not included in request"); let image_url = image_message .get("content") .and_then(Value::as_array) .and_then(|content| { content.iter().find_map(|span| { if span.get("type").and_then(Value::as_str) == Some("input_image") { span.get("image_url").and_then(Value::as_str) } else { None } }) }) .expect("image_url present"); let (prefix, encoded) = image_url .split_once(',') .expect("image url contains data prefix"); assert_eq!(prefix, "data:image/png;base64"); let decoded = BASE64_STANDARD .decode(encoded) .expect("image data decodes from base64 for request"); let resized = load_from_memory(&decoded).expect("load resized image"); let (resized_width, resized_height) = resized.dimensions(); assert!(resized_width <= 2048); assert!(resized_height <= 768); assert!(resized_width < 4096); assert!(resized_height < 1024); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, cwd, session_configured, .. 
} = test_codex().build(&server).await?; let rel_path = "assets"; let abs_path = cwd.path().join(rel_path); std::fs::create_dir_all(&abs_path)?; let call_id = "view-image-directory"; let arguments = serde_json::json!({ "path": rel_path }).to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "view_image", &arguments), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please attach the folder".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let req = mock.single_request(); let body_with_tool_output = req.body_json(); let output_text = req .function_call_output_content_and_success(call_id) .and_then(|(content, _)| content) .expect("output text present"); let expected_message = format!("image path `{}` is not a file", abs_path.display()); assert_eq!(output_text, expected_message); assert!( find_image_message(&body_with_tool_output).is_none(), "directory path should not produce an input_image message" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, cwd, session_configured, .. 
} = test_codex().build(&server).await?; let rel_path = "assets/example.json"; let abs_path = cwd.path().join(rel_path); if let Some(parent) = abs_path.parent() { std::fs::create_dir_all(parent)?; } std::fs::write(&abs_path, br#"{ "message": "hello" }"#)?; let call_id = "view-image-non-image"; let arguments = serde_json::json!({ "path": rel_path }).to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "view_image", &arguments), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please use the view_image tool to read the json file".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let request = mock.single_request(); assert!( request.inputs_of_type("input_image").is_empty(), "non-image file should not produce an input_image message" ); let placeholder = request .inputs_of_type("message") .iter() .find_map(|item| { let content = item.get("content").and_then(Value::as_array)?; content.iter().find_map(|span| { if span.get("type").and_then(Value::as_str) == Some("input_text") { let text = span.get("text").and_then(Value::as_str)?; if text.contains("Codex could not read the local image at") && text.contains("unsupported MIME type `application/json`") { return Some(text.to_string()); } } None }) }) .expect("placeholder text found"); assert!( placeholder.contains(&abs_path.display().to_string()), "placeholder should mention path: 
{placeholder}" ); let output_text = mock .single_request() .function_call_output_content_and_success(call_id) .and_then(|(content, _)| content) .expect("output text present"); assert_eq!(output_text, "attached local image path"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let TestCodex { codex, cwd, session_configured, .. } = test_codex().build(&server).await?; let rel_path = "missing/example.png"; let abs_path = cwd.path().join(rel_path); let call_id = "view-image-missing"; let arguments = serde_json::json!({ "path": rel_path }).to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "view_image", &arguments), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please attach the missing image".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let req = mock.single_request(); let body_with_tool_output = req.body_json(); let output_text = req .function_call_output_content_and_success(call_id) .and_then(|(content, _)| content) .expect("output text present"); let expected_prefix = format!("unable to locate image at `{}`:", abs_path.display()); assert!( output_text.starts_with(&expected_prefix), "expected error to start with `{expected_prefix}` but got `{output_text}`" 
); assert!( find_image_message(&body_with_tool_output).is_none(), "missing file should not produce an input_image message" ); Ok(()) } #[cfg(not(debug_assertions))] #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn replaces_invalid_local_image_after_bad_request() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; const INVALID_IMAGE_ERROR: &str = "The image data you provided does not represent a valid image"; let invalid_image_mock = responses::mount_response_once_match( &server, body_string_contains("\"input_image\""), ResponseTemplate::new(400) .insert_header("content-type", "text/plain") .set_body_string(INVALID_IMAGE_ERROR), ) .await; let success_response = sse(vec![ ev_response_created("resp-2"), ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]); let completion_mock = responses::mount_sse_once(&server, success_response).await; let TestCodex { codex, cwd, session_configured, .. } = test_codex().build(&server).await?; let rel_path = "assets/poisoned.png"; let abs_path = cwd.path().join(rel_path); if let Some(parent) = abs_path.parent() { std::fs::create_dir_all(parent)?; } let image = ImageBuffer::from_pixel(1024, 512, Rgba([10u8, 20, 30, 255])); image.save(&abs_path)?; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::LocalImage { path: abs_path.clone(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let first_body = invalid_image_mock.single_request().body_json(); assert!( find_image_message(&first_body).is_some(), "initial request should include the uploaded image" ); let second_request = completion_mock.single_request(); let second_body = 
second_request.body_json(); assert!( find_image_message(&second_body).is_none(), "second request should replace the invalid image" ); let user_texts = second_request.message_input_texts("user"); assert!(user_texts.iter().any(|text| text == "Invalid image")); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/tool_harness.rs
codex-rs/core/tests/suite/tool_harness.rs
#![cfg(not(target_os = "windows"))] use std::fs; use assert_matches::assert_matches; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::plan_tool::StepStatus; use codex_protocol::user_input::UserInput; use core_test_support::assert_regex_match; use core_test_support::responses; use core_test_support::responses::ResponsesRequest; use core_test_support::responses::ev_apply_patch_function_call; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_local_shell_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use serde_json::Value; use serde_json::json; fn call_output(req: &ResponsesRequest, call_id: &str) -> (String, Option<bool>) { let raw = req.function_call_output(call_id); assert_eq!( raw.get("call_id").and_then(Value::as_str), Some(call_id), "mismatched call_id in function_call_output" ); let (content_opt, success) = match req.function_call_output_content_and_success(call_id) { Some(values) => values, None => panic!("function_call_output present"), }; let content = match content_opt { Some(c) => c, None => panic!("function_call_output content present"), }; (content, success) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5"); let TestCodex { 
codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "shell-tool-call"; let command = vec!["/bin/echo", "tool harness"]; let first_response = sse(vec![ ev_response_created("resp-1"), ev_local_shell_call(call_id, "completed", command), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "all done"), ev_completed("resp-2"), ]); let second_mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please run the shell command".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let req = second_mock.single_request(); let (output_text, _) = call_output(&req, call_id); let exec_output: Value = serde_json::from_str(&output_text)?; assert_eq!(exec_output["metadata"]["exit_code"], 0); let stdout = exec_output["output"].as_str().expect("stdout field"); assert_regex_match(r"(?s)^tool harness\n?$", stdout); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex(); let TestCodex { codex, cwd, session_configured, .. 
} = builder.build(&server).await?; let call_id = "plan-tool-call"; let plan_args = json!({ "explanation": "Tool harness check", "plan": [ {"step": "Inspect workspace", "status": "in_progress"}, {"step": "Report results", "status": "pending"}, ], }) .to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "update_plan", &plan_args), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "plan acknowledged"), ev_completed("resp-2"), ]); let second_mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please update the plan".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_plan_update = false; wait_for_event(&codex, |event| match event { EventMsg::PlanUpdate(update) => { saw_plan_update = true; assert_eq!(update.explanation.as_deref(), Some("Tool harness check")); assert_eq!(update.plan.len(), 2); assert_eq!(update.plan[0].step, "Inspect workspace"); assert_matches!(update.plan[0].status, StepStatus::InProgress); assert_eq!(update.plan[1].step, "Report results"); assert_matches!(update.plan[1].status, StepStatus::Pending); false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; assert!(saw_plan_update, "expected PlanUpdate event"); let req = second_mock.single_request(); let (output_text, _success_flag) = call_output(&req, call_id); assert_eq!(output_text, "Plan updated"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let 
mut builder = test_codex(); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "plan-tool-invalid"; let invalid_args = json!({ "explanation": "Missing plan data" }) .to_string(); let first_response = sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "update_plan", &invalid_args), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "malformed plan payload"), ev_completed("resp-2"), ]); let second_mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please update the plan".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_plan_update = false; wait_for_event(&codex, |event| match event { EventMsg::PlanUpdate(_) => { saw_plan_update = true; false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; assert!( !saw_plan_update, "did not expect PlanUpdate event for malformed payload" ); let req = second_mock.single_request(); let (output_text, success_flag) = call_output(&req, call_id); assert!( output_text.contains("failed to parse function arguments"), "expected parse error message in output text, got {output_text:?}" ); if let Some(success_flag) = success_flag { assert!( !success_flag, "expected tool output to mark success=false for malformed payload" ); } Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { 
config.features.enable(Feature::ApplyPatchFreeform); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let file_name = "notes.txt"; let file_path = cwd.path().join(file_name); let call_id = "apply-patch-call"; let patch_content = format!( r#"*** Begin Patch *** Add File: {file_name} +Tool harness apply patch *** End Patch"# ); let first_response = sse(vec![ ev_response_created("resp-1"), ev_apply_patch_function_call(call_id, &patch_content), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "patch complete"), ev_completed("resp-2"), ]); let second_mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please apply a patch".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let mut saw_patch_begin = false; let mut patch_end_success = None; wait_for_event(&codex, |event| match event { EventMsg::PatchApplyBegin(begin) => { saw_patch_begin = true; assert_eq!(begin.call_id, call_id); false } EventMsg::PatchApplyEnd(end) => { assert_eq!(end.call_id, call_id); patch_end_success = Some(end.success); false } EventMsg::TaskComplete(_) => true, _ => false, }) .await; assert!(saw_patch_begin, "expected PatchApplyBegin event"); let patch_end_success = patch_end_success.expect("expected PatchApplyEnd event to capture success flag"); assert!(patch_end_success); let req = second_mock.single_request(); let (output_text, _success_flag) = call_output(&req, call_id); let expected_pattern = format!( r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. 
Updated the following files: A {file_name} ?$" ); assert_regex_match(&expected_pattern, &output_text); let updated_contents = fs::read_to_string(file_path)?; assert_eq!( updated_contents, "Tool harness apply patch\n", "expected updated file content" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { config.features.enable(Feature::ApplyPatchFreeform); }); let TestCodex { codex, cwd, session_configured, .. } = builder.build(&server).await?; let call_id = "apply-patch-parse-error"; let patch_content = r"*** Begin Patch *** Update File: broken.txt *** End Patch"; let first_response = sse(vec![ ev_response_created("resp-1"), ev_apply_patch_function_call(call_id, patch_content), ev_completed("resp-1"), ]); responses::mount_sse_once(&server, first_response).await; let second_response = sse(vec![ ev_assistant_message("msg-1", "failed"), ev_completed("resp-2"), ]); let second_mock = responses::mount_sse_once(&server, second_response).await; let session_model = session_configured.model.clone(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "please apply a patch".into(), }], final_output_json_schema: None, cwd: cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await; let req = second_mock.single_request(); let (output_text, success_flag) = call_output(&req, call_id); assert!( output_text.contains("apply_patch verification failed"), "expected apply_patch verification failure message, got {output_text:?}" ); assert!( output_text.contains("invalid hunk"), "expected parse diagnostics in output text, got {output_text:?}" ); if let 
Some(success_flag) = success_flag { assert!( !success_flag, "expected tool output to mark success=false for parse failures" ); } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/undo.rs
codex-rs/core/tests/suite/undo.rs
#![cfg(not(target_os = "windows"))] use std::fs; use std::path::Path; use std::process::Command; use std::sync::Arc; use anyhow::Context; use anyhow::Result; use anyhow::bail; use codex_core::CodexConversation; use codex_core::features::Feature; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::UndoCompletedEvent; use core_test_support::responses::ev_apply_patch_function_call; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event_match; use pretty_assertions::assert_eq; #[allow(clippy::expect_used)] async fn undo_harness() -> Result<TestCodexHarness> { let builder = test_codex().with_model("gpt-5.1").with_config(|config| { config.include_apply_patch_tool = true; config.features.enable(Feature::GhostCommit); }); TestCodexHarness::with_builder(builder).await } fn git(path: &Path, args: &[&str]) -> Result<()> { let status = Command::new("git") .args(args) .current_dir(path) .status() .with_context(|| format!("failed to run git {args:?}"))?; if status.success() { return Ok(()); } let exit_status = status; bail!("git {args:?} exited with {exit_status}"); } fn git_output(path: &Path, args: &[&str]) -> Result<String> { let output = Command::new("git") .args(args) .current_dir(path) .output() .with_context(|| format!("failed to run git {args:?}"))?; if !output.status.success() { let exit_status = output.status; bail!("git {args:?} exited with {exit_status}"); } String::from_utf8(output.stdout).context("stdout was not valid utf8") } fn init_git_repo(path: &Path) -> Result<()> { // Use a consistent initial branch and config across environments to avoid // CI 
variance (default-branch hints, line ending differences, etc.). git(path, &["init", "--initial-branch=main"])?; git(path, &["config", "core.autocrlf", "false"])?; git(path, &["config", "user.name", "Codex Tests"])?; git(path, &["config", "user.email", "codex-tests@example.com"])?; // Create README.txt let readme_path = path.join("README.txt"); fs::write(&readme_path, "Test repository initialized by Codex.\n")?; // Stage and commit git(path, &["add", "README.txt"])?; git(path, &["commit", "-m", "Add README.txt"])?; Ok(()) } fn apply_patch_responses(call_id: &str, patch: &str, assistant_msg: &str) -> Vec<String> { vec![ sse(vec![ ev_response_created("resp-1"), ev_apply_patch_function_call(call_id, patch), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", assistant_msg), ev_completed("resp-2"), ]), ] } async fn run_apply_patch_turn( harness: &TestCodexHarness, prompt: &str, call_id: &str, patch: &str, assistant_msg: &str, ) -> Result<()> { mount_sse_sequence( harness.server(), apply_patch_responses(call_id, patch, assistant_msg), ) .await; harness.submit(prompt).await } async fn invoke_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> { codex.submit(Op::Undo).await?; let event = wait_for_event_match(codex, |msg| match msg { EventMsg::UndoCompleted(done) => Some(done.clone()), _ => None, }) .await; Ok(event) } async fn expect_successful_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> { let event = invoke_undo(codex).await?; assert!( event.success, "expected undo to succeed but failed with message {:?}", event.message ); Ok(event) } async fn expect_failed_undo(codex: &Arc<CodexConversation>) -> Result<UndoCompletedEvent> { let event = invoke_undo(codex).await?; assert!( !event.success, "expected undo to fail but succeeded with message {:?}", event.message ); assert_eq!( event.message.as_deref(), Some("No ghost snapshot available to undo.") ); Ok(event) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 
async fn undo_removes_new_file_created_during_turn() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let call_id = "undo-create-file"; let patch = "*** Begin Patch\n*** Add File: new_file.txt\n+from turn\n*** End Patch"; run_apply_patch_turn(&harness, "create file", call_id, patch, "ok").await?; let new_path = harness.path("new_file.txt"); assert_eq!(fs::read_to_string(&new_path)?, "from turn\n"); let codex = Arc::clone(&harness.test().codex); let completed = expect_successful_undo(&codex).await?; assert!(completed.success, "undo failed: {:?}", completed.message); assert!(!new_path.exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_restores_tracked_file_edit() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let tracked = harness.path("tracked.txt"); fs::write(&tracked, "before\n")?; git(harness.cwd(), &["add", "tracked.txt"])?; git(harness.cwd(), &["commit", "-m", "track file"])?; let patch = "*** Begin Patch\n*** Update File: tracked.txt\n@@\n-before\n+after\n*** End Patch"; run_apply_patch_turn( &harness, "update tracked file", "undo-tracked-edit", patch, "done", ) .await?; println!( "apply_patch output: {}", harness.function_call_stdout("undo-tracked-edit").await ); assert_eq!(fs::read_to_string(&tracked)?, "after\n"); let codex = Arc::clone(&harness.test().codex); let completed = expect_successful_undo(&codex).await?; assert!(completed.success, "undo failed: {:?}", completed.message); assert_eq!(fs::read_to_string(&tracked)?, "before\n"); let status = git_output(harness.cwd(), &["status", "--short"])?; assert_eq!(status, ""); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_restores_untracked_file_edit() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; git(harness.cwd(), &["commit", 
"--allow-empty", "-m", "init"])?; let notes = harness.path("notes.txt"); fs::write(&notes, "original\n")?; let status_before = git_output(harness.cwd(), &["status", "--short", "--ignored"])?; assert!(status_before.contains("?? notes.txt")); let patch = "*** Begin Patch\n*** Update File: notes.txt\n@@\n-original\n+modified\n*** End Patch"; run_apply_patch_turn( &harness, "edit untracked", "undo-untracked-edit", patch, "done", ) .await?; assert_eq!(fs::read_to_string(&notes)?, "modified\n"); let codex = Arc::clone(&harness.test().codex); let completed = expect_successful_undo(&codex).await?; assert!(completed.success, "undo failed: {:?}", completed.message); assert_eq!(fs::read_to_string(&notes)?, "original\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_reverts_only_latest_turn() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let call_id_one = "undo-turn-one"; let add_patch = "*** Begin Patch\n*** Add File: story.txt\n+first version\n*** End Patch"; run_apply_patch_turn(&harness, "create story", call_id_one, add_patch, "done").await?; let story = harness.path("story.txt"); assert_eq!(fs::read_to_string(&story)?, "first version\n"); let call_id_two = "undo-turn-two"; let update_patch = "*** Begin Patch\n*** Update File: story.txt\n@@\n-first version\n+second version\n*** End Patch"; run_apply_patch_turn(&harness, "revise story", call_id_two, update_patch, "done").await?; assert_eq!(fs::read_to_string(&story)?, "second version\n"); let codex = Arc::clone(&harness.test().codex); let completed = expect_successful_undo(&codex).await?; assert!(completed.success, "undo failed: {:?}", completed.message); assert_eq!(fs::read_to_string(&story)?, "first version\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_does_not_touch_unrelated_files() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; 
init_git_repo(harness.cwd())?; let tracked_constant = harness.path("stable.txt"); fs::write(&tracked_constant, "stable\n")?; let target = harness.path("target.txt"); fs::write(&target, "start\n")?; let gitignore = harness.path(".gitignore"); fs::write(&gitignore, "ignored-stable.log\n")?; git( harness.cwd(), &["add", "stable.txt", "target.txt", ".gitignore"], )?; git(harness.cwd(), &["commit", "-m", "seed tracked"])?; let preexisting_untracked = harness.path("scratch.txt"); fs::write(&preexisting_untracked, "scratch before\n")?; let ignored = harness.path("ignored-stable.log"); fs::write(&ignored, "ignored before\n")?; let full_patch = "*** Begin Patch\n*** Update File: target.txt\n@@\n-start\n+edited\n*** Add File: temp.txt\n+ephemeral\n*** End Patch"; run_apply_patch_turn( &harness, "modify target", "undo-unrelated", full_patch, "done", ) .await?; let temp = harness.path("temp.txt"); assert_eq!(fs::read_to_string(&target)?, "edited\n"); assert_eq!(fs::read_to_string(&temp)?, "ephemeral\n"); let codex = Arc::clone(&harness.test().codex); let completed = expect_successful_undo(&codex).await?; assert!(completed.success, "undo failed: {:?}", completed.message); assert_eq!(fs::read_to_string(&tracked_constant)?, "stable\n"); assert_eq!(fs::read_to_string(&target)?, "start\n"); assert_eq!( fs::read_to_string(&preexisting_untracked)?, "scratch before\n" ); assert_eq!(fs::read_to_string(&ignored)?, "ignored before\n"); assert!(!temp.exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_sequential_turns_consumes_snapshots() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let story = harness.path("story.txt"); fs::write(&story, "initial\n")?; git(harness.cwd(), &["add", "story.txt"])?; git(harness.cwd(), &["commit", "-m", "seed story"])?; run_apply_patch_turn( &harness, "first change", "seq-turn-1", "*** Begin Patch\n*** Update File: story.txt\n@@\n-initial\n+turn 
one\n*** End Patch", "ok", ) .await?; assert_eq!(fs::read_to_string(&story)?, "turn one\n"); run_apply_patch_turn( &harness, "second change", "seq-turn-2", "*** Begin Patch\n*** Update File: story.txt\n@@\n-turn one\n+turn two\n*** End Patch", "ok", ) .await?; assert_eq!(fs::read_to_string(&story)?, "turn two\n"); run_apply_patch_turn( &harness, "third change", "seq-turn-3", "*** Begin Patch\n*** Update File: story.txt\n@@\n-turn two\n+turn three\n*** End Patch", "ok", ) .await?; assert_eq!(fs::read_to_string(&story)?, "turn three\n"); let codex = Arc::clone(&harness.test().codex); expect_successful_undo(&codex).await?; assert_eq!(fs::read_to_string(&story)?, "turn two\n"); expect_successful_undo(&codex).await?; assert_eq!(fs::read_to_string(&story)?, "turn one\n"); expect_successful_undo(&codex).await?; assert_eq!(fs::read_to_string(&story)?, "initial\n"); expect_failed_undo(&codex).await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_without_snapshot_reports_failure() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; let codex = Arc::clone(&harness.test().codex); expect_failed_undo(&codex).await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_restores_moves_and_renames() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let source = harness.path("rename_me.txt"); fs::write(&source, "original\n")?; git(harness.cwd(), &["add", "rename_me.txt"])?; git(harness.cwd(), &["commit", "-m", "add rename target"])?; let patch = "*** Begin Patch\n*** Update File: rename_me.txt\n*** Move to: relocated/renamed.txt\n@@\n-original\n+renamed content\n*** End Patch"; run_apply_patch_turn(&harness, "rename file", "undo-rename", patch, "done").await?; let destination = harness.path("relocated/renamed.txt"); assert!(!source.exists()); assert_eq!(fs::read_to_string(&destination)?, "renamed content\n"); let codex = 
Arc::clone(&harness.test().codex); expect_successful_undo(&codex).await?; assert_eq!(fs::read_to_string(&source)?, "original\n"); assert!(!destination.exists()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_does_not_touch_ignored_directory_contents() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let gitignore = harness.path(".gitignore"); fs::write(&gitignore, "logs/\n")?; git(harness.cwd(), &["add", ".gitignore"])?; git(harness.cwd(), &["commit", "-m", "ignore logs directory"])?; let logs_dir = harness.path("logs"); fs::create_dir_all(&logs_dir)?; let preserved = logs_dir.join("persistent.log"); fs::write(&preserved, "keep me\n")?; run_apply_patch_turn( &harness, "write log", "undo-log", "*** Begin Patch\n*** Add File: logs/session.log\n+ephemeral log\n*** End Patch", "ok", ) .await?; let new_log = logs_dir.join("session.log"); assert_eq!(fs::read_to_string(&new_log)?, "ephemeral log\n"); let codex = Arc::clone(&harness.test().codex); expect_successful_undo(&codex).await?; assert!(new_log.exists()); assert_eq!(fs::read_to_string(&preserved)?, "keep me\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_overwrites_manual_edits_after_turn() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; let tracked = harness.path("tracked.txt"); fs::write(&tracked, "baseline\n")?; git(harness.cwd(), &["add", "tracked.txt"])?; git(harness.cwd(), &["commit", "-m", "baseline tracked"])?; run_apply_patch_turn( &harness, "modify tracked", "undo-manual-overwrite", "*** Begin Patch\n*** Update File: tracked.txt\n@@\n-baseline\n+turn change\n*** End Patch", "ok", ) .await?; assert_eq!(fs::read_to_string(&tracked)?, "turn change\n"); fs::write(&tracked, "manual edit\n")?; assert_eq!(fs::read_to_string(&tracked)?, "manual edit\n"); let codex = Arc::clone(&harness.test().codex); 
expect_successful_undo(&codex).await?; assert_eq!(fs::read_to_string(&tracked)?, "baseline\n"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn undo_preserves_unrelated_staged_changes() -> Result<()> { skip_if_no_network!(Ok(())); let harness = undo_harness().await?; init_git_repo(harness.cwd())?; // create a file for user to mess with let user_file = harness.path("user_file.txt"); fs::write(&user_file, "user content v1\n")?; git(harness.cwd(), &["add", "user_file.txt"])?; git(harness.cwd(), &["commit", "-m", "add user file"])?; // AI turn: modifies a DIFFERENT file (creating ghost commit of baseline) let ai_file = harness.path("ai_file.txt"); fs::write(&ai_file, "ai content v1\n")?; git(harness.cwd(), &["add", "ai_file.txt"])?; git(harness.cwd(), &["commit", "-m", "add ai file"])?; // baseline let patch = "*** Begin Patch\n*** Update File: ai_file.txt\n@@\n-ai content v1\n+ai content v2\n*** End Patch"; run_apply_patch_turn(&harness, "modify ai file", "undo-staging-test", patch, "ok").await?; assert_eq!(fs::read_to_string(&ai_file)?, "ai content v2\n"); // NOW: User modifies user_file AND stages it fs::write(&user_file, "user content v2 (staged)\n")?; git(harness.cwd(), &["add", "user_file.txt"])?; // Verify status before undo let status_before = git_output(harness.cwd(), &["status", "--porcelain"])?; assert!(status_before.contains("M user_file.txt")); // M in index // UNDO let codex = Arc::clone(&harness.test().codex); // checks that undo succeeded expect_successful_undo(&codex).await?; // AI file should be reverted assert_eq!(fs::read_to_string(&ai_file)?, "ai content v1\n"); // User file should STILL be staged with v2 let status_after = git_output(harness.cwd(), &["status", "--porcelain"])?; // We expect 'M' in the first column (index modified). // The second column will likely be 'M' because the worktree was reverted to v1 while index has v2. // So "MM user_file.txt" is expected. 
if !status_after.contains("MM user_file.txt") && !status_after.contains("M user_file.txt") { bail!("Status should contain staged change (M in first col), but was: '{status_after}'"); } // Disk content is reverted to v1 (snapshot state) assert_eq!(fs::read_to_string(&user_file)?, "user content v1\n"); // But we can get v2 back from index git(harness.cwd(), &["checkout", "user_file.txt"])?; assert_eq!( fs::read_to_string(&user_file)?, "user content v2 (staged)\n" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/cli_stream.rs
codex-rs/core/tests/suite/cli_stream.rs
use assert_cmd::Command as AssertCommand; use codex_core::RolloutRecorder; use codex_core::protocol::GitInfo; use core_test_support::fs_wait; use core_test_support::skip_if_no_network; use std::time::Duration; use tempfile::TempDir; use uuid::Uuid; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; /// Tests streaming chat completions through the CLI using a mock server. /// This test: /// 1. Sets up a mock server that simulates OpenAI's chat completions API /// 2. Configures codex to use this mock server via a custom provider /// 3. Sends a simple "hello?" prompt and verifies the streamed response /// 4. Ensures the response is received exactly once and contains "hi" #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn chat_mode_stream_cli() { skip_if_no_network!(); let server = MockServer::start().await; let sse = concat!( "data: {\"choices\":[{\"delta\":{\"content\":\"hi\"}}]}\n\n", "data: {\"choices\":[{\"delta\":{}}]}\n\n", "data: [DONE]\n\n" ); Mock::given(method("POST")) .and(path("/v1/chat/completions")) .respond_with( ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse, "text/event-stream"), ) .expect(1) .mount(&server) .await; let home = TempDir::new().unwrap(); let provider_override = format!( "model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"chat\" }}", server.uri() ); let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap(); let mut cmd = AssertCommand::new(bin); cmd.arg("exec") .arg("--skip-git-repo-check") .arg("-c") .arg(&provider_override) .arg("-c") .arg("model_provider=\"mock\"") .arg("-C") .arg(env!("CARGO_MANIFEST_DIR")) .arg("hello?"); cmd.env("CODEX_HOME", home.path()) .env("OPENAI_API_KEY", "dummy") .env("OPENAI_BASE_URL", format!("{}/v1", server.uri())); let output = cmd.output().unwrap(); println!("Status: {}", output.status); 
println!("Stdout:\n{}", String::from_utf8_lossy(&output.stdout)); println!("Stderr:\n{}", String::from_utf8_lossy(&output.stderr)); assert!(output.status.success()); let stdout = String::from_utf8_lossy(&output.stdout); let hi_lines = stdout.lines().filter(|line| line.trim() == "hi").count(); assert_eq!(hi_lines, 1, "Expected exactly one line with 'hi'"); server.verify().await; // Verify a new session rollout was created and is discoverable via list_conversations let provider_filter = vec!["mock".to_string()]; let page = RolloutRecorder::list_conversations( home.path(), 10, None, &[], Some(provider_filter.as_slice()), "mock", ) .await .expect("list conversations"); assert!( !page.items.is_empty(), "expected at least one session to be listed" ); // First line of head must be the SessionMeta payload (id/timestamp) let head0 = page.items[0].head.first().expect("missing head record"); assert!(head0.get("id").is_some(), "head[0] missing id"); assert!( head0.get("timestamp").is_some(), "head[0] missing timestamp" ); } /// Verify that passing `-c experimental_instructions_file=...` to the CLI /// overrides the built-in base instructions by inspecting the request body /// received by a mock OpenAI Responses endpoint. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn exec_cli_applies_experimental_instructions_file() { skip_if_no_network!(); // Start mock server which will capture the request and return a minimal // SSE stream for a single turn. let server = MockServer::start().await; let sse = concat!( "data: {\"type\":\"response.created\",\"response\":{}}\n\n", "data: {\"type\":\"response.completed\",\"response\":{\"id\":\"r1\"}}\n\n" ); let resp_mock = core_test_support::responses::mount_sse_once(&server, sse.to_string()).await; // Create a temporary instructions file with a unique marker we can assert // appears in the outbound request payload. 
let custom = TempDir::new().unwrap(); let marker = "cli-experimental-instructions-marker"; let custom_path = custom.path().join("instr.md"); std::fs::write(&custom_path, marker).unwrap(); let custom_path_str = custom_path.to_string_lossy().replace('\\', "/"); // Build a provider override that points at the mock server and instructs // Codex to use the Responses API with the dummy env var. let provider_override = format!( "model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"responses\" }}", server.uri() ); let home = TempDir::new().unwrap(); let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap(); let mut cmd = AssertCommand::new(bin); cmd.arg("exec") .arg("--skip-git-repo-check") .arg("-c") .arg(&provider_override) .arg("-c") .arg("model_provider=\"mock\"") .arg("-c") .arg(format!( "experimental_instructions_file=\"{custom_path_str}\"" )) .arg("-C") .arg(env!("CARGO_MANIFEST_DIR")) .arg("hello?\n"); cmd.env("CODEX_HOME", home.path()) .env("OPENAI_API_KEY", "dummy") .env("OPENAI_BASE_URL", format!("{}/v1", server.uri())); let output = cmd.output().unwrap(); println!("Status: {}", output.status); println!("Stdout:\n{}", String::from_utf8_lossy(&output.stdout)); println!("Stderr:\n{}", String::from_utf8_lossy(&output.stderr)); assert!(output.status.success()); // Inspect the captured request and verify our custom base instructions were // included in the `instructions` field. let request = resp_mock.single_request(); let body = request.body_json(); let instructions = body .get("instructions") .and_then(|v| v.as_str()) .unwrap_or_default() .to_string(); assert!( instructions.contains(marker), "instructions did not contain custom marker; got: {instructions}" ); } /// Tests streaming responses through the CLI using a local SSE fixture file. /// This test: /// 1. Uses a pre-recorded SSE response fixture instead of a live server /// 2. Configures codex to read from this fixture via CODEX_RS_SSE_FIXTURE env var /// 3. 
Sends a "hello?" prompt and verifies the response /// 4. Ensures the fixture content is correctly streamed through the CLI #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn responses_api_stream_cli() { skip_if_no_network!(); let fixture = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/cli_responses_fixture.sse"); let home = TempDir::new().unwrap(); let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap(); let mut cmd = AssertCommand::new(bin); cmd.arg("exec") .arg("--skip-git-repo-check") .arg("-C") .arg(env!("CARGO_MANIFEST_DIR")) .arg("hello?"); cmd.env("CODEX_HOME", home.path()) .env("OPENAI_API_KEY", "dummy") .env("CODEX_RS_SSE_FIXTURE", fixture) .env("OPENAI_BASE_URL", "http://unused.local"); let output = cmd.output().unwrap(); assert!(output.status.success()); let stdout = String::from_utf8_lossy(&output.stdout); assert!(stdout.contains("fixture hello")); } /// End-to-end: create a session (writes rollout), verify the file, then resume and confirm append. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> { // Honor sandbox network restrictions for CI parity with the other tests. skip_if_no_network!(Ok(())); // 1. Temp home so we read/write isolated session files. let home = TempDir::new()?; // 2. Unique marker we'll look for in the session log. let marker = format!("integration-test-{}", Uuid::new_v4()); let prompt = format!("echo {marker}"); // 3. Use the same offline SSE fixture as responses_api_stream_cli so the test is hermetic. let fixture = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/cli_responses_fixture.sse"); // 4. Run the codex CLI and invoke `exec`, which is what records a session. 
let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap(); let mut cmd = AssertCommand::new(bin); cmd.arg("exec") .arg("--skip-git-repo-check") .arg("-C") .arg(env!("CARGO_MANIFEST_DIR")) .arg(&prompt); cmd.env("CODEX_HOME", home.path()) .env("OPENAI_API_KEY", "dummy") .env("CODEX_RS_SSE_FIXTURE", &fixture) // Required for CLI arg parsing even though fixture short-circuits network usage. .env("OPENAI_BASE_URL", "http://unused.local"); let output = cmd.output().unwrap(); assert!( output.status.success(), "codex-cli exec failed: {}", String::from_utf8_lossy(&output.stderr) ); // Wait for sessions dir to appear. let sessions_dir = home.path().join("sessions"); fs_wait::wait_for_path_exists(&sessions_dir, Duration::from_secs(5)).await?; // Find the session file that contains `marker`. let marker_clone = marker.clone(); let path = fs_wait::wait_for_matching_file(&sessions_dir, Duration::from_secs(10), move |p| { if p.extension().and_then(|ext| ext.to_str()) != Some("jsonl") { return false; } let Ok(content) = std::fs::read_to_string(p) else { return false; }; content.contains(&marker_clone) }) .await?; // Basic sanity checks on location and metadata. 
let rel = match path.strip_prefix(&sessions_dir) { Ok(r) => r, Err(_) => panic!("session file should live under sessions/"), }; let comps: Vec<String> = rel .components() .map(|c| c.as_os_str().to_string_lossy().into_owned()) .collect(); assert_eq!( comps.len(), 4, "Expected sessions/YYYY/MM/DD/<file>, got {rel:?}" ); let year = &comps[0]; let month = &comps[1]; let day = &comps[2]; assert!( year.len() == 4 && year.chars().all(|c| c.is_ascii_digit()), "Year dir not 4-digit numeric: {year}" ); assert!( month.len() == 2 && month.chars().all(|c| c.is_ascii_digit()), "Month dir not zero-padded 2-digit numeric: {month}" ); assert!( day.len() == 2 && day.chars().all(|c| c.is_ascii_digit()), "Day dir not zero-padded 2-digit numeric: {day}" ); if let Ok(m) = month.parse::<u8>() { assert!((1..=12).contains(&m), "Month out of range: {m}"); } if let Ok(d) = day.parse::<u8>() { assert!((1..=31).contains(&d), "Day out of range: {d}"); } let content = std::fs::read_to_string(&path).unwrap_or_else(|_| panic!("Failed to read session file")); let mut lines = content.lines(); let meta_line = lines .next() .ok_or("missing session meta line") .unwrap_or_else(|_| panic!("missing session meta line")); let meta: serde_json::Value = serde_json::from_str(meta_line) .unwrap_or_else(|_| panic!("Failed to parse session meta line as JSON")); assert_eq!( meta.get("type").and_then(|v| v.as_str()), Some("session_meta") ); let payload = meta .get("payload") .unwrap_or_else(|| panic!("Missing payload in meta line")); assert!(payload.get("id").is_some(), "SessionMeta missing id"); assert!( payload.get("timestamp").is_some(), "SessionMeta missing timestamp" ); let mut found_message = false; for line in lines { if line.trim().is_empty() { continue; } let Ok(item) = serde_json::from_str::<serde_json::Value>(line) else { continue; }; if item.get("type").and_then(|t| t.as_str()) == Some("response_item") && let Some(payload) = item.get("payload") && payload.get("type").and_then(|t| t.as_str()) == 
Some("message") && let Some(c) = payload.get("content") && c.to_string().contains(&marker) { found_message = true; break; } } assert!( found_message, "No message found in session file containing the marker" ); // Second run: resume should update the existing file. let marker2 = format!("integration-resume-{}", Uuid::new_v4()); let prompt2 = format!("echo {marker2}"); let bin2 = codex_utils_cargo_bin::cargo_bin("codex").unwrap(); let mut cmd2 = AssertCommand::new(bin2); cmd2.arg("exec") .arg("--skip-git-repo-check") .arg("-C") .arg(env!("CARGO_MANIFEST_DIR")) .arg(&prompt2) .arg("resume") .arg("--last"); cmd2.env("CODEX_HOME", home.path()) .env("OPENAI_API_KEY", "dummy") .env("CODEX_RS_SSE_FIXTURE", &fixture) .env("OPENAI_BASE_URL", "http://unused.local"); let output2 = cmd2.output().unwrap(); assert!(output2.status.success(), "resume codex-cli run failed"); // Find the new session file containing the resumed marker. let marker2_clone = marker2.clone(); let resumed_path = fs_wait::wait_for_matching_file(&sessions_dir, Duration::from_secs(10), move |p| { if p.extension().and_then(|ext| ext.to_str()) != Some("jsonl") { return false; } std::fs::read_to_string(p) .map(|content| content.contains(&marker2_clone)) .unwrap_or(false) }) .await?; // Resume should write to the existing log file. assert_eq!( resumed_path, path, "resume should create a new session file" ); let resumed_content = std::fs::read_to_string(&resumed_path)?; assert!( resumed_content.contains(&marker), "resumed file missing original marker" ); assert!( resumed_content.contains(&marker2), "resumed file missing resumed marker" ); Ok(()) } /// Integration test to verify git info is collected and recorded in session files. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn integration_git_info_unit_test() { // This test verifies git info collection works independently // without depending on the full CLI integration // 1. 
Create temp directory for git repo let temp_dir = TempDir::new().unwrap(); let git_repo = temp_dir.path().to_path_buf(); let envs = vec![ ("GIT_CONFIG_GLOBAL", "/dev/null"), ("GIT_CONFIG_NOSYSTEM", "1"), ]; // 2. Initialize a git repository with some content let init_output = std::process::Command::new("git") .envs(envs.clone()) .args(["init"]) .current_dir(&git_repo) .output() .unwrap(); assert!(init_output.status.success(), "git init failed"); // Configure git user (required for commits) std::process::Command::new("git") .envs(envs.clone()) .args(["config", "user.name", "Integration Test"]) .current_dir(&git_repo) .output() .unwrap(); std::process::Command::new("git") .envs(envs.clone()) .args(["config", "user.email", "test@example.com"]) .current_dir(&git_repo) .output() .unwrap(); // Create a test file and commit it let test_file = git_repo.join("test.txt"); std::fs::write(&test_file, "integration test content").unwrap(); std::process::Command::new("git") .envs(envs.clone()) .args(["add", "."]) .current_dir(&git_repo) .output() .unwrap(); let commit_output = std::process::Command::new("git") .envs(envs.clone()) .args(["commit", "-m", "Integration test commit"]) .current_dir(&git_repo) .output() .unwrap(); assert!(commit_output.status.success(), "git commit failed"); // Create a branch to test branch detection std::process::Command::new("git") .envs(envs.clone()) .args(["checkout", "-b", "integration-test-branch"]) .current_dir(&git_repo) .output() .unwrap(); // Add a remote to test repository URL detection std::process::Command::new("git") .envs(envs.clone()) .args([ "remote", "add", "origin", "https://github.com/example/integration-test.git", ]) .current_dir(&git_repo) .output() .unwrap(); // 3. Test git info collection directly let git_info = codex_core::git_info::collect_git_info(&git_repo).await; // 4. 
Verify git info is present and contains expected data assert!(git_info.is_some(), "Git info should be collected"); let git_info = git_info.unwrap(); // Check that we have a commit hash assert!( git_info.commit_hash.is_some(), "Git info should contain commit_hash" ); let commit_hash = git_info.commit_hash.as_ref().unwrap(); assert_eq!(commit_hash.len(), 40, "Commit hash should be 40 characters"); assert!( commit_hash.chars().all(|c| c.is_ascii_hexdigit()), "Commit hash should be hexadecimal" ); // Check that we have the correct branch assert!(git_info.branch.is_some(), "Git info should contain branch"); let branch = git_info.branch.as_ref().unwrap(); assert_eq!( branch, "integration-test-branch", "Branch should match what we created" ); // Check that we have the repository URL assert!( git_info.repository_url.is_some(), "Git info should contain repository_url" ); let repo_url = git_info.repository_url.as_ref().unwrap(); // Some hosts rewrite remotes (e.g., github.com → git@github.com), so assert against // the actual remote reported by git instead of a static URL. let expected_remote_url = std::process::Command::new("git") .args(["remote", "get-url", "origin"]) .current_dir(&git_repo) .output() .unwrap(); let expected_remote_url = String::from_utf8(expected_remote_url.stdout) .unwrap() .trim() .to_string(); assert_eq!( repo_url, &expected_remote_url, "Repository URL should match git remote get-url output" ); println!("✅ Git info collection test passed!"); println!(" Commit: {commit_hash}"); println!(" Branch: {branch}"); println!(" Repo: {repo_url}"); // 5. Test serialization to ensure it works in SessionMeta let serialized = serde_json::to_string(&git_info).unwrap(); let deserialized: GitInfo = serde_json::from_str(&serialized).unwrap(); assert_eq!(git_info.commit_hash, deserialized.commit_hash); assert_eq!(git_info.branch, deserialized.branch); assert_eq!(git_info.repository_url, deserialized.repository_url); println!("✅ Git info serialization test passed!"); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/mod.rs
codex-rs/core/tests/suite/mod.rs
// Aggregates all former standalone integration tests as modules. use codex_arg0::arg0_dispatch; use ctor::ctor; use tempfile::TempDir; // This code runs before any other tests are run. // It allows the test binary to behave like codex and dispatch to apply_patch and codex-linux-sandbox // based on the arg0. // NOTE: this doesn't work on ARM #[ctor] pub static CODEX_ALIASES_TEMP_DIR: TempDir = unsafe { #[allow(clippy::unwrap_used)] arg0_dispatch().unwrap() }; #[cfg(not(target_os = "windows"))] mod abort_tasks; mod apply_patch_cli; #[cfg(not(target_os = "windows"))] mod approvals; mod auth_refresh; mod cli_stream; mod client; mod codex_delegate; mod compact; mod compact_remote; mod compact_resume_fork; mod deprecation_notice; mod exec; mod exec_policy; mod fork_conversation; mod grep_files; mod items; mod json_result; mod list_dir; mod list_models; mod live_cli; mod model_overrides; mod model_tools; mod models_etag_responses; mod otel; mod prompt_caching; mod quota_exceeded; mod read_file; mod remote_models; mod resume; mod resume_warning; mod review; mod rmcp_client; mod rollout_list_find; mod seatbelt; mod shell_command; mod shell_serialization; mod shell_snapshot; mod skills; mod stream_error_allows_next_turn; mod stream_no_completed; mod text_encoding_fix; mod tool_harness; mod tool_parallelism; mod tools; mod truncation; mod undo; mod unified_exec; mod user_notification; mod user_shell_cmd; mod view_image;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/prompt_caching.rs
codex-rs/core/tests/suite/prompt_caching.rs
#![allow(clippy::unwrap_used)] use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_core::protocol_config_types::ReasoningSummary; use codex_core::shell::Shell; use codex_core::shell::default_user_shell; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::user_input::UserInput; use codex_utils_absolute_path::AbsolutePathBuf; use core_test_support::load_sse_fixture_with_id; use core_test_support::responses::mount_sse_once; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use tempfile::TempDir; fn text_user_input(text: String) -> serde_json::Value { serde_json::json!({ "type": "message", "role": "user", "content": [ { "type": "input_text", "text": text } ] }) } fn default_env_context_str(cwd: &str, shell: &Shell) -> String { let shell_name = shell.name(); format!( r#"<environment_context> <cwd>{cwd}</cwd> <approval_policy>on-request</approval_policy> <sandbox_mode>read-only</sandbox_mode> <network_access>restricted</network_access> <shell>{shell_name}</shell> </environment_context>"# ) } /// Build minimal SSE stream with completed marker using the JSON fixture. 
fn sse_completed(id: &str) -> String { load_sse_fixture_with_id("tests/fixtures/completed_template.json", id) } fn assert_tool_names(body: &serde_json::Value, expected_names: &[&str]) { assert_eq!( body["tools"] .as_array() .unwrap() .iter() .map(|t| t["name"].as_str().unwrap().to_string()) .collect::<Vec<_>>(), expected_names ); } fn normalize_newlines(text: &str) -> String { text.replace("\r\n", "\n") } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, config, conversation_manager, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); config.model = Some("gpt-5.1-codex-max".to_string()); }) .build(&server) .await?; let base_instructions = conversation_manager .get_models_manager() .construct_model_family( config .model .as_deref() .expect("test config should have a model"), &config, ) .await .base_instructions .clone(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 1".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 2".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let expected_tools_names = vec![ "shell_command", "list_mcp_resources", "list_mcp_resource_templates", "read_mcp_resource", "update_plan", "apply_patch", "view_image", ]; let body0 = req1.single_request().body_json(); let expected_instructions = if expected_tools_names.contains(&"apply_patch") { base_instructions } else { [ base_instructions.clone(), 
include_str!("../../../apply-patch/apply_patch_tool_instructions.md").to_string(), ] .join("\n") }; assert_eq!( body0["instructions"], serde_json::json!(expected_instructions), ); assert_tool_names(&body0, &expected_tools_names); let body1 = req2.single_request().body_json(); assert_eq!( body1["instructions"], serde_json::json!(expected_instructions), ); assert_tool_names(&body1, &expected_tools_names); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn codex_mini_latest_tools() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); config.features.disable(Feature::ApplyPatchFreeform); config.model = Some("codex-mini-latest".to_string()); }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 1".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 2".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let expected_instructions = [ include_str!("../../prompt.md"), include_str!("../../../apply-patch/apply_patch_tool_instructions.md"), ] .join("\n"); let body0 = req1.single_request().body_json(); let instructions0 = body0["instructions"] .as_str() .expect("instructions should be a string"); assert_eq!( normalize_newlines(instructions0), normalize_newlines(&expected_instructions) ); let body1 = req2.single_request().body_json(); let instructions1 = body1["instructions"] .as_str() .expect("instructions should be a string"); assert_eq!( normalize_newlines(instructions1), 
normalize_newlines(&expected_instructions) ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn prefixes_context_and_instructions_once_and_consistently_across_requests() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, config, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); }) .build(&server) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 1".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 2".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body1 = req1.single_request().body_json(); let input1 = body1["input"].as_array().expect("input array"); assert_eq!(input1.len(), 3, "expected cached prefix + env + user msg"); let ui_text = input1[0]["content"][0]["text"] .as_str() .expect("ui message text"); assert!( ui_text.contains("be consistent and helpful"), "expected user instructions in UI message: {ui_text}" ); let shell = default_user_shell(); let cwd_str = config.cwd.to_string_lossy(); let expected_env_text = default_env_context_str(&cwd_str, &shell); assert_eq!( input1[1], text_user_input(expected_env_text), "expected environment context after UI message" ); assert_eq!(input1[2], text_user_input("hello 1".to_string())); let body2 = req2.single_request().body_json(); let input2 = body2["input"].as_array().expect("input array"); assert_eq!( &input2[..input1.len()], input1.as_slice(), "expected cached prefix to be reused" ); assert_eq!(input2[input1.len()], text_user_input("hello 2".to_string())); Ok(()) } #[tokio::test(flavor = 
"multi_thread", worker_threads = 2)] async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); }) .build(&server) .await?; // First turn codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 1".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let writable = TempDir::new().unwrap(); codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: Some(AskForApproval::Never), sandbox_policy: Some(SandboxPolicy::WorkspaceWrite { writable_roots: vec![writable.path().try_into().unwrap()], network_access: true, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }), model: Some("o3".to_string()), effort: Some(Some(ReasoningEffort::High)), summary: Some(ReasoningSummary::Detailed), }) .await?; // Second turn after overrides codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 2".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body1 = req1.single_request().body_json(); let body2 = req2.single_request().body_json(); // prompt_cache_key should remain constant across overrides assert_eq!( body1["prompt_cache_key"], body2["prompt_cache_key"], "prompt_cache_key should not change across overrides" ); // The entire prefix from the first request should be identical and reused // as the prefix of the second request, ensuring cache hit potential. 
let expected_user_message_2 = serde_json::json!({ "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello 2" } ] }); // After overriding the turn context, the environment context should be emitted again // reflecting the new approval policy and sandbox settings. Omit cwd because it did // not change. let shell = default_user_shell(); let expected_env_text_2 = format!( r#"<environment_context> <approval_policy>never</approval_policy> <sandbox_mode>workspace-write</sandbox_mode> <network_access>enabled</network_access> <writable_roots> <root>{}</root> </writable_roots> <shell>{}</shell> </environment_context>"#, writable.path().display(), shell.name() ); let expected_env_msg_2 = serde_json::json!({ "type": "message", "role": "user", "content": [ { "type": "input_text", "text": expected_env_text_2 } ] }); let expected_body2 = serde_json::json!( [ body1["input"].as_array().unwrap().as_slice(), [expected_env_msg_2, expected_user_message_2].as_slice(), ] .concat() ); assert_eq!(body2["input"], expected_body2); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn override_before_first_turn_emits_environment_context() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let req = mount_sse_once(&server, sse_completed("resp-1")).await; let TestCodex { codex, .. 
} = test_codex().build(&server).await?; codex .submit(Op::OverrideTurnContext { cwd: None, approval_policy: Some(AskForApproval::Never), sandbox_policy: None, model: None, effort: None, summary: None, }) .await?; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "first message".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body = req.single_request().body_json(); let input = body["input"] .as_array() .expect("input array must be present"); assert!( !input.is_empty(), "expected at least environment context and user message" ); let env_texts: Vec<&str> = input .iter() .filter_map(|msg| { msg["content"] .as_array() .and_then(|content| content.first()) .and_then(|item| item["text"].as_str()) }) .filter(|text| text.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG)) .collect(); assert!( env_texts .iter() .any(|text| text.contains("<approval_policy>never</approval_policy>")), "environment context should reflect overridden approval policy: {env_texts:?}" ); let env_count = input .iter() .filter(|msg| { msg["content"] .as_array() .and_then(|content| { content.iter().find(|item| { item["type"].as_str() == Some("input_text") && item["text"] .as_str() .map(|text| text.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG)) .unwrap_or(false) }) }) .is_some() }) .count(); assert_eq!( env_count, 2, "environment context should appear exactly twice, found {env_count}" ); let user_texts: Vec<&str> = input .iter() .filter_map(|msg| { msg["content"] .as_array() .and_then(|content| content.first()) .and_then(|item| item["text"].as_str()) }) .collect(); assert!( user_texts.contains(&"first message"), "expected user message text, got {user_texts:?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = 
mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); }) .build(&server) .await?; // First turn codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello 1".into(), }], }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Second turn using per-turn overrides via UserTurn let new_cwd = TempDir::new().unwrap(); let writable = TempDir::new().unwrap(); codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello 2".into(), }], cwd: new_cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::WorkspaceWrite { writable_roots: vec![AbsolutePathBuf::try_from(writable.path()).unwrap()], network_access: true, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }, model: "o3".to_string(), effort: Some(ReasoningEffort::High), summary: ReasoningSummary::Detailed, final_output_json_schema: None, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body1 = req1.single_request().body_json(); let body2 = req2.single_request().body_json(); // prompt_cache_key should remain constant across per-turn overrides assert_eq!( body1["prompt_cache_key"], body2["prompt_cache_key"], "prompt_cache_key should not change across per-turn overrides" ); // The entire prefix from the first request should be identical and reused // as the prefix of the second request. 
let expected_user_message_2 = serde_json::json!({ "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello 2" } ] }); let shell = default_user_shell(); let expected_env_text_2 = format!( r#"<environment_context> <cwd>{}</cwd> <approval_policy>never</approval_policy> <sandbox_mode>workspace-write</sandbox_mode> <network_access>enabled</network_access> <writable_roots> <root>{}</root> </writable_roots> <shell>{}</shell> </environment_context>"#, new_cwd.path().display(), writable.path().display(), shell.name(), ); let expected_env_msg_2 = serde_json::json!({ "type": "message", "role": "user", "content": [ { "type": "input_text", "text": expected_env_text_2 } ] }); let expected_body2 = serde_json::json!( [ body1["input"].as_array().unwrap().as_slice(), [expected_env_msg_2, expected_user_message_2].as_slice(), ] .concat() ); assert_eq!(body2["input"], expected_body2); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, config, session_configured, .. 
} = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); }) .build(&server) .await?; let default_cwd = config.cwd.clone(); let default_approval_policy = config.approval_policy.value(); let default_sandbox_policy = config.sandbox_policy.get(); let default_model = session_configured.model; let default_effort = config.model_reasoning_effort; let default_summary = config.model_reasoning_summary; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello 1".into(), }], cwd: default_cwd.clone(), approval_policy: default_approval_policy, sandbox_policy: default_sandbox_policy.clone(), model: default_model.clone(), effort: default_effort, summary: default_summary, final_output_json_schema: None, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello 2".into(), }], cwd: default_cwd.clone(), approval_policy: default_approval_policy, sandbox_policy: default_sandbox_policy.clone(), model: default_model.clone(), effort: default_effort, summary: default_summary, final_output_json_schema: None, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body1 = req1.single_request().body_json(); let body2 = req2.single_request().body_json(); let expected_ui_msg = body1["input"][0].clone(); let shell = default_user_shell(); let default_cwd_lossy = default_cwd.to_string_lossy(); let expected_env_msg_1 = text_user_input(default_env_context_str(&default_cwd_lossy, &shell)); let expected_user_message_1 = text_user_input("hello 1".to_string()); let expected_input_1 = serde_json::Value::Array(vec![ expected_ui_msg.clone(), expected_env_msg_1.clone(), expected_user_message_1.clone(), ]); assert_eq!(body1["input"], expected_input_1); let expected_user_message_2 = text_user_input("hello 2".to_string()); let expected_input_2 = serde_json::Value::Array(vec![ expected_ui_msg, 
expected_env_msg_1, expected_user_message_1, expected_user_message_2, ]); assert_eq!(body2["input"], expected_input_2); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); use pretty_assertions::assert_eq; let server = start_mock_server().await; let req1 = mount_sse_once(&server, sse_completed("resp-1")).await; let req2 = mount_sse_once(&server, sse_completed("resp-2")).await; let TestCodex { codex, config, session_configured, .. } = test_codex() .with_config(|config| { config.user_instructions = Some("be consistent and helpful".to_string()); }) .build(&server) .await?; let default_cwd = config.cwd.clone(); let default_approval_policy = config.approval_policy.value(); let default_sandbox_policy = config.sandbox_policy.get(); let default_model = session_configured.model; let default_effort = config.model_reasoning_effort; let default_summary = config.model_reasoning_summary; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello 1".into(), }], cwd: default_cwd.clone(), approval_policy: default_approval_policy, sandbox_policy: default_sandbox_policy.clone(), model: default_model, effort: default_effort, summary: default_summary, final_output_json_schema: None, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "hello 2".into(), }], cwd: default_cwd.clone(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: "o3".to_string(), effort: Some(ReasoningEffort::High), summary: ReasoningSummary::Detailed, final_output_json_schema: None, }) .await?; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let body1 = req1.single_request().body_json(); let body2 = req2.single_request().body_json(); let expected_ui_msg = body1["input"][0].clone(); let shell = 
default_user_shell(); let expected_env_text_1 = default_env_context_str(&default_cwd.to_string_lossy(), &shell); let expected_env_msg_1 = text_user_input(expected_env_text_1); let expected_user_message_1 = text_user_input("hello 1".to_string()); let expected_input_1 = serde_json::Value::Array(vec![ expected_ui_msg.clone(), expected_env_msg_1.clone(), expected_user_message_1.clone(), ]); assert_eq!(body1["input"], expected_input_1); let shell_name = shell.name(); let expected_env_msg_2 = text_user_input(format!( r#"<environment_context> <approval_policy>never</approval_policy> <sandbox_mode>danger-full-access</sandbox_mode> <network_access>enabled</network_access> <shell>{shell_name}</shell> </environment_context>"# )); let expected_user_message_2 = text_user_input("hello 2".to_string()); let expected_input_2 = serde_json::Value::Array(vec![ expected_ui_msg, expected_env_msg_1, expected_user_message_1, expected_env_msg_2, expected_user_message_2, ]); assert_eq!(body2["input"], expected_input_2); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/rmcp_client.rs
codex-rs/core/tests/suite/rmcp_client.rs
use std::collections::HashMap; use std::ffi::OsStr; use std::ffi::OsString; use std::fs; use std::net::TcpListener; use std::path::Path; use std::time::Duration; use std::time::SystemTime; use std::time::UNIX_EPOCH; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::McpInvocation; use codex_core::protocol::McpToolCallBeginEvent; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use core_test_support::responses; use core_test_support::responses::mount_sse_once; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use escargot::CargoBuild; use mcp_types::ContentBlock; use serde_json::Value; use serde_json::json; use serial_test::serial; use tempfile::tempdir; use tokio::net::TcpStream; use tokio::process::Child; use tokio::process::Command; use tokio::time::Instant; use tokio::time::sleep; static OPENAI_PNG: &str = 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD0AAAA9CAYAAAAeYmHpAAAE6klEQVR4Aeyau44UVxCGx1fZsmRLlm3Zoe0XcGQ5cUiCCIgJeS9CHgAhMkISQnIuGQgJEkBcxLW+nqnZ6uqqc+nuWRC7q/P3qetf9e+MtOwyX25O4Nep6JPyop++0qev9HrfgZ+F6r2DuB/vHOrt/UIkqdDHYvujOW6fO7h/CNEI+a5jc+pBR8uy0jVFsziYu5HtfSUk+Io34q921hLNctFSX0gwww+S8wce8K1LfCU+cYW4888aov8NxqvQILUPPReLOrm6zyLxa4i+6VZuFbJo8d1MOHZm+7VUtB/aIvhPWc/3SWg49JcwFLlHxuXKjtyloo+YNhuW3VS+WPBuUEMvCFKjEDVgFBQHXrnazpqiSxNZCkQ1kYiozsbm9Oz7l4i2Il7vGccGNWAc3XosDrZe/9P3ZnMmzHNEQw4smf8RQ87XEAMsC7Az0Au+dgXerfH4+sHvEc0SYGic8WBBUGqFH2gN7yDrazy7m2pbRTeRmU3+MjZmr1h6LJgPbGy23SI6GlYT0brQ71IY8Us4PNQCm+zepSbaD2BY9xCaAsD9IIj/IzFmKMSdHHonwdZATbTnYREf6/VZGER98N9yCWIvXQwXDoDdhZJoT8jwLnJXDB9w4Sb3e6nK5ndzlkTLnP3JBu4LKkbrYrU69gCVceV0JvpyuW1xlsUVngzhwMetn/XamtTORF9IO5YnWNiyeF9zCAfqR3fUW+vZZKLtgP+ts8BmQRBREAdRDhH3o8QuRh/YucNFz2BEjxbRN6LGzphfKmvP6v6QhqIQyZ8XNJ0W0X83MR1PEcJBNO2KC2Z1TW/v244scp9FwRViZxIOBF0Lctk7ZVSavdLvRlV1hz/ysUi9sr8CIcB3nvWBwA93ykTz18eAYxQ6N/K2DkPA1lv3iXCwmDUT7YkjIby9siXueIJj9H+pzSqJ9oIuJWTUgSSt4WO7o/9GGg0viR4VinNRUDoIj34xoCd6pxD3aK3zfdbnx5v1J3ZNNEJsE0sBG7N27ReDrJc4sFxz7dI/ZAbOmmiKvHBitQXpAdR6+F7v+/ol/tOouUV01EeMZQF2BoQDn6dP4XNr+j9GZEtEK1/L8pFw7bd3a53tsTa7WD+054jOFmPg1XBKPQgnqFfmFcy32ZRvjmiIIQTYFvyDxQ8nH8WIwwGwlyDjDznnilYyFr6njrlZwsKkBpO59A7OwgdzPEWRm+G+oeb7IfyNuzjEEVLrOVxJsxvxwF8kmCM6I2QYmJunz4u4TrADpfl7mlbRTWQ7VmrBzh3+C9f6Grc3YoGN9dg/SXFthpRsT6vobfXRs2VBlgBHXVMLHjDNbIZv1sZ9+X3hB09cXdH1JKViyG0+W9bWZDa/r2f9zAFR71sTzGpMSWz2iI4YssWjWo3REy1MDGjdwe5e0dFSiAC1JakBvu4/CUS8Eh6dqHdU0Or0ioY3W5ClSqDXAy7/6SRfgw8vt4I+tbvvNtFT2kVDhY5+IGb1rCqYaXNF08vSALsXCPmt0kQNqJT1p5eI1mkIV/BxCY1z85lOzeFbPBQHURkkPTlwTYK9gTVE25l84IbFFN+YJDHjdpn0gq6mrHht0dkcjbM4UL9283O5p77GN+SPW/QwVB4IUYg7Or+Kp7naR6qktP98LNF2UxWo9yObPIT9KYg+hK4i56no4rfnM0qeyFf6AwAAAP//trwR3wAAAAZJREFUAwBZ0sR75itw5gAAAABJRU5ErkJggg=="; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[serial(mcp_test_value)] async fn stdio_server_round_trip() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = 
responses::start_mock_server().await; let call_id = "call-123"; let server_name = "rmcp"; let tool_name = format!("mcp__{server_name}__echo"); mount_sse_once( &server, responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, "{\"message\":\"ping\"}"), responses::ev_completed("resp-1"), ]), ) .await; mount_sse_once( &server, responses::sse(vec![ responses::ev_assistant_message("msg-1", "rmcp echo tool completed successfully."), responses::ev_completed("resp-2"), ]), ) .await; let expected_env_value = "propagated-env"; let rmcp_test_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_stdio_server") .run()? .path() .to_string_lossy() .into_owned(); let fixture = test_codex() .with_config(move |config| { config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::Stdio { command: rmcp_test_server_bin.clone(), args: Vec::new(), env: Some(HashMap::from([( "MCP_TEST_VALUE".to_string(), expected_env_value.to_string(), )])), env_vars: Vec::new(), cwd: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp echo tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("event guard guarantees McpToolCallBegin"); }; assert_eq!(begin.invocation.server, server_name); assert_eq!(begin.invocation.tool, "echo"); let end_event 
= wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("event guard guarantees McpToolCallEnd"); }; let result = end .result .as_ref() .expect("rmcp echo tool should return success"); assert_eq!(result.is_error, Some(false)); assert!( result.content.is_empty(), "content should default to an empty array" ); let structured = result .structured_content .as_ref() .expect("structured content"); let Value::Object(map) = structured else { panic!("structured content should be an object: {structured:?}"); }; let echo_value = map .get("echo") .and_then(Value::as_str) .expect("echo payload present"); assert_eq!(echo_value, "ECHOING: ping"); let env_value = map .get("env") .and_then(Value::as_str) .expect("env snapshot inserted"); assert_eq!(env_value, expected_env_value); wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; server.verify().await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[serial(mcp_test_value)] async fn stdio_image_responses_round_trip() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = responses::start_mock_server().await; let call_id = "img-1"; let server_name = "rmcp"; let tool_name = format!("mcp__{server_name}__image"); // First stream: model decides to call the image tool. mount_sse_once( &server, responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, "{}"), responses::ev_completed("resp-1"), ]), ) .await; // Second stream: after tool execution, assistant emits a message and completes. let final_mock = mount_sse_once( &server, responses::sse(vec![ responses::ev_assistant_message("msg-1", "rmcp image tool completed successfully."), responses::ev_completed("resp-2"), ]), ) .await; // Build the stdio rmcp server and pass the image as data URL so it can construct ImageContent. 
let rmcp_test_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_stdio_server") .run()? .path() .to_string_lossy() .into_owned(); let fixture = test_codex() .with_config(move |config| { config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::Stdio { command: rmcp_test_server_bin, args: Vec::new(), env: Some(HashMap::from([( "MCP_TEST_IMAGE_DATA_URL".to_string(), OPENAI_PNG.to_string(), )])), env_vars: Vec::new(), cwd: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp image tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; // Wait for tool begin/end and final completion. 
let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("begin"); }; assert_eq!( begin, McpToolCallBeginEvent { call_id: call_id.to_string(), invocation: McpInvocation { server: server_name.to_string(), tool: "image".to_string(), arguments: Some(json!({})), }, }, ); let end_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("end"); }; assert_eq!(end.call_id, call_id); assert_eq!( end.invocation, McpInvocation { server: server_name.to_string(), tool: "image".to_string(), arguments: Some(json!({})), } ); let result = end.result.expect("rmcp image tool should return success"); assert_eq!(result.is_error, Some(false)); assert_eq!(result.content.len(), 1); let base64_only = OPENAI_PNG .strip_prefix("data:image/png;base64,") .expect("data url prefix"); match &result.content[0] { ContentBlock::ImageContent(img) => { assert_eq!(img.mime_type, "image/png"); assert_eq!(img.r#type, "image"); assert_eq!(img.data, base64_only); } other => panic!("expected image content, got {other:?}"), } wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let output_item = final_mock.single_request().function_call_output(call_id); assert_eq!( output_item, json!({ "type": "function_call_output", "call_id": call_id, "output": [{ "type": "input_image", "image_url": OPENAI_PNG }] }) ); server.verify().await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[serial(mcp_test_value)] async fn stdio_image_completions_round_trip() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = responses::start_mock_server().await; let call_id = "img-cc-1"; let server_name = "rmcp"; let tool_name = format!("mcp__{server_name}__image"); let tool_call = json!({ "choices": [ { "delta": { "tool_calls": [ { "id": 
call_id, "type": "function", "function": {"name": tool_name, "arguments": "{}"} } ] }, "finish_reason": "tool_calls" } ] }); let sse_tool_call = format!( "data: {}\n\ndata: [DONE]\n\n", serde_json::to_string(&tool_call)? ); let final_assistant = json!({ "choices": [ { "delta": {"content": "rmcp image tool completed successfully."}, "finish_reason": "stop" } ] }); let sse_final = format!( "data: {}\n\ndata: [DONE]\n\n", serde_json::to_string(&final_assistant)? ); use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; struct ChatSeqResponder { num_calls: AtomicUsize, bodies: Vec<String>, } impl wiremock::Respond for ChatSeqResponder { fn respond(&self, _: &wiremock::Request) -> wiremock::ResponseTemplate { let idx = self.num_calls.fetch_add(1, Ordering::SeqCst); match self.bodies.get(idx) { Some(body) => wiremock::ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_string(body.clone()), None => panic!("no chat completion response for index {idx}"), } } } let chat_seq = ChatSeqResponder { num_calls: AtomicUsize::new(0), bodies: vec![sse_tool_call, sse_final], }; wiremock::Mock::given(wiremock::matchers::method("POST")) .and(wiremock::matchers::path("/v1/chat/completions")) .respond_with(chat_seq) .expect(2) .mount(&server) .await; let rmcp_test_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_stdio_server") .run()? 
.path() .to_string_lossy() .into_owned(); let fixture = test_codex() .with_config(move |config| { config.model_provider.wire_api = codex_core::WireApi::Chat; config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::Stdio { command: rmcp_test_server_bin, args: Vec::new(), env: Some(HashMap::from([( "MCP_TEST_IMAGE_DATA_URL".to_string(), OPENAI_PNG.to_string(), )])), env_vars: Vec::new(), cwd: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp image tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("begin"); }; assert_eq!( begin, McpToolCallBeginEvent { call_id: call_id.to_string(), invocation: McpInvocation { server: server_name.to_string(), tool: "image".to_string(), arguments: Some(json!({})), }, }, ); let end_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("end"); }; assert!(end.result.as_ref().is_ok(), "tool call should succeed"); wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Chat Completions assertion: the second POST should include a tool role message // with an array `content` containing an item with the expected data URL. 
let all_requests = server.received_requests().await.expect("requests captured"); let requests: Vec<_> = all_requests .iter() .filter(|req| req.method == "POST" && req.url.path().ends_with("/chat/completions")) .collect(); assert!(requests.len() >= 2, "expected two chat completion calls"); let second = requests[1]; let body: Value = serde_json::from_slice(&second.body)?; let messages = body .get("messages") .and_then(Value::as_array) .cloned() .expect("messages array"); let tool_msg = messages .iter() .find(|m| { m.get("role") == Some(&json!("tool")) && m.get("tool_call_id") == Some(&json!(call_id)) }) .cloned() .expect("tool message present"); assert_eq!( tool_msg, json!({ "role": "tool", "tool_call_id": call_id, "content": [{"type": "image_url", "image_url": {"url": OPENAI_PNG}}] }) ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[serial(mcp_test_value)] async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = responses::start_mock_server().await; let call_id = "call-1234"; let server_name = "rmcp_whitelist"; let tool_name = format!("mcp__{server_name}__echo"); mount_sse_once( &server, responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, "{\"message\":\"ping\"}"), responses::ev_completed("resp-1"), ]), ) .await; mount_sse_once( &server, responses::sse(vec![ responses::ev_assistant_message("msg-1", "rmcp echo tool completed successfully."), responses::ev_completed("resp-2"), ]), ) .await; let expected_env_value = "propagated-env-from-whitelist"; let _guard = EnvVarGuard::set("MCP_TEST_VALUE", OsStr::new(expected_env_value)); let rmcp_test_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_stdio_server") .run()? 
.path() .to_string_lossy() .into_owned(); let fixture = test_codex() .with_config(move |config| { config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::Stdio { command: rmcp_test_server_bin, args: Vec::new(), env: None, env_vars: vec!["MCP_TEST_VALUE".to_string()], cwd: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp echo tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("event guard guarantees McpToolCallBegin"); }; assert_eq!(begin.invocation.server, server_name); assert_eq!(begin.invocation.tool, "echo"); let end_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("event guard guarantees McpToolCallEnd"); }; let result = end .result .as_ref() .expect("rmcp echo tool should return success"); assert_eq!(result.is_error, Some(false)); assert!( result.content.is_empty(), "content should default to an empty array" ); let structured = result .structured_content .as_ref() .expect("structured content"); let Value::Object(map) = structured else { panic!("structured content should be an object: {structured:?}"); }; let echo_value = map .get("echo") .and_then(Value::as_str) .expect("echo payload present"); assert_eq!(echo_value, "ECHOING: ping"); let 
env_value = map .get("env") .and_then(Value::as_str) .expect("env snapshot inserted"); assert_eq!(env_value, expected_env_value); wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; server.verify().await; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = responses::start_mock_server().await; let call_id = "call-456"; let server_name = "rmcp_http"; let tool_name = format!("mcp__{server_name}__echo"); mount_sse_once( &server, responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, "{\"message\":\"ping\"}"), responses::ev_completed("resp-1"), ]), ) .await; mount_sse_once( &server, responses::sse(vec![ responses::ev_assistant_message( "msg-1", "rmcp streamable http echo tool completed successfully.", ), responses::ev_completed("resp-2"), ]), ) .await; let expected_env_value = "propagated-env-http"; let rmcp_http_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_streamable_http_server") .run()? 
.path() .to_string_lossy() .into_owned(); let listener = TcpListener::bind("127.0.0.1:0")?; let port = listener.local_addr()?.port(); drop(listener); let bind_addr = format!("127.0.0.1:{port}"); let server_url = format!("http://{bind_addr}/mcp"); let mut http_server_child = Command::new(&rmcp_http_server_bin) .kill_on_drop(true) .env("MCP_STREAMABLE_HTTP_BIND_ADDR", &bind_addr) .env("MCP_TEST_VALUE", expected_env_value) .spawn()?; wait_for_streamable_http_server(&mut http_server_child, &bind_addr, Duration::from_secs(5)) .await?; let fixture = test_codex() .with_config(move |config| { config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::StreamableHttp { url: server_url, bearer_token_env_var: None, http_headers: None, env_http_headers: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp streamable http echo tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("event guard guarantees McpToolCallBegin"); }; assert_eq!(begin.invocation.server, server_name); assert_eq!(begin.invocation.tool, "echo"); let end_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("event guard guarantees McpToolCallEnd"); }; let result = end .result .as_ref() .expect("rmcp echo tool 
should return success"); assert_eq!(result.is_error, Some(false)); assert!( result.content.is_empty(), "content should default to an empty array" ); let structured = result .structured_content .as_ref() .expect("structured content"); let Value::Object(map) = structured else { panic!("structured content should be an object: {structured:?}"); }; let echo_value = map .get("echo") .and_then(Value::as_str) .expect("echo payload present"); assert_eq!(echo_value, "ECHOING: ping"); let env_value = map .get("env") .and_then(Value::as_str) .expect("env snapshot inserted"); assert_eq!(env_value, expected_env_value); wait_for_event(&fixture.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; server.verify().await; match http_server_child.try_wait() { Ok(Some(_)) => {} Ok(None) => { let _ = http_server_child.kill().await; } Err(error) => { eprintln!("failed to check streamable http server status: {error}"); let _ = http_server_child.kill().await; } } if let Err(error) = http_server_child.wait().await { eprintln!("failed to await streamable http server shutdown: {error}"); } Ok(()) } /// This test writes to a fallback credentials file in CODEX_HOME. /// Ideally, we wouldn't need to serialize the test but it's much more cumbersome to wire CODEX_HOME through the code. 
#[serial(codex_home)] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = responses::start_mock_server().await; let call_id = "call-789"; let server_name = "rmcp_http_oauth"; let tool_name = format!("mcp__{server_name}__echo"); mount_sse_once( &server, responses::sse(vec![ responses::ev_response_created("resp-1"), responses::ev_function_call(call_id, &tool_name, "{\"message\":\"ping\"}"), responses::ev_completed("resp-1"), ]), ) .await; mount_sse_once( &server, responses::sse(vec![ responses::ev_assistant_message( "msg-1", "rmcp streamable http oauth echo tool completed successfully.", ), responses::ev_completed("resp-2"), ]), ) .await; let expected_env_value = "propagated-env-http-oauth"; let expected_token = "initial-access-token"; let client_id = "test-client-id"; let refresh_token = "initial-refresh-token"; let rmcp_http_server_bin = CargoBuild::new() .package("codex-rmcp-client") .bin("test_streamable_http_server") .run()? 
.path() .to_string_lossy() .into_owned(); let listener = TcpListener::bind("127.0.0.1:0")?; let port = listener.local_addr()?.port(); drop(listener); let bind_addr = format!("127.0.0.1:{port}"); let server_url = format!("http://{bind_addr}/mcp"); let mut http_server_child = Command::new(&rmcp_http_server_bin) .kill_on_drop(true) .env("MCP_STREAMABLE_HTTP_BIND_ADDR", &bind_addr) .env("MCP_EXPECT_BEARER", expected_token) .env("MCP_TEST_VALUE", expected_env_value) .spawn()?; wait_for_streamable_http_server(&mut http_server_child, &bind_addr, Duration::from_secs(5)) .await?; let temp_home = tempdir()?; let _guard = EnvVarGuard::set("CODEX_HOME", temp_home.path().as_os_str()); write_fallback_oauth_tokens( temp_home.path(), server_name, &server_url, client_id, expected_token, refresh_token, )?; let fixture = test_codex() .with_config(move |config| { config.mcp_servers.insert( server_name.to_string(), McpServerConfig { transport: McpServerTransportConfig::StreamableHttp { url: server_url, bearer_token_env_var: None, http_headers: None, env_http_headers: None, }, enabled: true, startup_timeout_sec: Some(Duration::from_secs(10)), tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }, ); }) .build(&server) .await?; let session_model = fixture.session_configured.model.clone(); fixture .codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: "call the rmcp streamable http oauth echo tool".into(), }], final_output_json_schema: None, cwd: fixture.cwd.path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::ReadOnly, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; let begin_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallBegin(_)) }) .await; let EventMsg::McpToolCallBegin(begin) = begin_event else { unreachable!("event guard guarantees McpToolCallBegin"); }; assert_eq!(begin.invocation.server, server_name); assert_eq!(begin.invocation.tool, "echo"); let 
end_event = wait_for_event(&fixture.codex, |ev| { matches!(ev, EventMsg::McpToolCallEnd(_)) }) .await; let EventMsg::McpToolCallEnd(end) = end_event else { unreachable!("event guard guarantees McpToolCallEnd"); }; let result = end .result .as_ref() .expect("rmcp echo tool should return success"); assert_eq!(result.is_error, Some(false)); assert!( result.content.is_empty(), "content should default to an empty array" ); let structured = result
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/resume_warning.rs
codex-rs/core/tests/suite/resume_warning.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::ConversationManager; use codex_core::NewConversation; use codex_core::protocol::EventMsg; use codex_core::protocol::InitialHistory; use codex_core::protocol::ResumedHistory; use codex_core::protocol::RolloutItem; use codex_core::protocol::TurnContextItem; use codex_core::protocol::WarningEvent; use codex_protocol::ConversationId; use core::time::Duration; use core_test_support::load_default_config_for_test; use core_test_support::wait_for_event; use tempfile::TempDir; fn resume_history( config: &codex_core::config::Config, previous_model: &str, rollout_path: &std::path::Path, ) -> InitialHistory { let turn_ctx = TurnContextItem { cwd: config.cwd.clone(), approval_policy: config.approval_policy.value(), sandbox_policy: config.sandbox_policy.get().clone(), model: previous_model.to_string(), effort: config.model_reasoning_effort, summary: config.model_reasoning_summary, base_instructions: None, user_instructions: None, developer_instructions: None, final_output_json_schema: None, truncation_policy: None, }; InitialHistory::Resumed(ResumedHistory { conversation_id: ConversationId::default(), history: vec![RolloutItem::TurnContext(turn_ctx)], rollout_path: rollout_path.to_path_buf(), }) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn emits_warning_when_resumed_model_differs() { // Arrange a config with a current model and a prior rollout recorded under a different model. let home = TempDir::new().expect("tempdir"); let mut config = load_default_config_for_test(&home).await; config.model = Some("current-model".to_string()); // Ensure cwd is absolute (the helper sets it to the temp dir already). 
assert!(config.cwd.is_absolute()); let rollout_path = home.path().join("rollout.jsonl"); std::fs::write(&rollout_path, "").expect("create rollout placeholder"); let initial_history = resume_history(&config, "previous-model", &rollout_path); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("test"), config.model_provider.clone(), ); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("test")); // Act: resume the conversation. let NewConversation { conversation, .. } = conversation_manager .resume_conversation_with_history(config, initial_history, auth_manager) .await .expect("resume conversation"); // Assert: a Warning event is emitted describing the model mismatch. let warning = wait_for_event(&conversation, |ev| matches!(ev, EventMsg::Warning(_))).await; let EventMsg::Warning(WarningEvent { message }) = warning else { panic!("expected warning event"); }; assert!(message.contains("previous-model")); assert!(message.contains("current-model")); // Drain the TaskComplete/Shutdown window to avoid leaking tasks between tests. // The warning is emitted during initialization, so a short sleep is sufficient. tokio::time::sleep(Duration::from_millis(50)).await; }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/compact_resume_fork.rs
codex-rs/core/tests/suite/compact_resume_fork.rs
#![allow(clippy::expect_used)] //! Integration tests that cover compacting, resuming, and forking conversations. //! //! Each test sets up a mocked SSE conversation and drives the conversation through //! a specific sequence of operations. After every operation we capture the //! request payload that Codex would send to the model and assert that the //! model-visible history matches the expected sequence of messages. use super::compact::COMPACT_WARNING_MESSAGE; use super::compact::FIRST_REPLY; use super::compact::SUMMARY_TEXT; use codex_core::CodexAuth; use codex_core::CodexConversation; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::NewConversation; use codex_core::built_in_model_providers; use codex_core::compact::SUMMARIZATION_PROMPT; use codex_core::config::Config; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::WarningEvent; use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::get_responses_request_bodies; use core_test_support::responses::mount_sse_once_match; use core_test_support::responses::sse; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use serde_json::Value; use serde_json::json; use std::sync::Arc; use tempfile::TempDir; use wiremock::MockServer; const AFTER_SECOND_RESUME: &str = "AFTER_SECOND_RESUME"; fn network_disabled() -> bool { std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() } fn body_contains_text(body: &str, text: &str) -> bool { body.contains(&json_fragment(text)) } fn json_fragment(text: &str) -> String { serde_json::to_string(text) .expect("serialize text to JSON") .trim_matches('"') .to_string() } fn filter_out_ghost_snapshot_entries(items: &[Value]) -> Vec<Value> { items 
.iter() .filter(|item| !is_ghost_snapshot_message(item)) .cloned() .collect() } fn is_ghost_snapshot_message(item: &Value) -> bool { if item.get("type").and_then(Value::as_str) != Some("message") { return false; } if item.get("role").and_then(Value::as_str) != Some("user") { return false; } item.get("content") .and_then(Value::as_array) .and_then(|content| content.first()) .and_then(|entry| entry.get("text")) .and_then(Value::as_str) .is_some_and(|text| text.trim_start().starts_with("<ghost_snapshot>")) } fn normalize_line_endings_str(text: &str) -> String { if text.contains('\r') { text.replace("\r\n", "\n").replace('\r', "\n") } else { text.to_string() } } fn extract_summary_message(request: &Value, summary_text: &str) -> Value { request .get("input") .and_then(Value::as_array) .and_then(|items| { items.iter().find(|item| { item.get("type").and_then(Value::as_str) == Some("message") && item.get("role").and_then(Value::as_str) == Some("user") && item .get("content") .and_then(Value::as_array) .and_then(|arr| arr.first()) .and_then(|entry| entry.get("text")) .and_then(Value::as_str) .map(|text| text.contains(summary_text)) .unwrap_or(false) }) }) .cloned() .unwrap_or_else(|| panic!("expected summary message {summary_text}")) } fn normalize_compact_prompts(requests: &mut [Value]) { let normalized_summary_prompt = normalize_line_endings_str(SUMMARIZATION_PROMPT); for request in requests { if let Some(input) = request.get_mut("input").and_then(Value::as_array_mut) { input.retain(|item| { if item.get("type").and_then(Value::as_str) != Some("message") || item.get("role").and_then(Value::as_str) != Some("user") { return true; } let content = item .get("content") .and_then(Value::as_array) .cloned() .unwrap_or_default(); if let Some(first) = content.first() { let text = first .get("text") .and_then(Value::as_str) .unwrap_or_default(); let normalized_text = normalize_line_endings_str(text); !(text.is_empty() || normalized_text == normalized_summary_prompt) } else { false } 
}); } } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] /// Scenario: compact an initial conversation, resume it, fork one turn back, and /// ensure the model-visible history matches expectations at each request. async fn compact_resume_and_fork_preserve_model_history_view() { if network_disabled() { println!("Skipping test because network is disabled in this sandbox"); return; } // 1. Arrange mocked SSE responses for the initial compact/resume/fork flow. let server = MockServer::start().await; mount_initial_flow(&server).await; let expected_model = "gpt-5.1-codex"; // 2. Start a new conversation and drive it through the compact/resume/fork steps. let (_home, config, manager, base) = start_test_conversation(&server, Some(expected_model)).await; user_turn(&base, "hello world").await; compact_conversation(&base).await; user_turn(&base, "AFTER_COMPACT").await; let base_path = fetch_conversation_path(&base).await; assert!( base_path.exists(), "compact+resume test expects base path {base_path:?} to exist", ); let resumed = resume_conversation(&manager, &config, base_path).await; user_turn(&resumed, "AFTER_RESUME").await; let resumed_path = fetch_conversation_path(&resumed).await; assert!( resumed_path.exists(), "compact+resume test expects resumed path {resumed_path:?} to exist", ); let forked = fork_conversation(&manager, &config, resumed_path, 2).await; user_turn(&forked, "AFTER_FORK").await; // 3. Capture the requests to the model and validate the history slices. 
let mut requests = gather_request_bodies(&server).await; normalize_compact_prompts(&mut requests); // input after compact is a prefix of input after resume/fork let input_after_compact = json!(requests[requests.len() - 3]["input"]); let input_after_resume = json!(requests[requests.len() - 2]["input"]); let input_after_fork = json!(requests[requests.len() - 1]["input"]); let compact_arr = input_after_compact .as_array() .expect("input after compact should be an array"); let resume_arr = input_after_resume .as_array() .expect("input after resume should be an array"); let fork_arr = input_after_fork .as_array() .expect("input after fork should be an array"); assert!( compact_arr.len() <= resume_arr.len(), "after-resume input should have at least as many items as after-compact", ); assert_eq!(compact_arr.as_slice(), &resume_arr[..compact_arr.len()]); assert!( compact_arr.len() <= fork_arr.len(), "after-fork input should have at least as many items as after-compact", ); assert_eq!( &compact_arr.as_slice()[..compact_arr.len()], &fork_arr[..compact_arr.len()] ); let expected_model = requests[0]["model"] .as_str() .unwrap_or_default() .to_string(); let prompt = requests[0]["instructions"] .as_str() .unwrap_or_default() .to_string(); let user_instructions = requests[0]["input"][0]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); let environment_context = requests[0]["input"][1]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); let tool_calls = json!(requests[0]["tools"].as_array()); let prompt_cache_key = requests[0]["prompt_cache_key"] .as_str() .unwrap_or_default() .to_string(); let fork_prompt_cache_key = requests[requests.len() - 1]["prompt_cache_key"] .as_str() .unwrap_or_default() .to_string(); let summary_after_compact = extract_summary_message(&requests[2], SUMMARY_TEXT); let summary_after_resume = extract_summary_message(&requests[3], SUMMARY_TEXT); let summary_after_fork = extract_summary_message(&requests[4], SUMMARY_TEXT); let 
user_turn_1 = json!( { "model": expected_model, "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_context } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello world" } ] } ], "tools": tool_calls, "tool_choice": "auto", "parallel_tool_calls": false, "reasoning": { "summary": "auto" }, "store": false, "stream": true, "include": [ "reasoning.encrypted_content" ], "prompt_cache_key": prompt_cache_key }); let compact_1 = json!( { "model": expected_model, "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_context } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello world" } ] }, { "type": "message", "role": "assistant", "content": [ { "type": "output_text", "text": "FIRST_REPLY" } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": SUMMARIZATION_PROMPT } ] } ], "tools": [], "tool_choice": "auto", "parallel_tool_calls": false, "reasoning": { "summary": "auto" }, "store": false, "stream": true, "include": [ "reasoning.encrypted_content" ], "prompt_cache_key": prompt_cache_key }); let user_turn_2_after_compact = json!( { "model": expected_model, "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_context } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello world" } ] }, summary_after_compact, { "type": "message", "role": "user", "content": [ { "type": "input_text", 
"text": "AFTER_COMPACT" } ] } ], "tools": tool_calls, "tool_choice": "auto", "parallel_tool_calls": false, "reasoning": { "summary": "auto" }, "store": false, "stream": true, "include": [ "reasoning.encrypted_content" ], "prompt_cache_key": prompt_cache_key }); let usert_turn_3_after_resume = json!( { "model": expected_model, "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_context } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello world" } ] }, summary_after_resume, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_COMPACT" } ] }, { "type": "message", "role": "assistant", "content": [ { "type": "output_text", "text": "AFTER_COMPACT_REPLY" } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_RESUME" } ] } ], "tools": tool_calls, "tool_choice": "auto", "parallel_tool_calls": false, "reasoning": { "summary": "auto" }, "store": false, "stream": true, "include": [ "reasoning.encrypted_content" ], "prompt_cache_key": prompt_cache_key }); let user_turn_3_after_fork = json!( { "model": expected_model, "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_context } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "hello world" } ] }, summary_after_fork, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_COMPACT" } ] }, { "type": "message", "role": "assistant", "content": [ { "type": "output_text", "text": "AFTER_COMPACT_REPLY" } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", 
"text": "AFTER_FORK" } ] } ], "tools": tool_calls, "tool_choice": "auto", "parallel_tool_calls": false, "reasoning": { "summary": "auto" }, "store": false, "stream": true, "include": [ "reasoning.encrypted_content" ], "prompt_cache_key": fork_prompt_cache_key }); let mut expected = json!([ user_turn_1, compact_1, user_turn_2_after_compact, usert_turn_3_after_resume, user_turn_3_after_fork ]); normalize_line_endings(&mut expected); if let Some(arr) = expected.as_array_mut() { normalize_compact_prompts(arr); } assert_eq!(requests.len(), 5); assert_eq!(json!(requests), expected); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] /// Scenario: after the forked branch is compacted, resuming again should reuse /// the compacted history and only append the new user message. async fn compact_resume_after_second_compaction_preserves_history() { if network_disabled() { println!("Skipping test because network is disabled in this sandbox"); return; } // 1. Arrange mocked SSE responses for the initial flow plus the second compact. let server = MockServer::start().await; mount_initial_flow(&server).await; mount_second_compact_flow(&server).await; // 2. Drive the conversation through compact -> resume -> fork -> compact -> resume. 
let (_home, config, manager, base) = start_test_conversation(&server, None).await; user_turn(&base, "hello world").await; compact_conversation(&base).await; user_turn(&base, "AFTER_COMPACT").await; let base_path = fetch_conversation_path(&base).await; assert!( base_path.exists(), "second compact test expects base path {base_path:?} to exist", ); let resumed = resume_conversation(&manager, &config, base_path).await; user_turn(&resumed, "AFTER_RESUME").await; let resumed_path = fetch_conversation_path(&resumed).await; assert!( resumed_path.exists(), "second compact test expects resumed path {resumed_path:?} to exist", ); let forked = fork_conversation(&manager, &config, resumed_path, 3).await; user_turn(&forked, "AFTER_FORK").await; compact_conversation(&forked).await; user_turn(&forked, "AFTER_COMPACT_2").await; let forked_path = fetch_conversation_path(&forked).await; assert!( forked_path.exists(), "second compact test expects forked path {forked_path:?} to exist", ); let resumed_again = resume_conversation(&manager, &config, forked_path).await; user_turn(&resumed_again, AFTER_SECOND_RESUME).await; let mut requests = gather_request_bodies(&server).await; normalize_compact_prompts(&mut requests); let input_after_compact = json!(requests[requests.len() - 2]["input"]); let input_after_resume = json!(requests[requests.len() - 1]["input"]); // test input after compact before resume is the same as input after resume let compact_input_array = input_after_compact .as_array() .expect("input after compact should be an array"); let resume_input_array = input_after_resume .as_array() .expect("input after resume should be an array"); let compact_filtered = filter_out_ghost_snapshot_entries(compact_input_array); let resume_filtered = filter_out_ghost_snapshot_entries(resume_input_array); assert!( compact_filtered.len() <= resume_filtered.len(), "after-resume input should have at least as many items as after-compact" ); assert_eq!( compact_filtered.as_slice(), 
&resume_filtered[..compact_filtered.len()] ); // hard coded test let prompt = requests[0]["instructions"] .as_str() .unwrap_or_default() .to_string(); let user_instructions = requests[0]["input"][0]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); let environment_instructions = requests[0]["input"][1]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); // Build expected final request input: initial context + forked user message + // compacted summary + post-compact user message + resumed user message. let summary_after_second_compact = extract_summary_message(&requests[requests.len() - 3], SUMMARY_TEXT); let mut expected = json!([ { "instructions": prompt, "input": [ { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": user_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": environment_instructions } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_FORK" } ] }, summary_after_second_compact, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_COMPACT_2" } ] }, { "type": "message", "role": "user", "content": [ { "type": "input_text", "text": "AFTER_SECOND_RESUME" } ] } ], } ]); normalize_line_endings(&mut expected); let mut last_request_after_2_compacts = json!([{ "instructions": requests[requests.len() -1]["instructions"], "input": requests[requests.len() -1]["input"], }]); if let Some(arr) = expected.as_array_mut() { normalize_compact_prompts(arr); } if let Some(arr) = last_request_after_2_compacts.as_array_mut() { normalize_compact_prompts(arr); } assert_eq!(expected, last_request_after_2_compacts); } fn normalize_line_endings(value: &mut Value) { match value { Value::String(text) => { if text.contains('\r') { *text = text.replace("\r\n", "\n").replace('\r', "\n"); } } Value::Array(items) => { for item in items { normalize_line_endings(item); } } Value::Object(map) => { 
for item in map.values_mut() { normalize_line_endings(item); } } _ => {} } } async fn gather_request_bodies(server: &MockServer) -> Vec<Value> { let mut bodies = get_responses_request_bodies(server).await; for body in &mut bodies { normalize_line_endings(body); } bodies } async fn mount_initial_flow(server: &MockServer) { let sse1 = sse(vec![ ev_assistant_message("m1", FIRST_REPLY), ev_completed("r1"), ]); let sse2 = sse(vec![ ev_assistant_message("m2", SUMMARY_TEXT), ev_completed("r2"), ]); let sse3 = sse(vec![ ev_assistant_message("m3", "AFTER_COMPACT_REPLY"), ev_completed("r3"), ]); let sse4 = sse(vec![ev_completed("r4")]); let sse5 = sse(vec![ev_completed("r5")]); let match_first = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body.contains("\"text\":\"hello world\"") && !body.contains(&format!("\"text\":\"{SUMMARY_TEXT}\"")) && !body.contains("\"text\":\"AFTER_COMPACT\"") && !body.contains("\"text\":\"AFTER_RESUME\"") && !body.contains("\"text\":\"AFTER_FORK\"") }; mount_sse_once_match(server, match_first, sse1).await; let match_compact = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body_contains_text(body, SUMMARIZATION_PROMPT) || body.contains(&json_fragment(FIRST_REPLY)) }; mount_sse_once_match(server, match_compact, sse2).await; let match_after_compact = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body.contains("\"text\":\"AFTER_COMPACT\"") && !body.contains("\"text\":\"AFTER_RESUME\"") && !body.contains("\"text\":\"AFTER_FORK\"") }; mount_sse_once_match(server, match_after_compact, sse3).await; let match_after_resume = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body.contains("\"text\":\"AFTER_RESUME\"") }; mount_sse_once_match(server, match_after_resume, sse4).await; let match_after_fork = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); 
body.contains("\"text\":\"AFTER_FORK\"") }; mount_sse_once_match(server, match_after_fork, sse5).await; } async fn mount_second_compact_flow(server: &MockServer) { let sse6 = sse(vec![ ev_assistant_message("m4", SUMMARY_TEXT), ev_completed("r6"), ]); let sse7 = sse(vec![ev_completed("r7")]); let match_second_compact = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body.contains("AFTER_FORK") }; mount_sse_once_match(server, match_second_compact, sse6).await; let match_after_second_resume = |req: &wiremock::Request| { let body = std::str::from_utf8(&req.body).unwrap_or(""); body.contains(&format!("\"text\":\"{AFTER_SECOND_RESUME}\"")) }; mount_sse_once_match(server, match_after_second_resume, sse7).await; } async fn start_test_conversation( server: &MockServer, model: Option<&str>, ) -> (TempDir, Config, ConversationManager, Arc<CodexConversation>) { let model_provider = ModelProviderInfo { name: "Non-OpenAI Model provider".into(), base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let home = TempDir::new().expect("create temp dir"); let mut config = load_default_config_for_test(&home).await; config.model_provider = model_provider; config.compact_prompt = Some(SUMMARIZATION_PROMPT.to_string()); if let Some(model) = model { config.model = Some(model.to_string()); } let manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), ); let NewConversation { conversation, .. 
} = manager .new_conversation(config.clone()) .await .expect("create conversation"); (home, config, manager, conversation) } async fn user_turn(conversation: &Arc<CodexConversation>, text: &str) { conversation .submit(Op::UserInput { items: vec![UserInput::Text { text: text.into() }], }) .await .expect("submit user turn"); wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; } async fn compact_conversation(conversation: &Arc<CodexConversation>) { conversation .submit(Op::Compact) .await .expect("compact conversation"); let warning_event = wait_for_event(conversation, |ev| matches!(ev, EventMsg::Warning(_))).await; let EventMsg::Warning(WarningEvent { message }) = warning_event else { panic!("expected warning event after compact"); }; assert_eq!(message, COMPACT_WARNING_MESSAGE); wait_for_event(conversation, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; } async fn fetch_conversation_path(conversation: &Arc<CodexConversation>) -> std::path::PathBuf { conversation.rollout_path() } async fn resume_conversation( manager: &ConversationManager, config: &Config, path: std::path::PathBuf, ) -> Arc<CodexConversation> { let auth_manager = codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("dummy")); let NewConversation { conversation, .. } = manager .resume_conversation_from_rollout(config.clone(), path, auth_manager) .await .expect("resume conversation"); conversation } #[cfg(test)] async fn fork_conversation( manager: &ConversationManager, config: &Config, path: std::path::PathBuf, nth_user_message: usize, ) -> Arc<CodexConversation> { let NewConversation { conversation, .. } = manager .fork_conversation(nth_user_message, config.clone(), path) .await .expect("fork conversation"); conversation }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/user_shell_cmd.rs
codex-rs/core/tests/suite/user_shell_cmd.rs
use anyhow::Context;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::ExecCommandSource;
use codex_core::protocol::ExecOutputStream;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol::TurnAbortReason;
use core_test_support::assert_regex_match;
use core_test_support::load_default_config_for_test;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_function_call;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use core_test_support::wait_for_event_match;
use regex_lite::escape;
use std::path::PathBuf;
use tempfile::TempDir;

/// `Op::RunUserShellCommand` executes in the configured cwd: `ls` must list a
/// file created there, and `cat` must echo that file's contents verbatim.
#[tokio::test]
async fn user_shell_cmd_ls_and_cat_in_temp_dir() {
    // Create a temporary working directory with a known file.
    let cwd = TempDir::new().unwrap();
    let file_name = "hello.txt";
    let file_path: PathBuf = cwd.path().join(file_name);
    let contents = "hello from bang test\n";
    tokio::fs::write(&file_path, contents)
        .await
        .expect("write temp file");

    // Load config and pin cwd to the temp dir so ls/cat operate there.
    let codex_home = TempDir::new().unwrap();
    let mut config = load_default_config_for_test(&codex_home).await;
    config.cwd = cwd.path().to_path_buf();
    let conversation_manager = ConversationManager::with_models_provider(
        codex_core::CodexAuth::from_api_key("dummy"),
        config.model_provider.clone(),
    );
    let NewConversation {
        conversation: codex,
        ..
    } = conversation_manager
        .new_conversation(config)
        .await
        .expect("create new conversation");

    // 1) shell command should list the file
    let list_cmd = "ls".to_string();
    codex
        .submit(Op::RunUserShellCommand { command: list_cmd })
        .await
        .unwrap();
    let msg = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecCommandEnd(_))).await;
    let EventMsg::ExecCommandEnd(ExecCommandEndEvent {
        stdout, exit_code, ..
    }) = msg
    else {
        unreachable!()
    };
    assert_eq!(exit_code, 0);
    assert!(
        stdout.contains(file_name),
        "ls output should include {file_name}, got: {stdout:?}"
    );

    // 2) shell command should print the file contents verbatim
    let cat_cmd = format!("cat {file_name}");
    codex
        .submit(Op::RunUserShellCommand { command: cat_cmd })
        .await
        .unwrap();
    let msg = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecCommandEnd(_))).await;
    let EventMsg::ExecCommandEnd(ExecCommandEndEvent {
        mut stdout,
        exit_code,
        ..
    }) = msg
    else {
        unreachable!()
    };
    assert_eq!(exit_code, 0);
    if cfg!(windows) {
        // Windows shells emit CRLF line endings; normalize so the assertion remains portable.
        stdout = stdout.replace("\r\n", "\n");
    }
    assert_eq!(stdout, contents);
}

/// An in-flight user shell command must abort when `Op::Interrupt` arrives,
/// surfacing a `TurnAborted` event with reason `Interrupted`.
#[tokio::test]
async fn user_shell_cmd_can_be_interrupted() {
    // Set up isolated config and conversation.
    let codex_home = TempDir::new().unwrap();
    let config = load_default_config_for_test(&codex_home).await;
    let conversation_manager = ConversationManager::with_models_provider(
        codex_core::CodexAuth::from_api_key("dummy"),
        config.model_provider.clone(),
    );
    let NewConversation {
        conversation: codex,
        ..
    } = conversation_manager
        .new_conversation(config)
        .await
        .expect("create new conversation");

    // Start a long-running command and then interrupt it.
    let sleep_cmd = "sleep 5".to_string();
    codex
        .submit(Op::RunUserShellCommand { command: sleep_cmd })
        .await
        .unwrap();

    // Wait until it has started (ExecCommandBegin), then interrupt.
    let _ = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecCommandBegin(_))).await;
    codex.submit(Op::Interrupt).await.unwrap();

    // Expect a TurnAborted(Interrupted) notification.
    let msg = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnAborted(_))).await;
    let EventMsg::TurnAborted(ev) = msg else {
        unreachable!()
    };
    assert_eq!(ev.reason, TurnAbortReason::Interrupted);
}

/// A user shell command's begin/delta/end events are observed locally, and the
/// command plus its result are then recorded in history and sent to the model
/// on the next turn inside a `<user_shell_command>` block.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_shell_command_history_is_persisted_and_shared_with_model() -> anyhow::Result<()> {
    let server = responses::start_mock_server().await;
    let mut builder = core_test_support::test_codex::test_codex();
    let test = builder.build(&server).await?;

    // Platform-specific probe that prints CODEX_SANDBOX (or "not-set") with no
    // trailing newline, so stdout can be compared exactly.
    #[cfg(windows)]
    let command = r#"$val = $env:CODEX_SANDBOX; if ([string]::IsNullOrEmpty($val)) { $val = 'not-set' } ; [System.Console]::Write($val)"#.to_string();
    #[cfg(not(windows))]
    let command = r#"sh -c "printf '%s' \"${CODEX_SANDBOX:-not-set}\"""#.to_string();

    test.codex
        .submit(Op::RunUserShellCommand {
            command: command.clone(),
        })
        .await?;

    let begin_event = wait_for_event_match(&test.codex, |ev| match ev {
        EventMsg::ExecCommandBegin(event) => Some(event.clone()),
        _ => None,
    })
    .await;
    assert_eq!(begin_event.source, ExecCommandSource::UserShell);
    // The begin event may carry the command either as the last argv element or
    // as its shlex-split form; accept either representation.
    let matches_last_arg = begin_event.command.last() == Some(&command);
    let matches_split = shlex::split(&command).is_some_and(|split| split == begin_event.command);
    assert!(
        matches_last_arg || matches_split,
        "user command begin event should include the original command; got: {:?}",
        begin_event.command
    );

    let delta_event = wait_for_event_match(&test.codex, |ev| match ev {
        EventMsg::ExecCommandOutputDelta(event) => Some(event.clone()),
        _ => None,
    })
    .await;
    assert_eq!(delta_event.stream, ExecOutputStream::Stdout);
    let chunk_text =
        String::from_utf8(delta_event.chunk.clone()).expect("user command chunk is valid utf-8");
    assert_eq!(chunk_text.trim(), "not-set");

    let end_event = wait_for_event_match(&test.codex, |ev| match ev {
        EventMsg::ExecCommandEnd(event) => Some(event.clone()),
        _ => None,
    })
    .await;
    assert_eq!(end_event.exit_code, 0);
    assert_eq!(end_event.stdout.trim(), "not-set");
    let _ = wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    // Mount a canned model reply, then run a follow-up turn and inspect the
    // request body the client sent.
    let responses = vec![responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "done"),
        responses::ev_completed("resp-1"),
    ])];
    let mock = responses::mount_sse_sequence(&server, responses).await;
    test.submit_turn("follow-up after shell command").await?;
    let request = mock.single_request();
    let command_message = request
        .message_input_texts("user")
        .into_iter()
        .find(|text| text.contains("<user_shell_command>"))
        .expect("command message recorded in request");
    let command_message = command_message.replace("\r\n", "\n");
    let escaped_command = escape(&command);
    // Duration varies per run, so it is matched with a numeric pattern.
    let expected_pattern = format!(
        r"(?m)\A<user_shell_command>\n<command>\n{escaped_command}\n</command>\n<result>\nExit code: 0\nDuration: [0-9]+(?:\.[0-9]+)? seconds\nOutput:\nnot-set\n</result>\n</user_shell_command>\z"
    );
    assert_regex_match(&expected_pattern, &command_message);
    Ok(())
}

/// With `tool_output_token_limit` set, oversized user shell command output is
/// truncated (head + tail kept, middle elided) before being shared with the
/// model in history.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[cfg(not(target_os = "windows"))] // TODO: unignore on windows
async fn user_shell_command_output_is_truncated_in_history() -> anyhow::Result<()> {
    let server = responses::start_mock_server().await;
    let builder = core_test_support::test_codex::test_codex();
    let test = builder
        .with_config(|config| {
            // Small limit to force truncation of the 400-line output below.
            config.tool_output_token_limit = Some(100);
        })
        .build(&server)
        .await?;

    // Emit the numbers 1..=400, one per line.
    #[cfg(windows)]
    let command = r#"for ($i=1; $i -le 400; $i++) { Write-Output $i }"#.to_string();
    #[cfg(not(windows))]
    let command = "seq 1 400".to_string();

    test.codex
        .submit(Op::RunUserShellCommand {
            command: command.clone(),
        })
        .await?;

    let end_event = wait_for_event_match(&test.codex, |ev| match ev {
        EventMsg::ExecCommandEnd(event) => Some(event.clone()),
        _ => None,
    })
    .await;
    assert_eq!(end_event.exit_code, 0);
    let _ = wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;

    let responses = vec![responses::sse(vec![
        responses::ev_response_created("resp-1"),
        responses::ev_assistant_message("msg-1", "done"),
        responses::ev_completed("resp-1"),
    ])];
    let mock = responses::mount_sse_sequence(&server, responses).await;
    test.submit_turn("follow-up after shell command").await?;
    let request = mock.single_request();
    let command_message = request
        .message_input_texts("user")
        .into_iter()
        .find(|text| text.contains("<user_shell_command>"))
        .expect("command message recorded in request");
    let command_message = command_message.replace("\r\n", "\n");

    // Expected truncated shape: lines 1..=69 kept, 70..=351 elided with a
    // token-count marker, lines 352..=400 kept.
    let head = (1..=69).map(|i| format!("{i}\n")).collect::<String>();
    let tail = (352..=400).map(|i| format!("{i}\n")).collect::<String>();
    let truncated_body =
        format!("Total output lines: 400\n\n{head}70…273 tokens truncated…351\n{tail}");
    let escaped_command = escape(&command);
    let escaped_truncated_body = escape(&truncated_body);
    let expected_pattern = format!(
        r"(?m)\A<user_shell_command>\n<command>\n{escaped_command}\n</command>\n<result>\nExit code: 0\nDuration: [0-9]+(?:\.[0-9]+)? seconds\nOutput:\n{escaped_truncated_body}\n</result>\n</user_shell_command>\z"
    );
    assert_regex_match(&expected_pattern, &command_message);
    Ok(())
}

/// A model-initiated `shell_command` call with oversized output must be
/// truncated exactly once — the recorded output carries a single
/// "Total output lines:" header, never a nested double truncation.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_shell_command_is_truncated_only_once() -> anyhow::Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let mut builder = test_codex()
        .with_model("gpt-5.1-codex")
        .with_config(|config| {
            config.tool_output_token_limit = Some(100);
        });
    let fixture = builder.build(&server).await?;

    let call_id = "user-shell-double-truncation";
    // 2000 output lines guarantees the 100-token limit is exceeded.
    let args = if cfg!(windows) {
        serde_json::json!({
            "command": "for ($i=1; $i -le 2000; $i++) { Write-Output $i }",
            "timeout_ms": 5_000,
        })
    } else {
        serde_json::json!({
            "command": "seq 1 2000",
            "timeout_ms": 5_000,
        })
    };

    // First response: the model asks to run the shell_command tool.
    mount_sse_once(
        &server,
        sse(vec![
            ev_response_created("resp-1"),
            ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?),
            ev_completed("resp-1"),
        ]),
    )
    .await;
    // Second response: the model acknowledges the tool output.
    let mock2 = mount_sse_once(
        &server,
        sse(vec![
            ev_assistant_message("msg-1", "done"),
            ev_completed("resp-2"),
        ]),
    )
    .await;

    fixture
        .submit_turn_with_policy(
            "trigger big shell_command output",
            SandboxPolicy::DangerFullAccess,
        )
        .await?;

    let output = mock2
        .single_request()
        .function_call_output_text(call_id)
        .context("function_call_output present for shell_command call")?;
    let truncation_headers = output.matches("Total output lines:").count();
    assert_eq!(
        truncation_headers, 1,
        "shell_command output should carry only one truncation header: {output}"
    );
    Ok(())
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/model_tools.rs
codex-rs/core/tests/suite/model_tools.rs
#![allow(clippy::unwrap_used)]
use core_test_support::load_sse_fixture_with_id;
use core_test_support::responses;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;

/// Loads the canned "response completed" SSE fixture, substituting `id` so the
/// stream identifies the model under test.
fn sse_completed(id: &str) -> String {
    load_sse_fixture_with_id("tests/fixtures/completed_template.json", id)
}

/// Extracts the identifier of every tool advertised in a Responses API request
/// body: a tool's `name` when present, otherwise its `type` (built-in tools
/// such as `local_shell` carry only a type).
#[allow(clippy::expect_used)]
fn tool_identifiers(body: &serde_json::Value) -> Vec<String> {
    body["tools"]
        .as_array()
        .unwrap()
        .iter()
        .map(|tool| {
            tool.get("name")
                .and_then(|v| v.as_str())
                .or_else(|| tool.get("type").and_then(|v| v.as_str()))
                .map(std::string::ToString::to_string)
                .expect("tool should have either name or type")
        })
        .collect()
}

/// Runs a single turn against a mock server with `model` configured and
/// returns the tool identifiers the client included in the outgoing request.
#[allow(clippy::expect_used)]
async fn collect_tool_identifiers_for_model(model: &str) -> Vec<String> {
    let server = start_mock_server().await;
    let sse = sse_completed(model);
    let resp_mock = responses::mount_sse_once(&server, sse).await;
    let mut builder = test_codex().with_model(model);
    let test = builder
        .build(&server)
        .await
        .expect("create test Codex conversation");
    test.submit_turn("hello tools").await.expect("submit turn");
    let body = resp_mock.single_request().body_json();
    tool_identifiers(&body)
}

/// Each model family must advertise exactly its expected tool set; order of
/// tools in the request is significant and asserted.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn model_selects_expected_tools() {
    skip_if_no_network!();
    use pretty_assertions::assert_eq;
    let codex_tools = collect_tool_identifiers_for_model("codex-mini-latest").await;
    assert_eq!(
        codex_tools,
        vec![
            "local_shell".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "view_image".to_string()
        ],
        "codex-mini-latest should expose the local shell tool",
    );
    let gpt5_codex_tools = collect_tool_identifiers_for_model("gpt-5-codex").await;
    assert_eq!(
        gpt5_codex_tools,
        vec![
            "shell_command".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "apply_patch".to_string(),
            "view_image".to_string()
        ],
        "gpt-5-codex should expose the apply_patch tool",
    );
    let gpt51_codex_tools = collect_tool_identifiers_for_model("gpt-5.1-codex").await;
    assert_eq!(
        gpt51_codex_tools,
        vec![
            "shell_command".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "apply_patch".to_string(),
            "view_image".to_string()
        ],
        "gpt-5.1-codex should expose the apply_patch tool",
    );
    let gpt5_tools = collect_tool_identifiers_for_model("gpt-5").await;
    assert_eq!(
        gpt5_tools,
        vec![
            "shell".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "view_image".to_string()
        ],
        // Fixed message: the expected list for gpt-5 deliberately contains no
        // apply_patch entry, so the failure text must not claim otherwise.
        "gpt-5 should not expose the apply_patch tool",
    );
    let gpt51_tools = collect_tool_identifiers_for_model("gpt-5.1").await;
    assert_eq!(
        gpt51_tools,
        vec![
            "shell_command".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "apply_patch".to_string(),
            "view_image".to_string()
        ],
        "gpt-5.1 should expose the apply_patch tool",
    );
    let exp_tools = collect_tool_identifiers_for_model("exp-5.1").await;
    assert_eq!(
        exp_tools,
        vec![
            "exec_command".to_string(),
            "write_stdin".to_string(),
            "list_mcp_resources".to_string(),
            "list_mcp_resource_templates".to_string(),
            "read_mcp_resource".to_string(),
            "update_plan".to_string(),
            "apply_patch".to_string(),
            "view_image".to_string()
        ],
        "exp-5.1 should expose the apply_patch tool",
    );
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/resume.rs
codex-rs/core/tests/suite/resume.rs
use anyhow::Result;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::user_input::UserInput;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_reasoning_item;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use std::sync::Arc;

/// Resuming a session from its rollout file must replay the recorded
/// user/assistant exchange as `initial_messages`, interleaved with
/// `TokenCount` events.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn resume_includes_initial_messages_from_rollout_events() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let mut builder = test_codex();
    let initial = builder.build(&server).await?;
    let codex = Arc::clone(&initial.codex);
    // Keep home and rollout path so the builder can resume the same session.
    let home = initial.home.clone();
    let rollout_path = initial.session_configured.rollout_path.clone();

    // Canned assistant reply for the first (recorded) turn.
    let initial_sse = sse(vec![
        ev_response_created("resp-initial"),
        ev_assistant_message("msg-1", "Completed first turn"),
        ev_completed("resp-initial"),
    ]);
    mount_sse_once(&server, initial_sse).await;

    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "Record some messages".into(),
            }],
        })
        .await?;
    wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await;

    let resumed = builder.resume(&server, home, rollout_path).await?;
    let initial_messages = resumed
        .session_configured
        .initial_messages
        .expect("expected initial messages to be present for resumed session");
    // The exact event sequence is asserted: user msg, token count, assistant
    // msg, token count.
    match initial_messages.as_slice() {
        [
            EventMsg::UserMessage(first_user),
            EventMsg::TokenCount(_),
            EventMsg::AgentMessage(assistant_message),
            EventMsg::TokenCount(_),
        ] => {
            assert_eq!(first_user.message, "Record some messages");
            assert_eq!(assistant_message.message, "Completed first turn");
        }
        other => panic!("unexpected initial messages after resume: {other:#?}"),
    }
    Ok(())
}

/// With `show_raw_agent_reasoning` enabled, resumed sessions must also replay
/// reasoning summaries and raw reasoning content recorded in the rollout.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn resume_includes_initial_messages_from_reasoning_events() -> Result<()> {
    skip_if_no_network!(Ok(()));
    let server = start_mock_server().await;
    let mut builder = test_codex().with_config(|config| {
        // Required so raw reasoning content is surfaced as events.
        config.show_raw_agent_reasoning = true;
    });
    let initial = builder.build(&server).await?;
    let codex = Arc::clone(&initial.codex);
    let home = initial.home.clone();
    let rollout_path = initial.session_configured.rollout_path.clone();

    // Canned reply that includes a reasoning item with both a summary and raw
    // content, followed by an assistant message.
    let initial_sse = sse(vec![
        ev_response_created("resp-initial"),
        ev_reasoning_item("reason-1", &["Summarized step"], &["raw detail"]),
        ev_assistant_message("msg-1", "Completed reasoning turn"),
        ev_completed("resp-initial"),
    ]);
    mount_sse_once(&server, initial_sse).await;

    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "Record reasoning messages".into(),
            }],
        })
        .await?;
    wait_for_event(&codex, |event| matches!(event, EventMsg::TaskComplete(_))).await;

    let resumed = builder.resume(&server, home, rollout_path).await?;
    let initial_messages = resumed
        .session_configured
        .initial_messages
        .expect("expected initial messages to be present for resumed session");
    match initial_messages.as_slice() {
        [
            EventMsg::UserMessage(first_user),
            EventMsg::TokenCount(_),
            EventMsg::AgentReasoning(reasoning),
            EventMsg::AgentReasoningRawContent(raw),
            EventMsg::AgentMessage(assistant_message),
            EventMsg::TokenCount(_),
        ] => {
            assert_eq!(first_user.message, "Record reasoning messages");
            assert_eq!(reasoning.text, "Summarized step");
            assert_eq!(raw.text, "raw detail");
            assert_eq!(assistant_message.message, "Completed reasoning turn");
        }
        other => panic!("unexpected initial messages after resume: {other:#?}"),
    }
    Ok(())
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/shell_serialization.rs
codex-rs/core/tests/suite/shell_serialization.rs
#![cfg(not(target_os = "windows"))] #![allow(clippy::expect_used)] use anyhow::Result; use codex_core::protocol::SandboxPolicy; use core_test_support::assert_regex_match; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_local_shell_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::ApplyPatchModelOutput; use core_test_support::test_codex::ShellModelOutput; use core_test_support::test_codex::TestCodexBuilder; use core_test_support::test_codex::test_codex; use pretty_assertions::assert_eq; use regex_lite::Regex; use serde_json::Value; use serde_json::json; use std::fs; use test_case::test_case; use crate::suite::apply_patch_cli::apply_patch_harness; use crate::suite::apply_patch_cli::mount_apply_patch; const FIXTURE_JSON: &str = r#"{ "description": "This is an example JSON file.", "foo": "bar", "isTest": true, "testNumber": 123, "testArray": [1, 2, 3], "testObject": { "foo": "bar" } } "#; fn shell_responses( call_id: &str, command: Vec<&str>, output_type: ShellModelOutput, ) -> Result<Vec<String>> { match output_type { ShellModelOutput::ShellCommand => { let command = shlex::try_join(command)?; let parameters = json!({ "command": command, "timeout_ms": 2_000, }); Ok(vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call( call_id, "shell_command", &serde_json::to_string(&parameters)?, ), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]) } ShellModelOutput::Shell => { let parameters = json!({ "command": command, "timeout_ms": 2_000, }); Ok(vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell", 
&serde_json::to_string(&parameters)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]) } ShellModelOutput::LocalShell => Ok(vec![ sse(vec![ ev_response_created("resp-1"), ev_local_shell_call(call_id, "completed", command), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]), } } fn configure_shell_model( builder: TestCodexBuilder, output_type: ShellModelOutput, include_apply_patch_tool: bool, ) -> TestCodexBuilder { let builder = match (output_type, include_apply_patch_tool) { (ShellModelOutput::ShellCommand, _) => builder.with_model("test-gpt-5-codex"), (ShellModelOutput::LocalShell, true) => builder.with_model("gpt-5.1-codex"), (ShellModelOutput::Shell, true) => builder.with_model("gpt-5.1-codex"), (ShellModelOutput::LocalShell, false) => builder.with_model("codex-mini-latest"), (ShellModelOutput::Shell, false) => builder.with_model("gpt-5"), }; builder.with_config(move |config| { config.include_apply_patch_tool = include_apply_patch_tool; }) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_stays_json_without_freeform_apply_patch( output_type: ShellModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, false); let test = builder.build(&server).await?; let call_id = "shell-json"; let responses = shell_responses(call_id, vec!["/bin/echo", "shell json"], output_type)?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the json shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock.last_request().expect("shell output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell 
output string"); let mut parsed: Value = serde_json::from_str(output)?; if let Some(metadata) = parsed.get_mut("metadata").and_then(Value::as_object_mut) { let _ = metadata.remove("duration_seconds"); } assert_eq!( parsed .get("metadata") .and_then(|metadata| metadata.get("exit_code")) .and_then(Value::as_i64), Some(0), "expected zero exit code in unformatted JSON output", ); let stdout = parsed .get("output") .and_then(Value::as_str) .unwrap_or_default(); assert_regex_match(r"(?s)^shell json\n?$", stdout); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::ShellCommand)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_is_structured_with_freeform_apply_patch( output_type: ShellModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, true); let test = builder.build(&server).await?; let call_id = "shell-structured"; let responses = shell_responses(call_id, vec!["/bin/echo", "freeform shell"], output_type)?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the structured shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("structured shell output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("structured output string"); assert!( serde_json::from_str::<Value>(output).is_err(), "expected structured shell output to be plain text", ); let expected_pattern = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Output: freeform shell ?$"; assert_regex_match(expected_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_preserves_fixture_json_without_serialization( output_type: ShellModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, false); let test = builder.build(&server).await?; let fixture_path = test.cwd.path().join("fixture.json"); fs::write(&fixture_path, FIXTURE_JSON)?; let fixture_path_str = fixture_path.to_string_lossy().to_string(); let call_id = "shell-json-fixture"; let responses = shell_responses( call_id, vec!["/usr/bin/sed", "-n", "p", fixture_path_str.as_str()], output_type, )?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "read the fixture JSON with sed", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock.last_request().expect("shell output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell output string"); let mut parsed: Value = serde_json::from_str(output)?; if let Some(metadata) = parsed.get_mut("metadata").and_then(Value::as_object_mut) { let _ = metadata.remove("duration_seconds"); } assert_eq!( parsed .get("metadata") .and_then(|metadata| metadata.get("exit_code")) .and_then(Value::as_i64), Some(0), "expected zero exit code when serialization is disabled", ); let stdout = parsed .get("output") .and_then(Value::as_str) .unwrap_or_default() .to_string(); assert_eq!( stdout, FIXTURE_JSON, "expected shell output to match the fixture contents" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::ShellCommand)] #[test_case(ShellModelOutput::LocalShell)] async fn 
shell_output_structures_fixture_with_serialization( output_type: ShellModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, true); let test = builder.build(&server).await?; let fixture_path = test.cwd.path().join("fixture.json"); fs::write(&fixture_path, FIXTURE_JSON)?; let fixture_path_str = fixture_path.to_string_lossy().to_string(); let call_id = "shell-structured-fixture"; let responses = shell_responses( call_id, vec!["/usr/bin/sed", "-n", "p", fixture_path_str.as_str()], output_type, )?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "read the fixture JSON with structured output", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("structured output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("structured output string"); assert!( serde_json::from_str::<Value>(output).is_err(), "expected structured output to be plain text" ); let (header, body) = output .split_once("Output:\n") .expect("structured output contains an Output section"); assert_regex_match( r"(?s)^Exit code: 0\nWall time: [0-9]+(?:\.[0-9]+)? 
seconds$", header.trim_end(), ); assert_eq!( body, FIXTURE_JSON, "expected Output section to include the fixture contents" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::ShellCommand)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_for_freeform_tool_records_duration( output_type: ShellModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, true); let test = builder.build(&server).await?; let call_id = "shell-structured"; let responses = shell_responses(call_id, vec!["/bin/sh", "-c", "sleep 1"], output_type)?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the structured shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("structured output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("structured output string"); let expected_pattern = r#"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: $"#; assert_regex_match(expected_pattern, output); let wall_time_regex = Regex::new(r"(?m)^Wall (?:time|Clock): ([0-9]+(?:\.[0-9]+)?) 
seconds$") .expect("compile wall time regex"); let wall_time_seconds = wall_time_regex .captures(output) .and_then(|caps| caps.get(1)) .and_then(|value| value.as_str().parse::<f32>().ok()) .expect("expected structured shell output to contain wall time seconds"); assert!( wall_time_seconds > 0.5, "expected wall time to be greater than zero seconds, got {wall_time_seconds}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_reserializes_truncated_content(output_type: ShellModelOutput) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = configure_shell_model(test_codex(), output_type, true).with_config(move |config| { config.tool_output_token_limit = Some(200); }); let test = builder.build(&server).await?; let call_id = "shell-truncated"; let responses = shell_responses(call_id, vec!["/bin/sh", "-c", "seq 1 400"], output_type)?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the truncation shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("truncated output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("truncated output string"); assert!( serde_json::from_str::<Value>(output).is_err(), "expected truncated shell output to be plain text", ); let truncated_pattern = r#"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Total output lines: 400 Output: 1 2 3 4 5 6 .*…46 tokens truncated….* 396 397 398 399 400 $"#; assert_regex_match(truncated_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_custom_tool_output_is_structured( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let call_id = "apply-patch-structured"; let file_name = "structured.txt"; let patch = format!( r#"*** Begin Patch *** Add File: {file_name} +from custom tool *** End Patch "# ); mount_apply_patch(&harness, call_id, &patch, "done", output_type).await; harness .test() .submit_turn_with_policy( "apply the patch via custom tool", SandboxPolicy::DangerFullAccess, ) .await?; let output = harness.apply_patch_output(call_id, output_type).await; let expected_pattern = format!( r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. 
Updated the following files: A {file_name} ?$" ); assert_regex_match(&expected_pattern, output.as_str()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_custom_tool_call_creates_file( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let call_id = "apply-patch-add-file"; let file_name = "custom_tool_apply_patch.txt"; let patch = format!( "*** Begin Patch\n*** Add File: {file_name}\n+custom tool content\n*** End Patch\n" ); mount_apply_patch(&harness, call_id, &patch, "apply_patch done", output_type).await; harness .test() .submit_turn_with_policy( "apply the patch via custom tool to create a file", SandboxPolicy::DangerFullAccess, ) .await?; let output = harness.apply_patch_output(call_id, output_type).await; let expected_pattern = format!( r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. 
Updated the following files: A {file_name} ?$" ); assert_regex_match(&expected_pattern, output.as_str()); let new_file_path = harness.path(file_name); let created_contents = fs::read_to_string(&new_file_path)?; assert_eq!( created_contents, "custom tool content\n", "expected file contents for {file_name}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_custom_tool_call_updates_existing_file( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let call_id = "apply-patch-update-file"; let file_name = "custom_tool_apply_patch_existing.txt"; let file_path = harness.path(file_name); fs::write(&file_path, "before\n")?; let patch = format!( "*** Begin Patch\n*** Update File: {file_name}\n@@\n-before\n+after\n*** End Patch\n" ); mount_apply_patch( &harness, call_id, &patch, "apply_patch update done", output_type, ) .await; harness .test() .submit_turn_with_policy( "apply the patch via custom tool to update a file", SandboxPolicy::DangerFullAccess, ) .await?; let output = harness.apply_patch_output(call_id, output_type).await; let expected_pattern = format!( r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. 
Updated the following files: M {file_name} ?$" ); assert_regex_match(&expected_pattern, output.as_str()); let updated_contents = fs::read_to_string(file_path)?; assert_eq!(updated_contents, "after\n", "expected updated file content"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_custom_tool_call_reports_failure_output( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let call_id = "apply-patch-failure"; let missing_file = "missing_custom_tool_apply_patch.txt"; let patch = format!( "*** Begin Patch\n*** Update File: {missing_file}\n@@\n-before\n+after\n*** End Patch\n" ); mount_apply_patch( &harness, call_id, &patch, "apply_patch failure done", output_type, ) .await; harness .test() .submit_turn_with_policy( "attempt a failing apply_patch via custom tool", SandboxPolicy::DangerFullAccess, ) .await?; let output = harness.apply_patch_output(call_id, output_type).await; let expected_output = format!( "apply_patch verification failed: Failed to read file to update {}/{missing_file}: No such file or directory (os error 2)", harness.cwd().to_string_lossy() ); assert_eq!(output, expected_output.as_str()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ApplyPatchModelOutput::Freeform)] #[test_case(ApplyPatchModelOutput::Function)] #[test_case(ApplyPatchModelOutput::Shell)] #[test_case(ApplyPatchModelOutput::ShellViaHeredoc)] async fn apply_patch_function_call_output_is_structured( output_type: ApplyPatchModelOutput, ) -> Result<()> { skip_if_no_network!(Ok(())); let harness = apply_patch_harness().await?; let call_id = "apply-patch-function"; let file_name = "function_apply_patch.txt"; let patch = format!("*** Begin Patch\n*** Add 
File: {file_name}\n+via function call\n*** End Patch\n"); mount_apply_patch( &harness, call_id, &patch, "apply_patch function done", output_type, ) .await; harness .test() .submit_turn_with_policy( "apply the patch via function-call apply_patch", SandboxPolicy::DangerFullAccess, ) .await?; let output = harness.apply_patch_output(call_id, output_type).await; let expected_pattern = format!( r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: Success. Updated the following files: A {file_name} ?$" ); assert_regex_match(&expected_pattern, output.as_str()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[test_case(ShellModelOutput::Shell)] #[test_case(ShellModelOutput::ShellCommand)] #[test_case(ShellModelOutput::LocalShell)] async fn shell_output_is_structured_for_nonzero_exit(output_type: ShellModelOutput) -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex() .with_model("gpt-5.1-codex") .with_config(move |config| { config.include_apply_patch_tool = true; }); let test = builder.build(&server).await?; let call_id = "shell-nonzero-exit"; let responses = shell_responses(call_id, vec!["/bin/sh", "-c", "exit 42"], output_type)?; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the failing shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock.last_request().expect("shell output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell output string"); let expected_pattern = r"(?s)^Exit code: 42 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Output: ?$"; assert_regex_match(expected_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_output_is_freeform() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(move |config| { config.include_apply_patch_tool = true; }); let test = builder.build(&server).await?; let call_id = "shell-command"; let args = json!({ "command": "echo shell command", "timeout_ms": 1_000, }); let responses = vec![ sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "shell_command done"), ev_completed("resp-2"), ]), ]; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the shell_command script in the user's shell", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("shell_command output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell_command output string"); let expected_pattern = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Output: shell command ?$"; assert_regex_match(expected_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_output_is_not_truncated_under_10k_bytes() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1"); let test = builder.build(&server).await?; let call_id = "shell-command"; let args = json!({ "command": "perl -e 'print \"1\" x 10000'", "timeout_ms": 1000, }); let responses = vec![ sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "shell_command done"), ev_completed("resp-2"), ]), ]; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the shell_command script in the user's shell", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("shell_command output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell_command output string"); let expected_pattern = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Output: 1{10000}$"; assert_regex_match(expected_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_command_output_is_not_truncated_over_10k_bytes() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1"); let test = builder.build(&server).await?; let call_id = "shell-command"; let args = json!({ "command": "perl -e 'print \"1\" x 10001'", "timeout_ms": 1000, }); let responses = vec![ sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_function_call(call_id, "shell_command", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "shell_command done"), ev_completed("resp-2"), ]), ]; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the shell_command script in the user's shell", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("shell_command output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("shell_command output string"); let expected_pattern = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? 
seconds Output: 1*…1 chars truncated…1*$"; assert_regex_match(expected_pattern, output); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn local_shell_call_output_is_structured() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex() .with_model("gpt-5.1-codex") .with_config(|config| { config.include_apply_patch_tool = true; }); let test = builder.build(&server).await?; let call_id = "local-shell-call"; let responses = vec![ sse(vec![ json!({"type": "response.created", "response": {"id": "resp-1"}}), ev_local_shell_call(call_id, "completed", vec!["/bin/echo", "local shell"]), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("resp-2"), ]), ]; let mock = mount_sse_sequence(&server, responses).await; test.submit_turn_with_policy( "run the local shell command", SandboxPolicy::DangerFullAccess, ) .await?; let req = mock .last_request() .expect("local shell output request recorded"); let output_item = req.function_call_output(call_id); let output = output_item .get("output") .and_then(Value::as_str) .expect("local shell output string"); let expected_pattern = r"(?s)^Exit code: 0 Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: local shell ?$"; assert_regex_match(expected_pattern, output); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/list_models.rs
codex-rs/core/tests/suite/list_models.rs
use anyhow::Result; use codex_core::CodexAuth; use codex_core::ConversationManager; use codex_core::built_in_model_providers; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::openai_models::ReasoningEffortPreset; use core_test_support::load_default_config_for_test; use pretty_assertions::assert_eq; use tempfile::tempdir; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn list_models_returns_api_key_models() -> Result<()> { let codex_home = tempdir()?; let config = load_default_config_for_test(&codex_home).await; let manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("sk-test"), built_in_model_providers()["openai"].clone(), ); let models = manager.list_models(&config).await; let expected_models = expected_models_for_api_key(); assert_eq!(expected_models, models); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn list_models_returns_chatgpt_models() -> Result<()> { let codex_home = tempdir()?; let config = load_default_config_for_test(&codex_home).await; let manager = ConversationManager::with_models_provider( CodexAuth::create_dummy_chatgpt_auth_for_testing(), built_in_model_providers()["openai"].clone(), ); let models = manager.list_models(&config).await; let expected_models = expected_models_for_chatgpt(); assert_eq!(expected_models, models); Ok(()) } fn expected_models_for_api_key() -> Vec<ModelPreset> { vec![gpt_5_1_codex_max(), gpt_5_1_codex_mini(), gpt_5_2()] } fn expected_models_for_chatgpt() -> Vec<ModelPreset> { let mut gpt_5_1_codex_max = gpt_5_1_codex_max(); gpt_5_1_codex_max.is_default = false; vec![ gpt_52_codex(), gpt_5_1_codex_max, gpt_5_1_codex_mini(), gpt_5_2(), ] } fn gpt_52_codex() -> ModelPreset { ModelPreset { id: "gpt-5.2-codex".to_string(), model: "gpt-5.2-codex".to_string(), display_name: "gpt-5.2-codex".to_string(), description: "Latest frontier agentic coding model.".to_string(), 
default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ effort( ReasoningEffort::Low, "Fast responses with lighter reasoning", ), effort( ReasoningEffort::Medium, "Balances speed and reasoning depth for everyday tasks", ), effort( ReasoningEffort::High, "Greater reasoning depth for complex problems", ), effort( ReasoningEffort::XHigh, "Extra high reasoning depth for complex problems", ), ], is_default: true, upgrade: None, show_in_picker: true, supported_in_api: false, } } fn gpt_5_1_codex_max() -> ModelPreset { ModelPreset { id: "gpt-5.1-codex-max".to_string(), model: "gpt-5.1-codex-max".to_string(), display_name: "gpt-5.1-codex-max".to_string(), description: "Codex-optimized flagship for deep and fast reasoning.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ effort( ReasoningEffort::Low, "Fast responses with lighter reasoning", ), effort( ReasoningEffort::Medium, "Balances speed and reasoning depth for everyday tasks", ), effort( ReasoningEffort::High, "Greater reasoning depth for complex problems", ), effort( ReasoningEffort::XHigh, "Extra high reasoning depth for complex problems", ), ], is_default: true, upgrade: Some(gpt52_codex_upgrade()), show_in_picker: true, supported_in_api: true, } } fn gpt_5_1_codex_mini() -> ModelPreset { ModelPreset { id: "gpt-5.1-codex-mini".to_string(), model: "gpt-5.1-codex-mini".to_string(), display_name: "gpt-5.1-codex-mini".to_string(), description: "Optimized for codex. 
Cheaper, faster, but less capable.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ effort( ReasoningEffort::Medium, "Dynamically adjusts reasoning based on the task", ), effort( ReasoningEffort::High, "Maximizes reasoning depth for complex or ambiguous problems", ), ], is_default: false, upgrade: Some(gpt52_codex_upgrade()), show_in_picker: true, supported_in_api: true, } } fn gpt_5_2() -> ModelPreset { ModelPreset { id: "gpt-5.2".to_string(), model: "gpt-5.2".to_string(), display_name: "gpt-5.2".to_string(), description: "Latest frontier model with improvements across knowledge, reasoning and coding" .to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ effort( ReasoningEffort::Low, "Balances speed with some reasoning; useful for straightforward queries and short explanations", ), effort( ReasoningEffort::Medium, "Provides a solid balance of reasoning depth and latency for general-purpose tasks", ), effort( ReasoningEffort::High, "Maximizes reasoning depth for complex or ambiguous problems", ), effort( ReasoningEffort::XHigh, "Extra high reasoning for complex problems", ), ], is_default: false, upgrade: Some(gpt52_codex_upgrade()), show_in_picker: true, supported_in_api: true, } } fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade { codex_protocol::openai_models::ModelUpgrade { id: "gpt-5.2-codex".to_string(), reasoning_effort_mapping: None, migration_config_key: "gpt-5.2-codex".to_string(), model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), upgrade_copy: Some( "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work." 
.to_string(), ), } } fn effort(reasoning_effort: ReasoningEffort, description: &str) -> ReasoningEffortPreset { ReasoningEffortPreset { effort: reasoning_effort, description: description.to_string(), } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/abort_tasks.rs
codex-rs/core/tests/suite/abort_tasks.rs
use assert_matches::assert_matches; use std::sync::Arc; use std::time::Duration; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use regex_lite::Regex; use serde_json::json; /// Integration test: spawn a long‑running shell_command tool via a mocked Responses SSE /// function call, then interrupt the session and expect TurnAborted. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn interrupt_long_running_tool_emits_turn_aborted() { let command = "sleep 60"; let args = json!({ "command": command, "timeout_ms": 60_000 }) .to_string(); let body = sse(vec![ ev_function_call("call_sleep", "shell_command", &args), ev_completed("done"), ]); let server = start_mock_server().await; mount_sse_once(&server, body).await; let codex = test_codex() .with_model("gpt-5.1") .build(&server) .await .unwrap() .codex; // Kick off a turn that triggers the function call. codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "start sleep".into(), }], }) .await .unwrap(); // Wait until the exec begins to avoid a race, then interrupt. wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecCommandBegin(_))).await; codex.submit(Op::Interrupt).await.unwrap(); // Expect TurnAborted soon after. wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnAborted(_))).await; } /// After an interrupt we expect the next request to the model to include both /// the original tool call and an `"aborted"` `function_call_output`. 
This test /// exercises the follow-up flow: it sends another user turn, inspects the mock /// responses server, and ensures the model receives the synthesized abort. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn interrupt_tool_records_history_entries() { let command = "sleep 60"; let call_id = "call-history"; let args = json!({ "command": command, "timeout_ms": 60_000 }) .to_string(); let first_body = sse(vec![ ev_response_created("resp-history"), ev_function_call(call_id, "shell_command", &args), ev_completed("resp-history"), ]); let follow_up_body = sse(vec![ ev_response_created("resp-followup"), ev_completed("resp-followup"), ]); let server = start_mock_server().await; let response_mock = mount_sse_sequence(&server, vec![first_body, follow_up_body]).await; let fixture = test_codex() .with_model("gpt-5.1") .build(&server) .await .unwrap(); let codex = Arc::clone(&fixture.codex); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "start history recording".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecCommandBegin(_))).await; tokio::time::sleep(Duration::from_secs_f32(0.1)).await; codex.submit(Op::Interrupt).await.unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnAborted(_))).await; codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "follow up".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let requests = response_mock.requests(); assert!( requests.len() == 2, "expected two calls to the responses API, got {}", requests.len() ); assert!( response_mock.saw_function_call(call_id), "function call not recorded in responses payload" ); let output = response_mock .function_call_output_text(call_id) .expect("missing function_call_output text"); let re = Regex::new(r"^Wall time: ([0-9]+(?:\.[0-9])?) 
seconds\naborted by user$") .expect("compile regex"); let captures = re.captures(&output); assert_matches!( captures.as_ref(), Some(caps) if caps.get(1).is_some(), "aborted message with elapsed seconds" ); let secs: f32 = captures .expect("aborted message with elapsed seconds") .get(1) .unwrap() .as_str() .parse() .unwrap(); assert!( secs >= 0.1, "expected at least one tenth of a second of elapsed time, got {secs}" ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/read_file.rs
codex-rs/core/tests/suite/read_file.rs
#![cfg(not(target_os = "windows"))] use core_test_support::responses::mount_function_call_agent_response; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use pretty_assertions::assert_eq; use serde_json::json; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore = "disabled until we enable read_file tool"] async fn read_file_tool_returns_requested_lines() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let test = test_codex().build(&server).await?; let file_path = test.cwd.path().join("sample.txt"); std::fs::write(&file_path, "first\nsecond\nthird\nfourth\n")?; let file_path = file_path.to_string_lossy().to_string(); let call_id = "read-file-call"; let arguments = json!({ "file_path": file_path, "offset": 2, "limit": 2, }) .to_string(); let mocks = mount_function_call_agent_response(&server, call_id, &arguments, "read_file").await; test.submit_turn("please inspect sample.txt").await?; let req = mocks.completion.single_request(); let (output_text_opt, _) = req .function_call_output_content_and_success(call_id) .expect("output present"); let output_text = output_text_opt.expect("output text present"); assert_eq!(output_text, "L2: second\nL3: third"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/tools.rs
codex-rs/core/tests/suite/tools.rs
#![cfg(not(target_os = "windows"))] #![allow(clippy::unwrap_used, clippy::expect_used)] use std::fs; use std::time::Duration; use std::time::Instant; use anyhow::Context; use anyhow::Result; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::SandboxPolicy; use codex_core::sandboxing::SandboxPermissions; use core_test_support::assert_regex_match; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_custom_tool_call; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::mount_sse_sequence; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use regex_lite::Regex; use serde_json::Value; use serde_json::json; fn tool_names(body: &Value) -> Vec<String> { body.get("tools") .and_then(Value::as_array) .map(|tools| { tools .iter() .filter_map(|tool| { tool.get("name") .or_else(|| tool.get("type")) .and_then(Value::as_str) .map(str::to_string) }) .collect() }) .unwrap_or_default() } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn custom_tool_unknown_returns_custom_output_error() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex(); let test = builder.build(&server).await?; let call_id = "custom-unsupported"; let tool_name = "unsupported_tool"; mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_custom_tool_call(call_id, tool_name, "\"payload\""), ev_completed("resp-1"), ]), ) .await; let mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; test.submit_turn_with_policies( "invoke custom tool", 
AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let item = mock.single_request().custom_tool_call_output(call_id); let output = item .get("output") .and_then(Value::as_str) .unwrap_or_default(); let expected = format!("unsupported custom tool call: {tool_name}"); assert_eq!(output, expected); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_escalated_permissions_rejected_then_ok() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5"); let test = builder.build(&server).await?; let command = ["/bin/echo", "shell ok"]; let call_id_blocked = "shell-blocked"; let call_id_success = "shell-success"; let first_args = json!({ "command": command, "timeout_ms": 1_000, "sandbox_permissions": SandboxPermissions::RequireEscalated, }); let second_args = json!({ "command": command, "timeout_ms": 1_000, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call( call_id_blocked, "shell", &serde_json::to_string(&first_args)?, ), ev_completed("resp-1"), ]), ) .await; let second_mock = mount_sse_once( &server, sse(vec![ ev_response_created("resp-2"), ev_function_call( call_id_success, "shell", &serde_json::to_string(&second_args)?, ), ev_completed("resp-2"), ]), ) .await; let third_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-3"), ]), ) .await; test.submit_turn_with_policies( "run the shell command", AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let policy = AskForApproval::Never; let expected_message = format!( "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}" ); let blocked_output = second_mock .single_request() .function_call_output_content_and_success(call_id_blocked) .and_then(|(content, _)| content) .expect("blocked output string"); assert_eq!( blocked_output, 
expected_message, "unexpected rejection message" ); let success_output = third_mock .single_request() .function_call_output_content_and_success(call_id_success) .and_then(|(content, _)| content) .expect("success output string"); let output_json: Value = serde_json::from_str(&success_output)?; assert_eq!( output_json["metadata"]["exit_code"].as_i64(), Some(0), "expected exit code 0 after rerunning without escalation", ); let stdout = output_json["output"].as_str().unwrap_or_default(); let stdout_pattern = r"(?s)^shell ok\n?$"; assert_regex_match(stdout_pattern, stdout); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sandbox_denied_shell_returns_original_output() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1-codex"); let fixture = builder.build(&server).await?; let call_id = "sandbox-denied-shell"; let target_path = fixture.workspace_path("sandbox-denied.txt"); let sentinel = "sandbox-denied sentinel output"; let command = vec![ "/bin/sh".to_string(), "-c".to_string(), format!( "printf {sentinel:?}; printf {content:?} > {path:?}", sentinel = format!("{sentinel}\n"), content = "sandbox denied", path = &target_path ), ]; let args = json!({ "command": command, "timeout_ms": 1_000, }); let responses = vec![ sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ]; let mock = mount_sse_sequence(&server, responses).await; fixture .submit_turn_with_policy( "run a command that should be denied by the read-only sandbox", SandboxPolicy::ReadOnly, ) .await?; let output_text = mock .function_call_output_text(call_id) .context("shell output present")?; let exit_code_line = output_text .lines() .next() .context("exit code line present")?; let exit_code = exit_code_line .strip_prefix("Exit code: ") 
.context("exit code prefix present")? .trim() .parse::<i32>() .context("exit code is integer")?; let body = output_text; let body_lower = body.to_lowercase(); // Required for multi-OS. let has_denial = body_lower.contains("permission denied") || body_lower.contains("operation not permitted") || body_lower.contains("read-only file system"); assert!( has_denial, "expected sandbox denial details in tool output: {body}" ); assert!( body.contains(sentinel), "expected sentinel output from command to reach the model: {body}" ); let target_path_str = target_path .to_str() .context("target path string representation")?; assert!( body.contains(target_path_str), "expected sandbox error to mention denied path: {body}" ); assert!( !body_lower.contains("failed in sandbox"), "expected original tool output, found fallback message: {body}" ); assert_ne!( exit_code, 0, "sandbox denial should surface a non-zero exit code" ); Ok(()) } async fn collect_tools(use_unified_exec: bool) -> Result<Vec<String>> { let server = start_mock_server().await; let responses = vec![sse(vec![ ev_response_created("resp-1"), ev_assistant_message("msg-1", "done"), ev_completed("resp-1"), ])]; let mock = mount_sse_sequence(&server, responses).await; let mut builder = test_codex().with_config(move |config| { if use_unified_exec { config.features.enable(Feature::UnifiedExec); } else { config.features.disable(Feature::UnifiedExec); } }); let test = builder.build(&server).await?; test.submit_turn_with_policies( "list tools", AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let first_body = mock.single_request().body_json(); Ok(tool_names(&first_body)) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_spec_toggle_end_to_end() -> Result<()> { skip_if_no_network!(Ok(())); let tools_disabled = collect_tools(false).await?; assert!( !tools_disabled.iter().any(|name| name == "exec_command"), "tools list should not include exec_command when disabled: 
{tools_disabled:?}" ); assert!( !tools_disabled.iter().any(|name| name == "write_stdin"), "tools list should not include write_stdin when disabled: {tools_disabled:?}" ); let tools_enabled = collect_tools(true).await?; assert!( tools_enabled.iter().any(|name| name == "exec_command"), "tools list should include exec_command when enabled: {tools_enabled:?}" ); assert!( tools_enabled.iter().any(|name| name == "write_stdin"), "tools list should include write_stdin when enabled: {tools_enabled:?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_timeout_includes_timeout_prefix_and_metadata() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5"); let test = builder.build(&server).await?; let call_id = "shell-timeout"; let timeout_ms = 50u64; let args = json!({ "command": ["/bin/sh", "-c", "yes line | head -n 400; sleep 1"], "timeout_ms": timeout_ms, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let second_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; test.submit_turn_with_policies( "run a long command", AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let timeout_item = second_mock.single_request().function_call_output(call_id); let output_str = timeout_item .get("output") .and_then(Value::as_str) .expect("timeout output string"); // The exec path can report a timeout in two ways depending on timing: // 1) Structured JSON with exit_code 124 and a timeout prefix (preferred), or // 2) A plain error string if the child is observed as killed by a signal first. 
if let Ok(output_json) = serde_json::from_str::<Value>(output_str) { assert_eq!( output_json["metadata"]["exit_code"].as_i64(), Some(124), "expected timeout exit code 124", ); let stdout = output_json["output"].as_str().unwrap_or_default(); assert!( stdout.contains("command timed out"), "timeout output missing `command timed out`: {stdout}" ); } else { // Fallback: accept the signal classification path to deflake the test. let signal_pattern = r"(?is)^execution error:.*signal.*$"; assert_regex_match(signal_pattern, output_str); } Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_timeout_handles_background_grandchild_stdout() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_model("gpt-5.1").with_config(|config| { config .sandbox_policy .set(SandboxPolicy::DangerFullAccess) .expect("set sandbox policy"); }); let test = builder.build(&server).await?; let call_id = "shell-grandchild-timeout"; let pid_path = test.cwd.path().join("grandchild_pid.txt"); let script_path = test.cwd.path().join("spawn_detached.py"); let script = format!( r#"import subprocess import time from pathlib import Path # Spawn a detached grandchild that inherits stdout/stderr so the pipe stays open. 
proc = subprocess.Popen(["/bin/sh", "-c", "sleep 60"], start_new_session=True) Path({pid_path:?}).write_text(str(proc.pid)) time.sleep(60) "# ); fs::write(&script_path, script)?; let args = json!({ "command": ["python3", script_path.to_string_lossy()], "timeout_ms": 200, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let second_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; let start = Instant::now(); let output_str = tokio::time::timeout(Duration::from_secs(10), async { test.submit_turn_with_policies( "run a command with a detached grandchild", AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let timeout_item = second_mock.single_request().function_call_output(call_id); timeout_item .get("output") .and_then(Value::as_str) .map(str::to_string) .context("timeout output string") }) .await .context("exec call should not hang waiting for grandchild pipes to close")??; let elapsed = start.elapsed(); if let Ok(output_json) = serde_json::from_str::<Value>(&output_str) { assert_eq!( output_json["metadata"]["exit_code"].as_i64(), Some(124), "expected timeout exit code 124", ); } else { let timeout_pattern = r"(?is)command timed out|timeout"; assert_regex_match(timeout_pattern, &output_str); } assert!( elapsed < Duration::from_secs(9), "command should return shortly after timeout even with live grandchildren: {elapsed:?}" ); if let Ok(pid_str) = fs::read_to_string(&pid_path) && let Ok(pid) = pid_str.trim().parse::<libc::pid_t>() { unsafe { libc::kill(pid, libc::SIGKILL) }; } Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn shell_spawn_failure_truncates_exec_error() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex().with_config(|cfg| { cfg.sandbox_policy 
.set(SandboxPolicy::DangerFullAccess) .expect("set sandbox policy"); }); let test = builder.build(&server).await?; let call_id = "shell-spawn-failure"; let bogus_component = "missing-bin-".repeat(700); let bogus_exe = test .cwd .path() .join(bogus_component) .to_string_lossy() .into_owned(); let args = json!({ "command": [bogus_exe], "timeout_ms": 1_000, }); mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_function_call(call_id, "shell", &serde_json::to_string(&args)?), ev_completed("resp-1"), ]), ) .await; let second_mock = mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "done"), ev_completed("resp-2"), ]), ) .await; test.submit_turn_with_policies( "spawn a missing binary", AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await?; let failure_item = second_mock.single_request().function_call_output(call_id); let output = failure_item .get("output") .and_then(Value::as_str) .expect("spawn failure output string"); let spawn_error_pattern = r#"(?s)^Exit code: -?\d+ Wall time: [0-9]+(?:\.[0-9]+)? seconds Output: execution error: .*$"#; let spawn_truncated_pattern = r#"(?s)^Exit code: -?\d+ Wall time: [0-9]+(?:\.[0-9]+)? seconds Total output lines: \d+ Output: execution error: .*$"#; let spawn_error_regex = Regex::new(spawn_error_pattern)?; let spawn_truncated_regex = Regex::new(spawn_truncated_pattern)?; if !spawn_error_regex.is_match(output) && !spawn_truncated_regex.is_match(output) { let fallback_pattern = r"(?s)^execution error: .*$"; assert_regex_match(fallback_pattern, output); } assert!(output.len() <= 10 * 1024); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/skills.rs
codex-rs/core/tests/suite/skills.rs
#![cfg(not(target_os = "windows"))] #![allow(clippy::unwrap_used, clippy::expect_used)] use anyhow::Result; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::start_mock_server; use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use std::fs; use std::path::Path; fn write_skill(home: &Path, name: &str, description: &str, body: &str) -> std::path::PathBuf { let skill_dir = home.join("skills").join(name); fs::create_dir_all(&skill_dir).unwrap(); let contents = format!("---\nname: {name}\ndescription: {description}\n---\n\n{body}\n"); let path = skill_dir.join("SKILL.md"); fs::write(&path, contents).unwrap(); path } fn system_skill_md_path(home: impl AsRef<Path>, name: &str) -> std::path::PathBuf { home.as_ref() .join("skills") .join(".system") .join(name) .join("SKILL.md") } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn user_turn_includes_skill_instructions() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let skill_body = "skill body"; let mut builder = test_codex() .with_config(|config| { config.features.enable(Feature::Skills); }) .with_pre_build_hook(|home| { write_skill(home, "demo", "demo skill", skill_body); }); let test = builder.build(&server).await?; let skill_path = test.codex_home_path().join("skills/demo/SKILL.md"); let skill_path = std::fs::canonicalize(skill_path)?; let mock = mount_sse_once( &server, sse(vec![ ev_response_created("resp-1"), ev_assistant_message("msg-1", "done"), ev_completed("resp-1"), ]), ) .await; let session_model = 
test.session_configured.model.clone(); test.codex .submit(Op::UserTurn { items: vec![ UserInput::Text { text: "please use $demo".to_string(), }, UserInput::Skill { name: "demo".to_string(), path: skill_path.clone(), }, ], final_output_json_schema: None, cwd: test.cwd_path().to_path_buf(), approval_policy: AskForApproval::Never, sandbox_policy: SandboxPolicy::DangerFullAccess, model: session_model, effort: None, summary: codex_protocol::config_types::ReasoningSummary::Auto, }) .await?; core_test_support::wait_for_event(test.codex.as_ref(), |event| { matches!(event, codex_core::protocol::EventMsg::TaskComplete(_)) }) .await; let request = mock.single_request(); let user_texts = request.message_input_texts("user"); let skill_path_str = skill_path.to_string_lossy(); assert!( user_texts.iter().any(|text| { text.contains("<skill>\n<name>demo</name>") && text.contains("<path>") && text.contains(skill_body) && text.contains(skill_path_str.as_ref()) }), "expected skill instructions in user input, got {user_texts:?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn skill_load_errors_surface_in_session_configured() -> Result<()> { skip_if_no_network!(Ok(())); let server = start_mock_server().await; let mut builder = test_codex() .with_config(|config| { config.features.enable(Feature::Skills); }) .with_pre_build_hook(|home| { let skill_dir = home.join("skills").join("broken"); fs::create_dir_all(&skill_dir).unwrap(); fs::write(skill_dir.join("SKILL.md"), "not yaml").unwrap(); }); let test = builder.build(&server).await?; test.codex .submit(Op::ListSkills { cwds: Vec::new(), force_reload: false, }) .await?; let response = core_test_support::wait_for_event_match(test.codex.as_ref(), |event| match event { codex_core::protocol::EventMsg::ListSkillsResponse(response) => Some(response.clone()), _ => None, }) .await; let cwd = test.cwd_path(); let (skills, errors) = response .skills .iter() .find(|entry| entry.cwd.as_path() == cwd) .map(|entry| 
(entry.skills.clone(), entry.errors.clone())) .unwrap_or_default(); assert!( skills.iter().all(|skill| { !skill .path .to_string_lossy() .ends_with("skills/broken/SKILL.md") }), "expected broken skill not loaded, got {skills:?}" ); assert_eq!(errors.len(), 1, "expected one load error"); let error_path = errors[0].path.to_string_lossy(); assert!( error_path.ends_with("skills/broken/SKILL.md"), "unexpected error path: {error_path}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn list_skills_includes_system_cache_entries() -> Result<()> { skip_if_no_network!(Ok(())); const SYSTEM_SKILL_NAME: &str = "skill-creator"; let server = start_mock_server().await; let mut builder = test_codex() .with_config(|config| { config.features.enable(Feature::Skills); }) .with_pre_build_hook(|home| { let system_skill_path = system_skill_md_path(home, SYSTEM_SKILL_NAME); assert!( !system_skill_path.exists(), "expected embedded system skills not yet installed, but {system_skill_path:?} exists" ); }); let test = builder.build(&server).await?; let system_skill_path = system_skill_md_path(test.codex_home_path(), SYSTEM_SKILL_NAME); assert!( system_skill_path.exists(), "expected embedded system skills installed to {system_skill_path:?}" ); let system_skill_contents = fs::read_to_string(&system_skill_path)?; let expected_name_line = format!("name: {SYSTEM_SKILL_NAME}"); assert!( system_skill_contents.contains(&expected_name_line), "expected embedded system skill file, got:\n{system_skill_contents}" ); test.codex .submit(Op::ListSkills { cwds: Vec::new(), force_reload: true, }) .await?; let response = core_test_support::wait_for_event_match(test.codex.as_ref(), |event| match event { codex_core::protocol::EventMsg::ListSkillsResponse(response) => Some(response.clone()), _ => None, }) .await; let cwd = test.cwd_path(); let (skills, _errors) = response .skills .iter() .find(|entry| entry.cwd.as_path() == cwd) .map(|entry| (entry.skills.clone(), 
entry.errors.clone())) .unwrap_or_default(); let skill = skills .iter() .find(|skill| skill.name == SYSTEM_SKILL_NAME) .expect("expected system skill to be present"); assert_eq!(skill.scope, codex_protocol::protocol::SkillScope::System); let path_str = skill.path.to_string_lossy().replace('\\', "/"); let expected_path_suffix = format!("/skills/.system/{SYSTEM_SKILL_NAME}/SKILL.md"); assert!( path_str.ends_with(&expected_path_suffix), "unexpected skill path: {path_str}" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/exec.rs
codex-rs/core/tests/suite/exec.rs
#![cfg(target_os = "macos")] use std::collections::HashMap; use std::string::ToString; use codex_core::exec::ExecParams; use codex_core::exec::ExecToolCallOutput; use codex_core::exec::SandboxType; use codex_core::exec::process_exec_tool_call; use codex_core::protocol::SandboxPolicy; use codex_core::sandboxing::SandboxPermissions; use codex_core::spawn::CODEX_SANDBOX_ENV_VAR; use tempfile::TempDir; use codex_core::error::Result; use codex_core::get_platform_sandbox; fn skip_test() -> bool { if std::env::var(CODEX_SANDBOX_ENV_VAR) == Ok("seatbelt".to_string()) { eprintln!("{CODEX_SANDBOX_ENV_VAR} is set to 'seatbelt', skipping test."); return true; } false } #[expect(clippy::expect_used)] async fn run_test_cmd(tmp: TempDir, cmd: Vec<&str>) -> Result<ExecToolCallOutput> { let sandbox_type = get_platform_sandbox().expect("should be able to get sandbox type"); assert_eq!(sandbox_type, SandboxType::MacosSeatbelt); let params = ExecParams { command: cmd.iter().map(ToString::to_string).collect(), cwd: tmp.path().to_path_buf(), expiration: 1000.into(), env: HashMap::new(), sandbox_permissions: SandboxPermissions::UseDefault, justification: None, arg0: None, }; let policy = SandboxPolicy::new_read_only_policy(); process_exec_tool_call(params, &policy, tmp.path(), &None, None).await } /// Command succeeds with exit code 0 normally #[tokio::test] async fn exit_code_0_succeeds() { if skip_test() { return; } let tmp = TempDir::new().expect("should be able to create temp dir"); let cmd = vec!["echo", "hello"]; let output = run_test_cmd(tmp, cmd).await.unwrap(); assert_eq!(output.stdout.text, "hello\n"); assert_eq!(output.stderr.text, ""); assert_eq!(output.stdout.truncated_after_lines, None); } /// Command succeeds with exit code 0 normally #[tokio::test] async fn truncates_output_lines() { if skip_test() { return; } let tmp = TempDir::new().expect("should be able to create temp dir"); let cmd = vec!["seq", "300"]; let output = run_test_cmd(tmp, cmd).await.unwrap(); let 
expected_output = (1..=300) .map(|i| format!("{i}\n")) .collect::<Vec<_>>() .join(""); assert_eq!(output.stdout.text, expected_output); assert_eq!(output.stdout.truncated_after_lines, None); } /// Command succeeds with exit code 0 normally #[tokio::test] async fn truncates_output_bytes() { if skip_test() { return; } let tmp = TempDir::new().expect("should be able to create temp dir"); // each line is 1000 bytes let cmd = vec!["bash", "-lc", "seq 15 | awk '{printf \"%-1000s\\n\", $0}'"]; let output = run_test_cmd(tmp, cmd).await.unwrap(); assert!(output.stdout.text.len() >= 15000); assert_eq!(output.stdout.truncated_after_lines, None); } /// Command not found returns exit code 127, this is not considered a sandbox error #[tokio::test] async fn exit_command_not_found_is_ok() { if skip_test() { return; } let tmp = TempDir::new().expect("should be able to create temp dir"); let cmd = vec!["/bin/bash", "-c", "nonexistent_command_12345"]; run_test_cmd(tmp, cmd).await.unwrap(); } /// Writing a file fails and should be considered a sandbox error #[tokio::test] async fn write_file_fails_as_sandbox_error() { if skip_test() { return; } let tmp = TempDir::new().expect("should be able to create temp dir"); let path = tmp.path().join("test.txt"); let cmd = vec![ "/user/bin/touch", path.to_str().expect("should be able to get path"), ]; assert!(run_test_cmd(tmp, cmd).await.is_err()); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/otel.rs
codex-rs/core/tests/suite/otel.rs
use codex_core::config::Constrained; use codex_core::features::Feature; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::Op; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::SandboxPolicy; use codex_protocol::user_input::UserInput; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; use core_test_support::responses::ev_custom_tool_call; use core_test_support::responses::ev_function_call; use core_test_support::responses::ev_local_shell_call; use core_test_support::responses::ev_message_item_added; use core_test_support::responses::ev_output_text_delta; use core_test_support::responses::ev_reasoning_item; use core_test_support::responses::ev_reasoning_summary_text_delta; use core_test_support::responses::ev_reasoning_text_delta; use core_test_support::responses::ev_response_created; use core_test_support::responses::mount_response_once; use core_test_support::responses::mount_sse_once; use core_test_support::responses::sse; use core_test_support::responses::sse_response; use core_test_support::responses::start_mock_server; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use std::sync::Mutex; use tracing::Level; use tracing_test::traced_test; use tracing_subscriber::fmt::format::FmtSpan; use tracing_test::internal::MockWriter; #[tokio::test] #[traced_test] async fn responses_api_emits_api_request_event() { let server = start_mock_server().await; mount_sse_once(&server, sse(vec![ev_completed("done")])).await; let TestCodex { codex, .. 
} = test_codex().build(&server).await.unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| line.contains("codex.api_request")) .map(|_| Ok(())) .unwrap_or_else(|| Err("expected codex.api_request event".to_string())) }); logs_assert(|lines: &[&str]| { lines .iter() .find(|line| line.contains("codex.conversation_starts")) .map(|_| Ok(())) .unwrap_or_else(|| Err("expected codex.conversation_starts event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_emits_tracing_for_output_item() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ev_assistant_message("id1", "hi"), ev_completed("id2")]), ) .await; let TestCodex { codex, .. } = test_codex().build(&server).await.unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.output_item.done") }) .map(|_| Ok(())) .unwrap_or(Err("missing response.output_item.done event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_emits_failed_event_on_parse_error() { let server = start_mock_server().await; mount_sse_once(&server, "data: not-json\n\n".to_string()).await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("error.message") && line.contains("expected ident at line 1 column 2") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_records_failed_event_when_stream_closes_without_completed() { let server = start_mock_server().await; mount_sse_once(&server, sse(vec![ev_assistant_message("id", "hi")])).await; let TestCodex { codex, .. } = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("error.message") && line.contains("stream closed before response.completed") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_failed_event_records_response_error_message() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![serde_json::json!({ "type": "response.failed", "response": { "error": { "message": "boom", "code": "bad" } } })]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.failed") && line.contains("error.message") && line.contains("boom") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_failed_event_logs_parse_error() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![serde_json::json!({ "type": "response.failed", "response": { "error": "not-an-object" } })]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. } = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.failed") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_failed_event_logs_missing_error() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![serde_json::json!({ "type": "response.failed", "response": {} })]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.failed") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_failed_event_logs_response_completed_parse_error() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![serde_json::json!({ "type": "response.completed", "response": {} })]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. } = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.completed") && line.contains("error.message") && line.contains("failed to parse ResponseCompleted") }) .map(|_| Ok(())) .unwrap_or(Err("missing codex.sse_event".to_string())) }); } #[tokio::test] #[traced_test] async fn process_sse_emits_completed_telemetry() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![serde_json::json!({ "type": "response.completed", "response": { "id": "resp1", "usage": { "input_tokens": 3, "input_tokens_details": { "cached_tokens": 1 }, "output_tokens": 5, "output_tokens_details": { "reasoning_tokens": 2 }, "total_tokens": 9 } } })]), ) .await; let 
TestCodex { codex, .. } = test_codex().build(&server).await.unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(|lines: &[&str]| { lines .iter() .find(|line| { line.contains("codex.sse_event") && line.contains("event.kind=response.completed") && line.contains("input_token_count=3") && line.contains("output_token_count=5") && line.contains("cached_token_count=1") && line.contains("reasoning_token_count=2") && line.contains("tool_token_count=9") }) .map(|_| Ok(())) .unwrap_or(Err("missing response.completed telemetry".to_string())) }); } #[tokio::test] async fn handle_responses_span_records_response_kind_and_tool_name() { let buffer: &'static Mutex<Vec<u8>> = Box::leak(Box::new(Mutex::new(Vec::new()))); let subscriber = tracing_subscriber::fmt() .with_level(true) .with_ansi(false) .with_max_level(Level::TRACE) .with_span_events(FmtSpan::FULL) .with_writer(MockWriter::new(buffer)) .finish(); let _guard = tracing::subscriber::set_default(subscriber); let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_function_call("function-call", "nonexistent", "{\"value\":1}"), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "tool handled"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let logs = String::from_utf8(buffer.lock().unwrap().clone()).unwrap(); assert!( logs.contains("handle_responses{otel.name=\"function_call\"") && logs.contains("tool_name=\"nonexistent\"") && logs.contains("from=\"output_item_done\""), "missing handle_responses span with function call metadata\nlogs:\n{logs}" ); assert!( logs.contains("handle_responses{otel.name=\"completed\""), "missing handle_responses span for completion\nlogs:\n{logs}" ); } #[tokio::test(flavor = "current_thread")] async fn record_responses_sets_span_fields_for_response_events() { let buffer: &'static Mutex<Vec<u8>> = Box::leak(Box::new(Mutex::new(Vec::new()))); let subscriber = tracing_subscriber::fmt() .with_level(true) .with_ansi(false) .with_max_level(Level::TRACE) .with_span_events(FmtSpan::FULL) .with_writer(MockWriter::new(buffer)) .finish(); let _guard = tracing::subscriber::set_default(subscriber); let server = start_mock_server().await; let sse_body = sse(vec![ ev_response_created("resp-1"), ev_function_call("call-1", "fn", "{\"value\":1}"), ev_custom_tool_call("custom-1", "custom_tool", "{\"key\":\"value\"}"), ev_message_item_added("msg-added", "hi there"), ev_output_text_delta("delta"), ev_reasoning_summary_text_delta("summary-delta"), ev_reasoning_text_delta("raw-delta"), ev_function_call("call-1", "fn", "{\"key\":\"value\"}"), ev_custom_tool_call("custom-1", "custom_tool", "{\"key\":\"value\"}"), ev_assistant_message("msg-1", "agent"), ev_reasoning_item("reasoning-1", &["summary"], &[]), ev_completed("resp-1"), ]); mount_response_once(&server, sse_response(sse_body)).await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; let logs = String::from_utf8(buffer.lock().unwrap().clone()).unwrap(); let expected = [ ("created", None::<&str>, None::<&str>), ("rate_limits", None, None), ("function_call", Some("output_item_added"), Some("fn")), ("message_from_assistant", Some("output_item_done"), None), ("reasoning", Some("output_item_done"), None), ("text_delta", None, None), ("reasoning_summary_delta", None, None), ("reasoning_content_delta", None, None), ("completed", None, None), ]; for (name, from, tool_name) in expected { assert!( logs.contains(&format!("handle_responses{{otel.name=\"{name}\"")), "missing otel.name={name}\nlogs:\n{logs}" ); if let Some(from) = from { assert!( logs.contains(&format!("from=\"{from}\"")), "missing from={from} for {name}\nlogs:\n{logs}" ); } if let Some(tool_name) = tool_name { assert!( logs.contains(&format!("tool_name=\"{tool_name}\"")), "missing tool_name={tool_name} for {name}\nlogs:\n{logs}" ); } } } #[tokio::test] #[traced_test] async fn handle_response_item_records_tool_result_for_custom_tool_call() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_custom_tool_call( "custom-tool-call", "unsupported_tool", "{\"key\":\"value\"}", ), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(|lines: &[&str]| { let line = lines .iter() .find(|line| { line.contains("codex.tool_result") && line.contains("call_id=custom-tool-call") }) .ok_or_else(|| "missing codex.tool_result event".to_string())?; if !line.contains("tool_name=unsupported_tool") { return Err("missing tool_name field".to_string()); } if !line.contains("arguments={\"key\":\"value\"}") { return Err("missing arguments field".to_string()); } if !line.contains("output=unsupported custom tool call: unsupported_tool") { return Err("missing output field".to_string()); } if !line.contains("success=false") { return Err("missing success field".to_string()); } Ok(()) }); } #[tokio::test] #[traced_test] async fn handle_response_item_records_tool_result_for_function_call() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_function_call("function-call", "nonexistent", "{\"value\":1}"), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(|lines: &[&str]| { let line = lines .iter() .find(|line| { line.contains("codex.tool_result") && line.contains("call_id=function-call") }) .ok_or_else(|| "missing codex.tool_result event".to_string())?; if !line.contains("tool_name=nonexistent") { return Err("missing tool_name field".to_string()); } if !line.contains("arguments={\"value\":1}") { return Err("missing arguments field".to_string()); } if !line.contains("output=unsupported call: nonexistent") { return Err("missing output field".to_string()); } if !line.contains("success=false") { return Err("missing success field".to_string()); } Ok(()) }); } #[tokio::test] #[traced_test] async fn handle_response_item_records_tool_result_for_local_shell_missing_ids() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ serde_json::json!({ "type": "response.output_item.done", "item": { "type": "local_shell_call", "status": "completed", "action": { "type": "exec", "command": vec!["/bin/echo", "hello"], } } }), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(|lines: &[&str]| { let line = lines .iter() .find(|line| { line.contains("codex.tool_result") && line.contains(&"tool_name=local_shell".to_string()) && line.contains("output=LocalShellCall without call_id or id") }) .ok_or_else(|| "missing codex.tool_result event".to_string())?; if !line.contains("success=false") { return Err("missing success field".to_string()); } Ok(()) }); } #[cfg(target_os = "macos")] #[tokio::test] #[traced_test] async fn handle_response_item_records_tool_result_for_local_shell_call() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call("shell-call", "completed", vec!["/bin/echo", "shell"]), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(move |config| { config.features.disable(Feature::GhostCommit); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(|lines: &[&str]| { let line = lines .iter() .find(|line| line.contains("codex.tool_result") && line.contains("call_id=shell-call")) .ok_or_else(|| "missing codex.tool_result event".to_string())?; if !line.contains("tool_name=local_shell") { return Err("missing tool_name field".to_string()); } if !line.contains("arguments=/bin/echo shell") { return Err("missing arguments field".to_string()); } let output_idx = line .find("output=") .ok_or_else(|| "missing output field".to_string())?; if line[output_idx + "output=".len()..].is_empty() { return Err("empty output field".to_string()); } if !line.contains("success=false") { return Err("missing success field".to_string()); } Ok(()) }); } fn tool_decision_assertion<'a>( call_id: &'a str, expected_decision: &'a str, expected_source: &'a str, ) -> impl Fn(&[&str]) -> Result<(), String> + 'a { let call_id = call_id.to_string(); let expected_decision = expected_decision.to_string(); let expected_source = expected_source.to_string(); move |lines: &[&str]| { let line = lines .iter() .find(|line| { line.contains("codex.tool_decision") && line.contains(&format!("call_id={call_id}")) }) .ok_or_else(|| format!("missing codex.tool_decision event for {call_id}"))?; let lower = line.to_lowercase(); if !lower.contains("tool_name=local_shell") { return Err("missing tool_name for local_shell".to_string()); } if !lower.contains(&format!("decision={expected_decision}")) { return Err(format!("unexpected decision for {call_id}")); } if !lower.contains(&format!("source={expected_source}")) { return Err(format!("unexpected source for {expected_source}")); } Ok(()) } } #[tokio::test] #[traced_test] async fn 
handle_container_exec_autoapprove_from_config_records_tool_decision() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call( "auto_config_call", "completed", vec!["/bin/echo", "local shell"], ), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest); config.sandbox_policy = Constrained::allow_any(SandboxPolicy::DangerFullAccess); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "hello".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; logs_assert(tool_decision_assertion( "auto_config_call", "approved", "config", )); } #[tokio::test] #[traced_test] async fn handle_container_exec_user_approved_records_tool_decision() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call("user_approved_call", "completed", vec!["/bin/date"]), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "approved".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecApprovalRequest(_))).await; codex .submit(Op::ExecApproval { id: "0".into(), decision: ReviewDecision::Approved, }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(tool_decision_assertion( "user_approved_call", "approved", "user", )); } #[tokio::test] #[traced_test] async fn handle_container_exec_user_approved_for_session_records_tool_decision() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call("user_approved_session_call", "completed", vec!["/bin/date"]), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "persist".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecApprovalRequest(_))).await; codex .submit(Op::ExecApproval { id: "0".into(), decision: ReviewDecision::ApprovedForSession, }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(tool_decision_assertion( "user_approved_session_call", "approvedforsession", "user", )); } #[tokio::test] #[traced_test] async fn handle_sandbox_error_user_approves_retry_records_tool_decision() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call("sandbox_retry_call", "completed", vec!["/bin/date"]), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. 
} = test_codex() .with_config(|config| { config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted); }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "retry".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExecApprovalRequest(_))).await; codex .submit(Op::ExecApproval { id: "0".into(), decision: ReviewDecision::Approved, }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TokenCount(_))).await; logs_assert(tool_decision_assertion( "sandbox_retry_call", "approved", "user", )); } #[tokio::test] #[traced_test] async fn handle_container_exec_user_denies_records_tool_decision() { let server = start_mock_server().await; mount_sse_once( &server, sse(vec![ ev_local_shell_call("user_denied_call", "completed", vec!["/bin/date"]), ev_completed("done"), ]), ) .await; mount_sse_once( &server, sse(vec![ ev_assistant_message("msg-1", "local shell done"), ev_completed("done"), ]), ) .await; let TestCodex { codex, .. } = test_codex() .with_config(|config| { config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted); }) .build(&server)
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/review.rs
codex-rs/core/tests/suite/review.rs
use codex_core::CodexAuth; use codex_core::CodexConversation; use codex_core::ContentItem; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::REVIEW_PROMPT; use codex_core::ResponseItem; use codex_core::built_in_model_providers; use codex_core::config::Config; use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG; use codex_core::protocol::EventMsg; use codex_core::protocol::ExitedReviewModeEvent; use codex_core::protocol::Op; use codex_core::protocol::ReviewCodeLocation; use codex_core::protocol::ReviewFinding; use codex_core::protocol::ReviewLineRange; use codex_core::protocol::ReviewOutputEvent; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; use codex_core::protocol::RolloutItem; use codex_core::protocol::RolloutLine; use codex_core::review_format::render_review_output_text; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::load_sse_fixture_with_id_from_str; use core_test_support::responses::get_responses_requests; use core_test_support::skip_if_no_network; use core_test_support::wait_for_event; use pretty_assertions::assert_eq; use std::path::PathBuf; use std::sync::Arc; use tempfile::TempDir; use tokio::io::AsyncWriteExt as _; use uuid::Uuid; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; /// Verify that submitting `Op::Review` spawns a child task and emits /// EnteredReviewMode -> ExitedReviewMode(None) -> TaskComplete /// in that order when the model returns a structured review JSON payload. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn review_op_emits_lifecycle_and_review_output() { // Skip under Codex sandbox network restrictions. skip_if_no_network!(); // Start mock Responses API server. Return a single assistant message whose // text is a JSON-encoded ReviewOutputEvent. 
let review_json = serde_json::json!({ "findings": [ { "title": "Prefer Stylize helpers", "body": "Use .dim()/.bold() chaining instead of manual Style where possible.", "confidence_score": 0.9, "priority": 1, "code_location": { "absolute_file_path": "/tmp/file.rs", "line_range": {"start": 10, "end": 20} } } ], "overall_correctness": "good", "overall_explanation": "All good with some improvements suggested.", "overall_confidence_score": 0.8 }) .to_string(); let sse_template = r#"[ {"type":"response.output_item.done", "item":{ "type":"message", "role":"assistant", "content":[{"type":"output_text","text":__REVIEW__}] }}, {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let review_json_escaped = serde_json::to_string(&review_json).unwrap(); let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped); let server = start_responses_server_with_sse(&sse_raw, 1).await; let codex_home = TempDir::new().unwrap(); let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await; // Submit review request. codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "Please review my changes".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); // Verify lifecycle: Entered -> Exited(Some(review)) -> TaskComplete. let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await; let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await; let review = match closed { EventMsg::ExitedReviewMode(ev) => ev .review_output .expect("expected ExitedReviewMode with Some(review_output)"), other => panic!("expected ExitedReviewMode(..), got {other:?}"), }; // Deep compare full structure using PartialEq (floats are f32 on both sides). 
let expected = ReviewOutputEvent { findings: vec![ReviewFinding { title: "Prefer Stylize helpers".to_string(), body: "Use .dim()/.bold() chaining instead of manual Style where possible.".to_string(), confidence_score: 0.9, priority: 1, code_location: ReviewCodeLocation { absolute_file_path: PathBuf::from("/tmp/file.rs"), line_range: ReviewLineRange { start: 10, end: 20 }, }, }], overall_correctness: "good".to_string(), overall_explanation: "All good with some improvements suggested.".to_string(), overall_confidence_score: 0.8, }; assert_eq!(expected, review); let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Also verify that a user message with the header and a formatted finding // was recorded back in the parent session's rollout. let path = codex.rollout_path(); let text = std::fs::read_to_string(&path).expect("read rollout file"); let mut saw_header = false; let mut saw_finding_line = false; let expected_assistant_text = render_review_output_text(&expected); let mut saw_assistant_plain = false; let mut saw_assistant_xml = false; for line in text.lines() { if line.trim().is_empty() { continue; } let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line"); let rl: RolloutLine = serde_json::from_value(v).expect("rollout line"); if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. 
}) = rl.item { if role == "user" { for c in content { if let ContentItem::InputText { text } = c { if text.contains("full review output from reviewer model") { saw_header = true; } if text.contains("- Prefer Stylize helpers — /tmp/file.rs:10-20") { saw_finding_line = true; } } } } else if role == "assistant" { for c in content { if let ContentItem::OutputText { text } = c { if text.contains("<user_action>") { saw_assistant_xml = true; } if text == expected_assistant_text { saw_assistant_plain = true; } } } } } } assert!(saw_header, "user header missing from rollout"); assert!( saw_finding_line, "formatted finding line missing from rollout" ); assert!( saw_assistant_plain, "assistant review output missing from rollout" ); assert!( !saw_assistant_xml, "assistant review output contains user_action markup" ); server.verify().await; } /// When the model returns plain text that is not JSON, ensure the child /// lifecycle still occurs and the plain text is surfaced via /// ExitedReviewMode(Some(..)) as the overall_explanation. // Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts. 
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))] #[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))] async fn review_op_with_plain_text_emits_review_fallback() { skip_if_no_network!(); let sse_raw = r#"[ {"type":"response.output_item.done", "item":{ "type":"message", "role":"assistant", "content":[{"type":"output_text","text":"just plain text"}] }}, {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let server = start_responses_server_with_sse(sse_raw, 1).await; let codex_home = TempDir::new().unwrap(); let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await; codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "Plain text review".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await; let closed = wait_for_event(&codex, |ev| matches!(ev, EventMsg::ExitedReviewMode(_))).await; let review = match closed { EventMsg::ExitedReviewMode(ev) => ev .review_output .expect("expected ExitedReviewMode with Some(review_output)"), other => panic!("expected ExitedReviewMode(..), got {other:?}"), }; // Expect a structured fallback carrying the plain text. let expected = ReviewOutputEvent { overall_explanation: "just plain text".to_string(), ..Default::default() }; assert_eq!(expected, review); let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; server.verify().await; } /// Ensure review flow suppresses assistant-specific streaming/completion events: /// - AgentMessageContentDelta /// - AgentMessageDelta (legacy) /// - ItemCompleted for TurnItem::AgentMessage // Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts. 
#[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))] #[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))] async fn review_filters_agent_message_related_events() { skip_if_no_network!(); // Stream simulating a typing assistant message with deltas and finalization. let sse_raw = r#"[ {"type":"response.output_item.added", "item":{ "type":"message", "role":"assistant", "id":"msg-1", "content":[{"type":"output_text","text":""}] }}, {"type":"response.output_text.delta", "delta":"Hi"}, {"type":"response.output_text.delta", "delta":" there"}, {"type":"response.output_item.done", "item":{ "type":"message", "role":"assistant", "id":"msg-1", "content":[{"type":"output_text","text":"Hi there"}] }}, {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let server = start_responses_server_with_sse(sse_raw, 1).await; let codex_home = TempDir::new().unwrap(); let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await; codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "Filter streaming events".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); let mut saw_entered = false; let mut saw_exited = false; // Drain until TaskComplete; assert streaming-related events never surface. 
wait_for_event(&codex, |event| match event { EventMsg::TaskComplete(_) => true, EventMsg::EnteredReviewMode(_) => { saw_entered = true; false } EventMsg::ExitedReviewMode(_) => { saw_exited = true; false } // The following must be filtered by review flow EventMsg::AgentMessageContentDelta(_) => { panic!("unexpected AgentMessageContentDelta surfaced during review") } EventMsg::AgentMessageDelta(_) => { panic!("unexpected AgentMessageDelta surfaced during review") } _ => false, }) .await; assert!(saw_entered && saw_exited, "missing review lifecycle events"); server.verify().await; } /// When the model returns structured JSON in a review, ensure only a single /// non-streaming AgentMessage is emitted; the UI consumes the structured /// result via ExitedReviewMode plus a final assistant message. // Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts. #[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))] #[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))] async fn review_does_not_emit_agent_message_on_structured_output() { skip_if_no_network!(); let review_json = serde_json::json!({ "findings": [ { "title": "Example", "body": "Structured review output.", "confidence_score": 0.5, "priority": 1, "code_location": { "absolute_file_path": "/tmp/file.rs", "line_range": {"start": 1, "end": 2} } } ], "overall_correctness": "ok", "overall_explanation": "ok", "overall_confidence_score": 0.5 }) .to_string(); let sse_template = r#"[ {"type":"response.output_item.done", "item":{ "type":"message", "role":"assistant", "content":[{"type":"output_text","text":__REVIEW__}] }}, {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let review_json_escaped = serde_json::to_string(&review_json).unwrap(); let sse_raw = sse_template.replace("__REVIEW__", &review_json_escaped); let server = start_responses_server_with_sse(&sse_raw, 1).await; let codex_home = TempDir::new().unwrap(); let codex = 
new_conversation_for_server(&server, &codex_home, |_| {}).await; codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "check structured".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); // Drain events until TaskComplete; ensure we only see a final // AgentMessage (no streaming assistant messages). let mut saw_entered = false; let mut saw_exited = false; let mut agent_messages = 0; wait_for_event(&codex, |event| match event { EventMsg::TaskComplete(_) => true, EventMsg::AgentMessage(_) => { agent_messages += 1; false } EventMsg::EnteredReviewMode(_) => { saw_entered = true; false } EventMsg::ExitedReviewMode(_) => { saw_exited = true; false } _ => false, }) .await; assert_eq!(1, agent_messages, "expected exactly one AgentMessage event"); assert!(saw_entered && saw_exited, "missing review lifecycle events"); server.verify().await; } /// Ensure that when a custom `review_model` is set in the config, the review /// request uses that model (and not the main chat model). #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn review_uses_custom_review_model_from_config() { skip_if_no_network!(); // Minimal stream: just a completed event let sse_raw = r#"[ {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let server = start_responses_server_with_sse(sse_raw, 1).await; let codex_home = TempDir::new().unwrap(); // Choose a review model different from the main model; ensure it is used. 
let codex = new_conversation_for_server(&server, &codex_home, |cfg| { cfg.model = Some("gpt-4.1".to_string()); cfg.review_model = "gpt-5.1".to_string(); }) .await; codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "use custom model".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); // Wait for completion let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await; let _closed = wait_for_event(&codex, |ev| { matches!( ev, EventMsg::ExitedReviewMode(ExitedReviewModeEvent { review_output: None }) ) }) .await; let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Assert the request body model equals the configured review model let requests = get_responses_requests(&server).await; let request = requests .first() .expect("expected POST request to /responses"); let body = request.body_json::<serde_json::Value>().unwrap(); assert_eq!(body["model"].as_str().unwrap(), "gpt-5.1"); server.verify().await; } /// When a review session begins, it must not prepend prior chat history from /// the parent session. The request `input` should contain only the review /// prompt from the user. // Windows CI only: bump to 4 workers to prevent SSE/event starvation and test timeouts. #[cfg_attr(windows, tokio::test(flavor = "multi_thread", worker_threads = 4))] #[cfg_attr(not(windows), tokio::test(flavor = "multi_thread", worker_threads = 2))] async fn review_input_isolated_from_parent_history() { skip_if_no_network!(); // Mock server for the single review request let sse_raw = r#"[ {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let server = start_responses_server_with_sse(sse_raw, 1).await; // Seed a parent session history via resume file with both user + assistant items. 
let codex_home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&codex_home).await; config.model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let session_file = codex_home.path().join("resume.jsonl"); { let mut f = tokio::fs::File::create(&session_file).await.unwrap(); let convo_id = Uuid::new_v4(); // Proper session_meta line (enveloped) with a conversation id let meta_line = serde_json::json!({ "timestamp": "2024-01-01T00:00:00.000Z", "type": "session_meta", "payload": { "id": convo_id, "timestamp": "2024-01-01T00:00:00Z", "instructions": null, "cwd": ".", "originator": "test_originator", "cli_version": "test_version", "model_provider": "test-provider" } }); f.write_all(format!("{meta_line}\n").as_bytes()) .await .unwrap(); // Prior user message (enveloped response_item) let user = codex_protocol::models::ResponseItem::Message { id: None, role: "user".to_string(), content: vec![codex_protocol::models::ContentItem::InputText { text: "parent: earlier user message".to_string(), }], }; let user_json = serde_json::to_value(&user).unwrap(); let user_line = serde_json::json!({ "timestamp": "2024-01-01T00:00:01.000Z", "type": "response_item", "payload": user_json }); f.write_all(format!("{user_line}\n").as_bytes()) .await .unwrap(); // Prior assistant message (enveloped response_item) let assistant = codex_protocol::models::ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![codex_protocol::models::ContentItem::OutputText { text: "parent: assistant reply".to_string(), }], }; let assistant_json = serde_json::to_value(&assistant).unwrap(); let assistant_line = serde_json::json!({ "timestamp": "2024-01-01T00:00:02.000Z", "type": "response_item", "payload": assistant_json }); f.write_all(format!("{assistant_line}\n").as_bytes()) .await .unwrap(); } let codex = resume_conversation_for_server(&server, &codex_home, session_file.clone(), |_| 
{}).await; // Submit review request; it must start fresh (no parent history in `input`). let review_prompt = "Please review only this".to_string(); codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: review_prompt.clone(), }, user_facing_hint: None, }, }) .await .unwrap(); let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await; let _closed = wait_for_event(&codex, |ev| { matches!( ev, EventMsg::ExitedReviewMode(ExitedReviewModeEvent { review_output: None }) ) }) .await; let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Assert the request `input` contains the environment context followed by the user review prompt. let requests = get_responses_requests(&server).await; let request = requests .first() .expect("expected POST request to /responses"); let body = request.body_json::<serde_json::Value>().unwrap(); let input = body["input"].as_array().expect("input array"); assert!( input.len() >= 2, "expected at least environment context and review prompt" ); let env_text = input .iter() .filter_map(|msg| msg["content"][0]["text"].as_str()) .find(|text| text.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG)) .expect("env text"); assert!( env_text.contains("<cwd>"), "environment context should include cwd" ); let review_text = input .iter() .filter_map(|msg| msg["content"][0]["text"].as_str()) .find(|text| *text == review_prompt) .expect("review prompt text"); assert_eq!( review_text, review_prompt, "user message should only contain the raw review prompt" ); // Ensure the REVIEW_PROMPT rubric is sent via instructions. let instructions = body["instructions"].as_str().expect("instructions string"); assert_eq!(instructions, REVIEW_PROMPT); // Also verify that a user interruption note was recorded in the rollout. 
let path = codex.rollout_path(); let text = std::fs::read_to_string(&path).expect("read rollout file"); let mut saw_interruption_message = false; for line in text.lines() { if line.trim().is_empty() { continue; } let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line"); let rl: RolloutLine = serde_json::from_value(v).expect("rollout line"); if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) = rl.item && role == "user" { for c in content { if let ContentItem::InputText { text } = c && text.contains("User initiated a review task, but was interrupted.") { saw_interruption_message = true; break; } } } if saw_interruption_message { break; } } assert!( saw_interruption_message, "expected user interruption message in rollout" ); server.verify().await; } /// After a review thread finishes, its conversation should be visible in the /// parent session so later turns can reference the results. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn review_history_surfaces_in_parent_session() { skip_if_no_network!(); // Respond to both the review request and the subsequent parent request. let sse_raw = r#"[ {"type":"response.output_item.done", "item":{ "type":"message", "role":"assistant", "content":[{"type":"output_text","text":"review assistant output"}] }}, {"type":"response.completed", "response": {"id": "__ID__"}} ]"#; let server = start_responses_server_with_sse(sse_raw, 2).await; let codex_home = TempDir::new().unwrap(); let codex = new_conversation_for_server(&server, &codex_home, |_| {}).await; // 1) Run a review turn that produces an assistant message (isolated in child). 
codex .submit(Op::Review { review_request: ReviewRequest { target: ReviewTarget::Custom { instructions: "Start a review".to_string(), }, user_facing_hint: None, }, }) .await .unwrap(); let _entered = wait_for_event(&codex, |ev| matches!(ev, EventMsg::EnteredReviewMode(_))).await; let _closed = wait_for_event(&codex, |ev| { matches!( ev, EventMsg::ExitedReviewMode(ExitedReviewModeEvent { review_output: Some(_) }) ) }) .await; let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // 2) Continue in the parent session; request input must not include any review items. let followup = "back to parent".to_string(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: followup.clone(), }], }) .await .unwrap(); let _complete = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // Inspect the second request (parent turn) input contents. // Parent turns include session initial messages (user_instructions, environment_context). // Critically, no messages from the review thread should appear. let requests = get_responses_requests(&server).await; assert_eq!(requests.len(), 2); let body = requests[1].body_json::<serde_json::Value>().unwrap(); let input = body["input"].as_array().expect("input array"); // Must include the followup as the last item for this turn let last = input.last().expect("at least one item in input"); assert_eq!(last["role"].as_str().unwrap(), "user"); let last_text = last["content"][0]["text"].as_str().unwrap(); assert_eq!(last_text, followup); // Ensure review-thread content is present for downstream turns. 
let contains_review_rollout_user = input.iter().any(|msg| { msg["content"][0]["text"] .as_str() .unwrap_or_default() .contains("User initiated a review task.") }); let contains_review_assistant = input.iter().any(|msg| { msg["content"][0]["text"] .as_str() .unwrap_or_default() .contains("review assistant output") }); assert!( contains_review_rollout_user, "review rollout user message missing from parent turn input" ); assert!( contains_review_assistant, "review assistant output missing from parent turn input" ); server.verify().await; } /// Start a mock Responses API server and mount the given SSE stream body. async fn start_responses_server_with_sse(sse_raw: &str, expected_requests: usize) -> MockServer { let server = MockServer::start().await; let sse = load_sse_fixture_with_id_from_str(sse_raw, &Uuid::new_v4().to_string()); Mock::given(method("POST")) .and(path("/v1/responses")) .respond_with( ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse.clone(), "text/event-stream"), ) .expect(expected_requests as u64) .mount(&server) .await; server } /// Create a conversation configured to talk to the provided mock server. #[expect(clippy::expect_used)] async fn new_conversation_for_server<F>( server: &MockServer, codex_home: &TempDir, mutator: F, ) -> Arc<CodexConversation> where F: FnOnce(&mut Config), { let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let mut config = load_default_config_for_test(codex_home).await; config.model_provider = model_provider; mutator(&mut config); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), ); conversation_manager .new_conversation(config) .await .expect("create conversation") .conversation } /// Create a conversation resuming from a rollout file, configured to talk to the provided mock server. 
#[expect(clippy::expect_used)] async fn resume_conversation_for_server<F>( server: &MockServer, codex_home: &TempDir, resume_path: std::path::PathBuf, mutator: F, ) -> Arc<CodexConversation> where F: FnOnce(&mut Config), { let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let mut config = load_default_config_for_test(codex_home).await; config.model_provider = model_provider; mutator(&mut config); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("Test API Key"), config.model_provider.clone(), ); let auth_manager = codex_core::AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); conversation_manager .resume_conversation_from_rollout(config, resume_path, auth_manager) .await .expect("resume conversation") .conversation }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/stream_error_allows_next_turn.rs
codex-rs/core/tests/suite/stream_error_allows_next_turn.rs
use codex_core::ModelProviderInfo; use codex_core::WireApi; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_protocol::user_input::UserInput; use core_test_support::load_sse_fixture_with_id; use core_test_support::skip_if_no_network; use core_test_support::test_codex::TestCodex; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::body_string_contains; use wiremock::matchers::method; use wiremock::matchers::path; fn sse_completed(id: &str) -> String { load_sse_fixture_with_id("tests/fixtures/completed_template.json", id) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn continue_after_stream_error() { skip_if_no_network!(); let server = MockServer::start().await; let fail = ResponseTemplate::new(500) .insert_header("content-type", "application/json") .set_body_string( serde_json::json!({ "error": {"type": "bad_request", "message": "synthetic client error"} }) .to_string(), ); // The provider below disables request retries (request_max_retries = 0), // so the failing request should only occur once. Mock::given(method("POST")) .and(path("/v1/responses")) .and(body_string_contains("first message")) .respond_with(fail) .up_to_n_times(2) .mount(&server) .await; let ok = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse_completed("resp_ok2"), "text/event-stream"); Mock::given(method("POST")) .and(path("/v1/responses")) .and(body_string_contains("follow up")) .respond_with(ok) .expect(1) .mount(&server) .await; // Configure a provider that uses the Responses API and points at our mock // server. Use an existing env var (PATH) to satisfy the auth plumbing // without requiring a real secret. 
let provider = ModelProviderInfo { name: "mock-openai".into(), base_url: Some(format!("{}/v1", server.uri())), env_key: Some("PATH".into()), env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(1), stream_max_retries: Some(1), stream_idle_timeout_ms: Some(2_000), requires_openai_auth: false, }; let TestCodex { codex, .. } = test_codex() .with_config(move |config| { config.base_instructions = Some("You are a helpful assistant".to_string()); config.model_provider = provider; }) .build(&server) .await .unwrap(); codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "first message".into(), }], }) .await .unwrap(); // Expect an Error followed by TaskComplete so the session is released. wait_for_event(&codex, |ev| matches!(ev, EventMsg::Error(_))).await; wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; // 2) Second turn: now send another prompt that should succeed using the // mock server SSE stream. If the agent failed to clear the running task on // error above, this submission would be rejected/queued indefinitely. codex .submit(Op::UserInput { items: vec![UserInput::Text { text: "follow up".into(), }], }) .await .unwrap(); wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/suite/fork_conversation.rs
codex-rs/core/tests/suite/fork_conversation.rs
use codex_core::CodexAuth; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::NewConversation; use codex_core::built_in_model_providers; use codex_core::parse_turn_item; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::RolloutItem; use codex_core::protocol::RolloutLine; use codex_protocol::items::TurnItem; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; use core_test_support::skip_if_no_network; use core_test_support::wait_for_event; use tempfile::TempDir; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; /// Build minimal SSE stream with completed marker using the JSON fixture. fn sse_completed(id: &str) -> String { core_test_support::load_sse_fixture_with_id("tests/fixtures/completed_template.json", id) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn fork_conversation_twice_drops_to_first_message() { skip_if_no_network!(); // Start a mock server that completes three turns. let server = MockServer::start().await; let sse = sse_completed("resp"); let first = ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(sse.clone(), "text/event-stream"); // Expect three calls to /v1/responses – one per user input. Mock::given(method("POST")) .and(path("/v1/responses")) .respond_with(first) .expect(3) .mount(&server) .await; // Configure Codex to use the mock server. 
let model_provider = ModelProviderInfo { base_url: Some(format!("{}/v1", server.uri())), ..built_in_model_providers()["openai"].clone() }; let home = TempDir::new().unwrap(); let mut config = load_default_config_for_test(&home).await; config.model_provider = model_provider.clone(); let config_for_fork = config.clone(); let conversation_manager = ConversationManager::with_models_provider( CodexAuth::from_api_key("dummy"), config.model_provider.clone(), ); let NewConversation { conversation: codex, .. } = conversation_manager .new_conversation(config) .await .expect("create conversation"); // Send three user messages; wait for three completed turns. for text in ["first", "second", "third"] { codex .submit(Op::UserInput { items: vec![UserInput::Text { text: text.to_string(), }], }) .await .unwrap(); let _ = wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await; } // Request history from the base conversation to obtain rollout path. let base_path = codex.rollout_path(); // GetHistory flushes before returning the path; no wait needed. // Helper: read rollout items (excluding SessionMeta) from a JSONL path. let read_items = |p: &std::path::Path| -> Vec<RolloutItem> { let text = std::fs::read_to_string(p).expect("read rollout file"); let mut items: Vec<RolloutItem> = Vec::new(); for line in text.lines() { if line.trim().is_empty() { continue; } let v: serde_json::Value = serde_json::from_str(line).expect("jsonl line"); let rl: RolloutLine = serde_json::from_value(v).expect("rollout line"); match rl.item { RolloutItem::SessionMeta(_) => {} other => items.push(other), } } items }; // Compute expected prefixes after each fork by truncating base rollout // strictly before the nth user input (0-based). 
let base_items = read_items(&base_path); let find_user_input_positions = |items: &[RolloutItem]| -> Vec<usize> { let mut pos = Vec::new(); for (i, it) in items.iter().enumerate() { if let RolloutItem::ResponseItem(response_item) = it && let Some(TurnItem::UserMessage(_)) = parse_turn_item(response_item) { // Consider any user message as an input boundary; recorder stores both EventMsg and ResponseItem. // We specifically look for input items, which are represented as ContentItem::InputText. pos.push(i); } } pos }; let user_inputs = find_user_input_positions(&base_items); // After cutting at nth user input (n=1 → second user message), cut strictly before that input. let cut1 = user_inputs.get(1).copied().unwrap_or(0); let expected_after_first: Vec<RolloutItem> = base_items[..cut1].to_vec(); // After dropping again (n=1 on fork1), compute expected relative to fork1's rollout. // Fork once with n=1 → drops the last user input and everything after. let NewConversation { conversation: codex_fork1, .. } = conversation_manager .fork_conversation(1, config_for_fork.clone(), base_path.clone()) .await .expect("fork 1"); let fork1_path = codex_fork1.rollout_path(); // GetHistory on fork1 flushed; the file is ready. let fork1_items = read_items(&fork1_path); pretty_assertions::assert_eq!( serde_json::to_value(&fork1_items).unwrap(), serde_json::to_value(&expected_after_first).unwrap() ); // Fork again with n=0 → drops the (new) last user message, leaving only the first. let NewConversation { conversation: codex_fork2, .. } = conversation_manager .fork_conversation(0, config_for_fork.clone(), fork1_path.clone()) .await .expect("fork 2"); let fork2_path = codex_fork2.rollout_path(); // GetHistory on fork2 flushed; the file is ready. 
let fork1_items = read_items(&fork1_path); let fork1_user_inputs = find_user_input_positions(&fork1_items); let cut_last_on_fork1 = fork1_user_inputs .get(fork1_user_inputs.len().saturating_sub(1)) .copied() .unwrap_or(0); let expected_after_second: Vec<RolloutItem> = fork1_items[..cut_last_on_fork1].to_vec(); let fork2_items = read_items(&fork2_path); pretty_assertions::assert_eq!( serde_json::to_value(&fork2_items).unwrap(), serde_json::to_value(&expected_after_second).unwrap() ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/responses.rs
codex-rs/core/tests/common/responses.rs
use std::sync::Arc;
use std::sync::Mutex;

use anyhow::Result;
use base64::Engine;
use codex_protocol::openai_models::ModelsResponse;
use serde_json::Value;
use wiremock::BodyPrintLimit;
use wiremock::Match;
use wiremock::Mock;
use wiremock::MockBuilder;
use wiremock::MockServer;
use wiremock::Respond;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path_regex;

use crate::test_codex::ApplyPatchModelOutput;

/// Accumulates every request captured by a mounted `/responses` mock so tests
/// can inspect what the client actually sent. Cloning shares the same
/// underlying buffer (`Arc<Mutex<…>>`).
#[derive(Debug, Clone)]
pub struct ResponseMock {
    requests: Arc<Mutex<Vec<ResponsesRequest>>>,
}

impl ResponseMock {
    fn new() -> Self {
        Self {
            requests: Arc::new(Mutex::new(Vec::new())),
        }
    }

    /// Returns the sole captured request; panics unless exactly one was seen.
    pub fn single_request(&self) -> ResponsesRequest {
        let requests = self.requests.lock().unwrap();
        if requests.len() != 1 {
            panic!("expected 1 request, got {}", requests.len());
        }
        requests.first().unwrap().clone()
    }

    /// Snapshot of all captured requests, in arrival order.
    pub fn requests(&self) -> Vec<ResponsesRequest> {
        self.requests.lock().unwrap().clone()
    }

    /// Most recently captured request, if any.
    pub fn last_request(&self) -> Option<ResponsesRequest> {
        self.requests.lock().unwrap().last().cloned()
    }

    /// Returns true if any captured request contains a `function_call` with the
    /// provided `call_id`.
    pub fn saw_function_call(&self, call_id: &str) -> bool {
        self.requests()
            .iter()
            .any(|req| req.has_function_call(call_id))
    }

    /// Returns the `output` string for a matching `function_call_output` with
    /// the provided `call_id`, searching across all captured requests.
    pub fn function_call_output_text(&self, call_id: &str) -> Option<String> {
        self.requests()
            .iter()
            .find_map(|req| req.function_call_output_text(call_id))
    }
}

/// Thin wrapper over a captured `wiremock::Request` with JSON accessors for
/// the Responses API request shape.
#[derive(Debug, Clone)]
pub struct ResponsesRequest(wiremock::Request);

impl ResponsesRequest {
    /// Full request body parsed as JSON; panics if the body is not valid JSON.
    pub fn body_json(&self) -> Value {
        self.0.body_json().unwrap()
    }

    /// Returns all `input_text` spans from `message` inputs for the provided role.
pub fn message_input_texts(&self, role: &str) -> Vec<String> { self.inputs_of_type("message") .into_iter() .filter(|item| item.get("role").and_then(Value::as_str) == Some(role)) .filter_map(|item| item.get("content").and_then(Value::as_array).cloned()) .flatten() .filter(|span| span.get("type").and_then(Value::as_str) == Some("input_text")) .filter_map(|span| span.get("text").and_then(Value::as_str).map(str::to_owned)) .collect() } pub fn input(&self) -> Vec<Value> { self.0.body_json::<Value>().unwrap()["input"] .as_array() .expect("input array not found in request") .clone() } pub fn inputs_of_type(&self, ty: &str) -> Vec<Value> { self.input() .iter() .filter(|item| item.get("type").and_then(Value::as_str) == Some(ty)) .cloned() .collect() } pub fn function_call_output(&self, call_id: &str) -> Value { self.call_output(call_id, "function_call_output") } pub fn custom_tool_call_output(&self, call_id: &str) -> Value { self.call_output(call_id, "custom_tool_call_output") } pub fn call_output(&self, call_id: &str, call_type: &str) -> Value { self.input() .iter() .find(|item| { item.get("type").unwrap() == call_type && item.get("call_id").unwrap() == call_id }) .cloned() .unwrap_or_else(|| panic!("function call output {call_id} item not found in request")) } /// Returns true if this request's `input` contains a `function_call` with /// the specified `call_id`. pub fn has_function_call(&self, call_id: &str) -> bool { self.input().iter().any(|item| { item.get("type").and_then(Value::as_str) == Some("function_call") && item.get("call_id").and_then(Value::as_str) == Some(call_id) }) } /// If present, returns the `output` string of the `function_call_output` /// entry matching `call_id` in this request's `input`. 
    pub fn function_call_output_text(&self, call_id: &str) -> Option<String> {
        // Bind the Vec so the borrow in `find` outlives the closure.
        let binding = self.input();
        let item = binding.iter().find(|item| {
            item.get("type").and_then(Value::as_str) == Some("function_call_output")
                && item.get("call_id").and_then(Value::as_str) == Some(call_id)
        })?;
        item.get("output")
            .and_then(Value::as_str)
            .map(str::to_string)
    }

    pub fn function_call_output_content_and_success(
        &self,
        call_id: &str,
    ) -> Option<(Option<String>, Option<bool>)> {
        self.call_output_content_and_success(call_id, "function_call_output")
    }

    pub fn custom_tool_call_output_content_and_success(
        &self,
        call_id: &str,
    ) -> Option<(Option<String>, Option<bool>)> {
        self.call_output_content_and_success(call_id, "custom_tool_call_output")
    }

    /// Extracts `(content, success)` from an output item whose `output` may be
    /// either a bare string or a structured `{content, success}` object.
    fn call_output_content_and_success(
        &self,
        call_id: &str,
        call_type: &str,
    ) -> Option<(Option<String>, Option<bool>)> {
        let output = self
            .call_output(call_id, call_type)
            .get("output")
            .cloned()
            .unwrap_or(Value::Null);
        match output {
            Value::String(text) => Some((Some(text), None)),
            Value::Object(obj) => Some((
                obj.get("content")
                    .and_then(Value::as_str)
                    .map(str::to_string),
                obj.get("success").and_then(Value::as_bool),
            )),
            _ => Some((None, None)),
        }
    }

    /// Header value by name, if present and valid UTF-8.
    pub fn header(&self, name: &str) -> Option<String> {
        self.0
            .headers
            .get(name)
            .and_then(|v| v.to_str().ok())
            .map(str::to_string)
    }

    pub fn path(&self) -> String {
        self.0.url.path().to_string()
    }

    /// First query parameter value with the given key, if any.
    pub fn query_param(&self, name: &str) -> Option<String> {
        self.0
            .url
            .query_pairs()
            .find(|(k, _)| k == name)
            .map(|(_, v)| v.to_string())
    }
}

/// Accumulates requests captured by a mounted `/models` mock; cloning shares
/// the same buffer.
#[derive(Debug, Clone)]
pub struct ModelsMock {
    requests: Arc<Mutex<Vec<wiremock::Request>>>,
}

impl ModelsMock {
    fn new() -> Self {
        Self {
            requests: Arc::new(Mutex::new(Vec::new())),
        }
    }

    /// Snapshot of all captured requests, in arrival order.
    pub fn requests(&self) -> Vec<wiremock::Request> {
        self.requests.lock().unwrap().clone()
    }

    /// Path of the sole captured request; panics unless exactly one was seen.
    pub fn single_request_path(&self) -> String {
        let requests = self.requests.lock().unwrap();
        if requests.len() != 1 {
            panic!("expected 1 request, got {}", requests.len());
        }
        requests.first().unwrap().url.path().to_string()
    }
}

// NOTE: `matches` doubles as the capture hook — it records every request it
// is asked about and always returns true, so the paired path/method matchers
// decide whether the mock actually responds.
impl Match for ModelsMock {
    fn matches(&self, request: &wiremock::Request) -> bool {
        self.requests.lock().unwrap().push(request.clone());
        true
    }
}

// Same capture-via-matcher trick as `ModelsMock`, plus invariant validation.
impl Match for ResponseMock {
    fn matches(&self, request: &wiremock::Request) -> bool {
        self.requests
            .lock()
            .unwrap()
            .push(ResponsesRequest(request.clone()));
        // Enforce invariant checks on every request body captured by the mock.
        // Panic on orphan tool outputs or calls to catch regressions early.
        validate_request_body_invariants(request);
        true
    }
}

/// Build an SSE stream body from a list of JSON events.
pub fn sse(events: Vec<Value>) -> String {
    use std::fmt::Write as _;
    let mut out = String::new();
    for ev in events {
        let kind = ev.get("type").and_then(|v| v.as_str()).unwrap();
        writeln!(&mut out, "event: {kind}").unwrap();
        // An object holding only `type` produces an event with no data line.
        if !ev.as_object().map(|o| o.len() == 1).unwrap_or(false) {
            write!(&mut out, "data: {ev}\n\n").unwrap();
        } else {
            out.push('\n');
        }
    }
    out
}

/// Convenience: SSE event for a completed response with a specific id.
pub fn ev_completed(id: &str) -> Value {
    serde_json::json!({
        "type": "response.completed",
        "response": {
            "id": id,
            "usage": {"input_tokens":0,"input_tokens_details":null,"output_tokens":0,"output_tokens_details":null,"total_tokens":0}
        }
    })
}

/// Convenience: SSE event for a created response with a specific id.
pub fn ev_response_created(id: &str) -> Value {
    serde_json::json!({
        "type": "response.created",
        "response": {
            "id": id,
        }
    })
}

/// Like [`ev_completed`], but reports the given token count as both input and
/// total usage.
pub fn ev_completed_with_tokens(id: &str, total_tokens: i64) -> Value {
    serde_json::json!({
        "type": "response.completed",
        "response": {
            "id": id,
            "usage": {
                "input_tokens": total_tokens,
                "input_tokens_details": null,
                "output_tokens": 0,
                "output_tokens_details": null,
                "total_tokens": total_tokens
            }
        }
    })
}

/// Convenience: SSE event for a single assistant message output item.
pub fn ev_assistant_message(id: &str, text: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "message",
            "role": "assistant",
            "id": id,
            "content": [{"type": "output_text", "text": text}]
        }
    })
}

/// SSE event announcing an assistant message item (`output_item.added`).
pub fn ev_message_item_added(id: &str, text: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.added",
        "item": {
            "type": "message",
            "role": "assistant",
            "id": id,
            "content": [{"type": "output_text", "text": text}]
        }
    })
}

/// SSE event carrying a streamed output-text delta.
pub fn ev_output_text_delta(delta: &str) -> Value {
    serde_json::json!({
        "type": "response.output_text.delta",
        "delta": delta,
    })
}

/// SSE event for a completed reasoning item. `raw_content` is joined,
/// prefixed with 550 filler bytes, and base64-encoded into
/// `encrypted_content`; when non-empty it is also emitted as plain
/// `reasoning_text` content entries.
pub fn ev_reasoning_item(id: &str, summary: &[&str], raw_content: &[&str]) -> Value {
    let summary_entries: Vec<Value> = summary
        .iter()
        .map(|text| serde_json::json!({"type": "summary_text", "text": text}))
        .collect();
    let overhead = "b".repeat(550);
    let raw_content_joined = raw_content.join("");
    let encrypted_content =
        base64::engine::general_purpose::STANDARD.encode(overhead + raw_content_joined.as_str());
    let mut event = serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "reasoning",
            "id": id,
            "summary": summary_entries,
            "encrypted_content": encrypted_content,
        }
    });
    if !raw_content.is_empty() {
        let content_entries: Vec<Value> = raw_content
            .iter()
            .map(|text| serde_json::json!({"type": "reasoning_text", "text": text}))
            .collect();
        event["item"]["content"] = Value::Array(content_entries);
    }
    event
}

/// SSE event announcing a reasoning item with the given summary entries.
pub fn ev_reasoning_item_added(id: &str, summary: &[&str]) -> Value {
    let summary_entries: Vec<Value> = summary
        .iter()
        .map(|text| serde_json::json!({"type": "summary_text", "text": text}))
        .collect();
    serde_json::json!({
        "type": "response.output_item.added",
        "item": {
            "type": "reasoning",
            "id": id,
            "summary": summary_entries,
        }
    })
}

pub fn ev_reasoning_summary_text_delta(delta: &str) -> Value {
    serde_json::json!({
        "type": "response.reasoning_summary_text.delta",
        "delta": delta,
        "summary_index": 0,
    })
}

pub fn ev_reasoning_text_delta(delta: &str) -> Value {
    serde_json::json!({
        "type": "response.reasoning_text.delta",
        "delta": delta,
        "content_index": 0,
    })
}

pub fn ev_web_search_call_added(id: &str, status: &str, query: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.added",
        "item": {
            "type": "web_search_call",
            "id": id,
            "status": status,
            "action": {"type": "search", "query": query}
        }
    })
}

pub fn ev_web_search_call_done(id: &str, status: &str, query: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "web_search_call",
            "id": id,
            "status": status,
            "action": {"type": "search", "query": query}
        }
    })
}

/// SSE event for a completed `function_call`; `arguments` is the raw JSON
/// string the model would have produced.
pub fn ev_function_call(call_id: &str, name: &str, arguments: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "function_call",
            "call_id": call_id,
            "name": name,
            "arguments": arguments
        }
    })
}

pub fn ev_custom_tool_call(call_id: &str, name: &str, input: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "custom_tool_call",
            "call_id": call_id,
            "name": name,
            "input": input
        }
    })
}

pub fn ev_local_shell_call(call_id: &str, status: &str, command: Vec<&str>) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "local_shell_call",
            "call_id": call_id,
            "status": status,
            "action": {
                "type": "exec",
                "command": command,
            }
        }
    })
}

/// Dispatches to the `apply_patch` event builder matching `output_type`.
pub fn ev_apply_patch_call(
    call_id: &str,
    patch: &str,
    output_type: ApplyPatchModelOutput,
) -> Value {
    match output_type {
        ApplyPatchModelOutput::Freeform => ev_apply_patch_custom_tool_call(call_id, patch),
        ApplyPatchModelOutput::Function => ev_apply_patch_function_call(call_id, patch),
        ApplyPatchModelOutput::Shell => ev_apply_patch_shell_call(call_id, patch),
        ApplyPatchModelOutput::ShellViaHeredoc => {
            ev_apply_patch_shell_call_via_heredoc(call_id, patch)
        }
        ApplyPatchModelOutput::ShellCommandViaHeredoc => {
            ev_apply_patch_shell_command_call_via_heredoc(call_id, patch)
        }
    }
}

/// Convenience: SSE event for an `apply_patch` custom tool call with raw patch
/// text. This mirrors the payload produced by the Responses API when the model
/// invokes `apply_patch` directly (before we convert it to a function call).
pub fn ev_apply_patch_custom_tool_call(call_id: &str, patch: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "custom_tool_call",
            "name": "apply_patch",
            "input": patch,
            "call_id": call_id
        }
    })
}

/// Convenience: SSE event for an `apply_patch` function call. The Responses API
/// wraps the patch content in a JSON string under the `input` key; we recreate
/// the same structure so downstream code exercises the full parsing path.
pub fn ev_apply_patch_function_call(call_id: &str, patch: &str) -> Value {
    let arguments = serde_json::json!({ "input": patch });
    let arguments = serde_json::to_string(&arguments).expect("serialize apply_patch arguments");
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "function_call",
            "name": "apply_patch",
            "arguments": arguments,
            "call_id": call_id
        }
    })
}

pub fn ev_shell_command_call(call_id: &str, command: &str) -> Value {
    let args = serde_json::json!({ "command": command });
    ev_shell_command_call_with_args(call_id, &args)
}

pub fn ev_shell_command_call_with_args(call_id: &str, args: &serde_json::Value) -> Value {
    let arguments = serde_json::to_string(args).expect("serialize shell command arguments");
    ev_function_call(call_id, "shell_command", &arguments)
}

/// `apply_patch` invoked through the `shell` tool as a direct argv call.
pub fn ev_apply_patch_shell_call(call_id: &str, patch: &str) -> Value {
    let args = serde_json::json!({ "command": ["apply_patch", patch] });
    let arguments = serde_json::to_string(&args).expect("serialize apply_patch arguments");
    ev_function_call(call_id, "shell", &arguments)
}

/// `apply_patch` invoked through `bash -lc` with the patch fed via heredoc.
pub fn ev_apply_patch_shell_call_via_heredoc(call_id: &str, patch: &str) -> Value {
    let script = format!("apply_patch <<'EOF'\n{patch}\nEOF\n");
    let args = serde_json::json!({ "command": ["bash", "-lc", script] });
    let arguments = serde_json::to_string(&args).expect("serialize apply_patch arguments");
    ev_function_call(call_id, "shell", &arguments)
}

/// `apply_patch` invoked through the `shell_command` tool via heredoc.
pub fn ev_apply_patch_shell_command_call_via_heredoc(call_id: &str, patch: &str) -> Value {
    let args = serde_json::json!({
        "command": format!("apply_patch <<'EOF'\n{patch}\nEOF\n")
    });
    let arguments = serde_json::to_string(&args).expect("serialize apply_patch arguments");
    ev_function_call(call_id, "shell_command", &arguments)
}

/// Full SSE body for a `response.failed` terminal event carrying an error.
pub fn sse_failed(id: &str, code: &str, message: &str) -> String {
    sse(vec![serde_json::json!({
        "type": "response.failed",
        "response": {
            "id": id,
            "error": {"code": code, "message": message}
        }
    })])
}

/// Wraps a raw SSE body in a 200 response with the event-stream content type.
pub fn sse_response(body: String) -> ResponseTemplate {
    ResponseTemplate::new(200)
        .insert_header("content-type", "text/event-stream")
        .set_body_raw(body, "text/event-stream")
}

/// Mounts a one-shot mock for POST `…/responses` serving `response`.
pub async fn mount_response_once(server: &MockServer, response: ResponseTemplate) -> ResponseMock {
    let (mock, response_mock) = base_mock();
    mock.respond_with(response)
        .up_to_n_times(1)
        .mount(server)
        .await;
    response_mock
}

/// Like [`mount_response_once`], gated on an additional request matcher.
pub async fn mount_response_once_match<M>(
    server: &MockServer,
    matcher: M,
    response: ResponseTemplate,
) -> ResponseMock
where
    M: wiremock::Match + Send + Sync + 'static,
{
    let (mock, response_mock) = base_mock();
    mock.and(matcher)
        .respond_with(response)
        .up_to_n_times(1)
        .mount(server)
        .await;
    response_mock
}

/// Base mock for POST `…/responses`; the returned `ResponseMock` doubles as a
/// matcher that captures every request.
fn base_mock() -> (MockBuilder, ResponseMock) {
    let response_mock = ResponseMock::new();
    let mock = Mock::given(method("POST"))
        .and(path_regex(".*/responses$"))
        .and(response_mock.clone());
    (mock, response_mock)
}

/// Base mock for POST `…/responses/compact`.
fn compact_mock() -> (MockBuilder, ResponseMock) {
    let response_mock = ResponseMock::new();
    let mock = Mock::given(method("POST"))
        .and(path_regex(".*/responses/compact$"))
        .and(response_mock.clone());
    (mock, response_mock)
}

/// Base mock for GET `…/models`.
fn models_mock() -> (MockBuilder, ModelsMock) {
    let models_mock = ModelsMock::new();
    let mock = Mock::given(method("GET"))
        .and(path_regex(".*/models$"))
        .and(models_mock.clone());
    (mock, models_mock)
}

pub async fn mount_sse_once_match<M>(server: &MockServer, matcher: M, body: String) -> ResponseMock
where
    M: wiremock::Match + Send + Sync + 'static,
{
    let (mock, response_mock) = base_mock();
    mock.and(matcher)
        .respond_with(sse_response(body))
        .up_to_n_times(1)
        .mount(server)
        .await;
    response_mock
}

pub async fn mount_sse_once(server: &MockServer, body: String) -> ResponseMock {
    let (mock, response_mock) = base_mock();
    mock.respond_with(sse_response(body))
        .up_to_n_times(1)
        .mount(server)
        .await;
    response_mock
}

pub async fn mount_compact_json_once_match<M>(
    server: &MockServer,
    matcher: M,
    body: serde_json::Value,
) -> ResponseMock
where
    M: wiremock::Match + Send + Sync + 'static,
{
    let (mock, response_mock) = compact_mock();
    mock.and(matcher)
        .respond_with(
            ResponseTemplate::new(200)
                .insert_header("content-type", "application/json")
                .set_body_json(body.clone()),
        )
        .up_to_n_times(1)
        .mount(server)
        .await;
    response_mock
}

pub async fn mount_compact_json_once(server: &MockServer, body: serde_json::Value) -> ResponseMock {
    let (mock, response_mock) = compact_mock();
    mock.respond_with(
        ResponseTemplate::new(200)
            .insert_header("content-type", "application/json")
            .set_body_json(body.clone()),
    )
    .up_to_n_times(1)
    .mount(server)
    .await;
    response_mock
}

pub async fn mount_models_once(server: &MockServer, body: ModelsResponse) -> ModelsMock {
    let (mock, models_mock) = models_mock();
    mock.respond_with(
        ResponseTemplate::new(200)
            .insert_header("content-type", "application/json")
            .set_body_json(body.clone()),
    )
    .up_to_n_times(1)
    .mount(server)
    .await;
    models_mock
}

pub async fn mount_models_once_with_etag(
    server: &MockServer,
    body: ModelsResponse,
    etag: &str,
) -> ModelsMock {
    let (mock, models_mock) = models_mock();
    mock.respond_with(
        ResponseTemplate::new(200)
            .insert_header("content-type", "application/json")
            // ModelsClient reads the ETag header, not a JSON field.
            .insert_header("ETag", etag)
            .set_body_json(body.clone()),
    )
    .up_to_n_times(1)
    .mount(server)
    .await;
    models_mock
}

/// Starts a mock server with a generous body-print limit and a default empty
/// `/models` response pre-mounted.
pub async fn start_mock_server() -> MockServer {
    let server = MockServer::builder()
        .body_print_limit(BodyPrintLimit::Limited(80_000))
        .start()
        .await;
    // Provide a default `/models` response so tests remain hermetic when the client queries it.
    let _ = mount_models_once(&server, ModelsResponse { models: Vec::new() }).await;
    server
}

// todo(aibrahim): remove this and use our search matching patterns directly
/// Get all POST requests to `/responses` endpoints from the mock server.
/// Filters out GET requests (e.g., `/models`).
pub async fn get_responses_requests(server: &MockServer) -> Vec<wiremock::Request> {
    server
        .received_requests()
        .await
        .expect("mock server should not fail")
        .into_iter()
        .filter(|req| req.method == "POST" && req.url.path().ends_with("/responses"))
        .collect()
}

// todo(aibrahim): remove this and use our search matching patterns directly
/// Get request bodies as JSON values from POST requests to `/responses` endpoints.
/// Filters out GET requests (e.g., `/models`).
pub async fn get_responses_request_bodies(server: &MockServer) -> Vec<Value> {
    let captured = get_responses_requests(server).await;
    let mut bodies = Vec::with_capacity(captured.len());
    for request in captured {
        let parsed = request
            .body_json::<Value>()
            .expect("request body to be valid JSON");
        bodies.push(parsed);
    }
    bodies
}

/// The pair of mocks mounted by [`mount_function_call_agent_response`]: one
/// for the function-call turn, one for the final assistant message.
#[derive(Clone)]
pub struct FunctionCallResponseMocks {
    pub function_call: ResponseMock,
    pub completion: ResponseMock,
}

/// Mounts a two-turn agent exchange: the first `/responses` POST yields a
/// single function call, the second yields a "done" assistant message.
pub async fn mount_function_call_agent_response(
    server: &MockServer,
    call_id: &str,
    arguments: &str,
    tool_name: &str,
) -> FunctionCallResponseMocks {
    // Turn 1: the model emits exactly one function call.
    let call_turn = sse(vec![
        ev_response_created("resp-1"),
        ev_function_call(call_id, tool_name, arguments),
        ev_completed("resp-1"),
    ]);
    let function_call = mount_sse_once(server, call_turn).await;

    // Turn 2: after the tool output is sent back, the model replies "done".
    let final_turn = sse(vec![
        ev_assistant_message("msg-1", "done"),
        ev_completed("resp-2"),
    ]);
    let completion = mount_sse_once(server, final_turn).await;

    FunctionCallResponseMocks {
        function_call,
        completion,
    }
}

/// Mounts a sequence of SSE response bodies and serves them in order for each
/// POST to `/v1/responses`. Panics if more requests are received than bodies
/// provided. Also asserts the exact number of expected calls.
pub async fn mount_sse_sequence(server: &MockServer, bodies: Vec<String>) -> ResponseMock {
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;

    // Responder that serves `responses[i]` for the i-th request and panics
    // once the prepared bodies are exhausted.
    struct SeqResponder {
        num_calls: AtomicUsize,
        responses: Vec<String>,
    }

    impl Respond for SeqResponder {
        fn respond(&self, _: &wiremock::Request) -> ResponseTemplate {
            // fetch_add hands each concurrent request a distinct index.
            let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst);
            match self.responses.get(call_num) {
                Some(body) => ResponseTemplate::new(200)
                    .insert_header("content-type", "text/event-stream")
                    .set_body_string(body.clone()),
                None => panic!("no response for {call_num}"),
            }
        }
    }

    let num_calls = bodies.len();
    let responder = SeqResponder {
        num_calls: AtomicUsize::new(0),
        responses: bodies,
    };
    let (mock, response_mock) = base_mock();
    // `expect` makes the mock's drop-time verification require exactly
    // `num_calls` requests.
    mock.respond_with(responder)
        .up_to_n_times(num_calls as u64)
        .expect(num_calls as u64)
        .mount(server)
        .await;
    response_mock
}

/// Validate invariants on the request body sent to `/v1/responses`.
///
/// - No `function_call_output`/`custom_tool_call_output` with missing/empty `call_id`.
/// - Every `function_call_output` must match a prior `function_call` or
///   `local_shell_call` with the same `call_id` in the same `input`.
/// - Every `custom_tool_call_output` must match a prior `custom_tool_call`.
/// - Additionally, enforce symmetry: every `function_call`/`custom_tool_call`
///   in the `input` must have a matching output entry.
fn validate_request_body_invariants(request: &wiremock::Request) {
    // Skip GET requests (e.g., /models)
    if request.method != "POST" || !request.url.path().ends_with("/responses") {
        return;
    }
    // Non-JSON bodies are ignored; a POST to /responses without an `input`
    // array is a hard failure.
    let Ok(body): Result<Value, _> = request.body_json() else {
        return;
    };
    let Some(items) = body.get("input").and_then(Value::as_array) else {
        panic!("input array not found in request");
    };

    use std::collections::HashSet;

    // A call_id is only usable when present and non-empty.
    fn get_call_id(item: &Value) -> Option<&str> {
        item.get("call_id")
            .and_then(Value::as_str)
            .filter(|id| !id.is_empty())
    }

    // Collects call_ids of items of `kind`; items without a call_id are skipped.
    fn gather_ids(items: &[Value], kind: &str) -> HashSet<String> {
        items
            .iter()
            .filter(|item| item.get("type").and_then(Value::as_str) == Some(kind))
            .filter_map(get_call_id)
            .map(str::to_string)
            .collect()
    }

    // Like `gather_ids`, but a missing/empty call_id on an output item is an
    // invariant violation and panics with `missing_msg`.
    fn gather_output_ids(items: &[Value], kind: &str, missing_msg: &str) -> HashSet<String> {
        items
            .iter()
            .filter(|item| item.get("type").and_then(Value::as_str) == Some(kind))
            .map(|item| {
                let Some(id) = get_call_id(item) else {
                    panic!("{missing_msg}");
                };
                id.to_string()
            })
            .collect()
    }

    let function_calls = gather_ids(items, "function_call");
    let custom_tool_calls = gather_ids(items, "custom_tool_call");
    let local_shell_calls = gather_ids(items, "local_shell_call");

    let function_call_outputs = gather_output_ids(
        items,
        "function_call_output",
        "orphan function_call_output with empty call_id should be dropped",
    );
    let custom_tool_call_outputs = gather_output_ids(
        items,
        "custom_tool_call_output",
        "orphan custom_tool_call_output with empty call_id should be dropped",
    );

    // Every output must reference a call present in the same `input` …
    for cid in &function_call_outputs {
        assert!(
            function_calls.contains(cid) || local_shell_calls.contains(cid),
            "function_call_output without matching call in input: {cid}",
        );
    }
    for cid in &custom_tool_call_outputs {
        assert!(
            custom_tool_calls.contains(cid),
            "custom_tool_call_output without matching call in input: {cid}",
        );
    }
    // … and, symmetrically, every call must have been answered.
    for cid in &function_calls {
        assert!(
            function_call_outputs.contains(cid),
            "Function call output is missing for call id: {cid}",
        );
    }
    for cid in &custom_tool_calls {
        assert!(
            custom_tool_call_outputs.contains(cid),
            "Custom tool call output is missing for call id: {cid}",
        );
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/lib.rs
codex-rs/core/tests/common/lib.rs
#![expect(clippy::expect_used)]
use tempfile::TempDir;

use codex_core::CodexConversation;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::config::ConfigOverrides;
use codex_utils_absolute_path::AbsolutePathBuf;
use regex_lite::Regex;
use std::path::PathBuf;

pub mod process;
pub mod responses;
pub mod streaming_sse;
pub mod test_codex;
pub mod test_codex_exec;

/// Compiles `pattern` and asserts that it matches `actual`, returning the
/// captures. Panics with a readable message if compilation or matching fails.
#[track_caller]
pub fn assert_regex_match<'s>(pattern: &str, actual: &'s str) -> regex_lite::Captures<'s> {
    let regex = Regex::new(pattern).unwrap_or_else(|err| {
        panic!("failed to compile regex {pattern:?}: {err}");
    });
    regex
        .captures(actual)
        .unwrap_or_else(|| panic!("regex {pattern:?} did not match {actual:?}"))
}

/// Maps a Unix-style path to a platform-appropriate `PathBuf`. On Windows,
/// `windows_path` is used when provided; otherwise the Unix segments are
/// re-rooted under `C:\`.
pub fn test_path_buf_with_windows(unix_path: &str, windows_path: Option<&str>) -> PathBuf {
    if cfg!(windows) {
        if let Some(windows) = windows_path {
            PathBuf::from(windows)
        } else {
            let mut path = PathBuf::from(r"C:\");
            path.extend(
                unix_path
                    .trim_start_matches('/')
                    .split('/')
                    .filter(|segment| !segment.is_empty()),
            );
            path
        }
    } else {
        PathBuf::from(unix_path)
    }
}

pub fn test_path_buf(unix_path: &str) -> PathBuf {
    test_path_buf_with_windows(unix_path, None)
}

pub fn test_absolute_path_with_windows(
    unix_path: &str,
    windows_path: Option<&str>,
) -> AbsolutePathBuf {
    AbsolutePathBuf::from_absolute_path(test_path_buf_with_windows(unix_path, windows_path))
        .expect("test path should be absolute")
}

pub fn test_absolute_path(unix_path: &str) -> AbsolutePathBuf {
    test_absolute_path_with_windows(unix_path, None)
}

/// Platform-appropriate temp directory path used by tests.
pub fn test_tmp_path() -> AbsolutePathBuf {
    test_absolute_path_with_windows("/tmp", Some(r"C:\Users\codex\AppData\Local\Temp"))
}

pub fn test_tmp_path_buf() -> PathBuf {
    test_tmp_path().into_path_buf()
}

/// Returns a default `Config` whose on-disk state is confined to the provided
/// temporary directory. Using a per-test directory keeps tests hermetic and
/// avoids clobbering a developer’s real `~/.codex`.
pub async fn load_default_config_for_test(codex_home: &TempDir) -> Config {
    ConfigBuilder::default()
        .codex_home(codex_home.path().to_path_buf())
        .harness_overrides(default_test_overrides())
        .build()
        .await
        .expect("defaults for test should always succeed")
}

// On Linux the sandbox helper binary must be resolved explicitly so sandboxed
// exec tests can find it; other platforms need no overrides.
#[cfg(target_os = "linux")]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides {
        codex_linux_sandbox_exe: Some(
            codex_utils_cargo_bin::cargo_bin("codex-linux-sandbox")
                .expect("should find binary for codex-linux-sandbox"),
        ),
        ..ConfigOverrides::default()
    }
}

#[cfg(not(target_os = "linux"))]
fn default_test_overrides() -> ConfigOverrides {
    ConfigOverrides::default()
}

/// Builds an SSE stream body from a JSON fixture.
///
/// The fixture must contain an array of objects where each object represents a
/// single SSE event with at least a `type` field matching the `event:` value.
/// Additional fields become the JSON payload for the `data:` line. An object
/// with only a `type` field results in an event with no `data:` section. This
/// makes it trivial to extend the fixtures as OpenAI adds new event kinds or
/// fields.
pub fn load_sse_fixture(path: impl AsRef<std::path::Path>) -> String {
    let events: Vec<serde_json::Value> =
        serde_json::from_reader(std::fs::File::open(path).expect("read fixture"))
            .expect("parse JSON fixture");
    events_to_sse_body(events)
}

/// Like `load_sse_fixture`, but takes the fixture JSON as a string and
/// substitutes the `__ID__` placeholder with `id` before parsing.
pub fn load_sse_fixture_with_id_from_str(raw: &str, id: &str) -> String {
    let replaced = raw.replace("__ID__", id);
    let events: Vec<serde_json::Value> =
        serde_json::from_str(&replaced).expect("parse JSON fixture");
    events_to_sse_body(events)
}

/// Shared rendering step for the fixture loaders above: turn parsed fixture
/// events into an SSE body. Each event emits an `event:` line from its `type`
/// field; an object holding only `type` produces no `data:` section.
fn events_to_sse_body(events: Vec<serde_json::Value>) -> String {
    events
        .into_iter()
        .map(|e| {
            let kind = e
                .get("type")
                .and_then(|v| v.as_str())
                .expect("fixture event missing type");
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}

/// Same as [`load_sse_fixture`], but replaces the placeholder `__ID__` in the
/// fixture template with the supplied identifier before parsing. This lets a
/// single JSON template be reused by multiple tests that each need a unique
/// `response_id`.
pub fn load_sse_fixture_with_id(path: impl AsRef<std::path::Path>, id: &str) -> String {
    let raw = std::fs::read_to_string(path).expect("read fixture template");
    let replaced = raw.replace("__ID__", id);
    let events: Vec<serde_json::Value> =
        serde_json::from_str(&replaced).expect("parse JSON fixture");
    events
        .into_iter()
        .map(|e| {
            let kind = e
                .get("type")
                .and_then(|v| v.as_str())
                .expect("fixture event missing type");
            // An object holding only `type` produces an event with no data line.
            if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
                format!("event: {kind}\n\n")
            } else {
                format!("event: {kind}\ndata: {e}\n\n")
            }
        })
        .collect()
}

/// Waits for the first conversation event satisfying `predicate`
/// (with a default 1-second budget; see `wait_for_event_with_timeout`).
pub async fn wait_for_event<F>(
    codex: &CodexConversation,
    predicate: F,
) -> codex_core::protocol::EventMsg
where
    F: FnMut(&codex_core::protocol::EventMsg) -> bool,
{
    use tokio::time::Duration;
    wait_for_event_with_timeout(codex, predicate, Duration::from_secs(1)).await
}

/// Waits for the first event for which `matcher` returns `Some`, returning
/// the extracted value.
pub async fn wait_for_event_match<T, F>(codex: &CodexConversation, matcher: F) -> T
where
    F: Fn(&codex_core::protocol::EventMsg) -> Option<T>,
{
    let ev = wait_for_event(codex, |ev| matcher(ev).is_some()).await;
    matcher(&ev).unwrap()
}

/// Polls the conversation's event stream until `predicate` accepts an event,
/// panicking if the (floored) timeout elapses or the stream ends.
pub async fn wait_for_event_with_timeout<F>(
    codex: &CodexConversation,
    mut predicate: F,
    wait_time: tokio::time::Duration,
) -> codex_core::protocol::EventMsg
where
    F: FnMut(&codex_core::protocol::EventMsg) -> bool,
{
    use tokio::time::Duration;
    use tokio::time::timeout;
    loop {
        // Allow a bit more time to accommodate async startup work
        // (e.g. config IO, tool discovery): the effective per-event
        // timeout is floored at 5 seconds.
        let ev = timeout(wait_time.max(Duration::from_secs(5)), codex.next_event())
            .await
            .expect("timeout waiting for event")
            .expect("stream ended unexpectedly");
        if predicate(&ev.msg) {
            return ev.msg;
        }
    }
}

pub fn sandbox_env_var() -> &'static str {
    codex_core::spawn::CODEX_SANDBOX_ENV_VAR
}

pub fn sandbox_network_env_var() -> &'static str {
    codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
}

/// Renders `command` as argv for the current user's default shell (login mode).
pub fn format_with_current_shell(command: &str) -> Vec<String> {
    codex_core::shell::default_user_shell().derive_exec_args(command, true)
}

pub fn format_with_current_shell_display(command: &str) -> String {
    let args = format_with_current_shell(command);
    shlex::try_join(args.iter().map(String::as_str)).expect("serialize current shell command")
}

/// Renders `command` as argv for the current user's default shell (non-login).
pub fn format_with_current_shell_non_login(command: &str) -> Vec<String> {
    codex_core::shell::default_user_shell().derive_exec_args(command, false)
}

pub fn format_with_current_shell_display_non_login(command: &str) -> String {
    let args = format_with_current_shell_non_login(command);
    shlex::try_join(args.iter().map(String::as_str))
        .expect("serialize current shell command without login")
}

/// Helpers that block (on a spawn_blocking thread) until a path appears or a
/// file matching a predicate shows up, using filesystem notifications with a
/// deadline instead of sleep-polling.
pub mod fs_wait {
    use anyhow::Result;
    use anyhow::anyhow;
    use notify::RecursiveMode;
    use notify::Watcher;
    use std::path::Path;
    use std::path::PathBuf;
    use std::sync::mpsc;
    use std::sync::mpsc::RecvTimeoutError;
    use std::time::Duration;
    use std::time::Instant;
    use tokio::task;
    use walkdir::WalkDir;

    /// Resolves once `path` exists, or errors after `timeout`.
    pub async fn wait_for_path_exists(
        path: impl Into<PathBuf>,
        timeout: Duration,
    ) -> Result<PathBuf> {
        let path = path.into();
        task::spawn_blocking(move || wait_for_path_exists_blocking(path, timeout)).await?
    }

    /// Resolves with the first file under `root` accepted by `predicate`, or
    /// errors after `timeout`.
    pub async fn wait_for_matching_file(
        root: impl Into<PathBuf>,
        timeout: Duration,
        predicate: impl FnMut(&Path) -> bool + Send + 'static,
    ) -> Result<PathBuf> {
        let root = root.into();
        task::spawn_blocking(move || {
            let mut predicate = predicate;
            blocking_find_matching_file(root, timeout, &mut predicate)
        })
        .await?
    }

    fn wait_for_path_exists_blocking(path: PathBuf, timeout: Duration) -> Result<PathBuf> {
        // Fast path: already there.
        if path.exists() {
            return Ok(path);
        }
        // Watch the closest existing ancestor, since the target itself (and
        // possibly its parents) may not exist yet.
        let watch_root = nearest_existing_ancestor(&path);
        let (tx, rx) = mpsc::channel();
        let mut watcher = notify::recommended_watcher(move |res| {
            let _ = tx.send(res);
        })?;
        watcher.watch(&watch_root, RecursiveMode::Recursive)?;
        let deadline = Instant::now() + timeout;
        loop {
            // Re-check before each wait to close the race between watcher
            // setup and the path being created.
            if path.exists() {
                return Ok(path.clone());
            }
            let now = Instant::now();
            if now >= deadline {
                break;
            }
            let remaining = deadline.saturating_duration_since(now);
            match rx.recv_timeout(remaining) {
                Ok(Ok(_event)) => {
                    if path.exists() {
                        return Ok(path.clone());
                    }
                }
                Ok(Err(err)) => return Err(err.into()),
                Err(RecvTimeoutError::Timeout) => break,
                Err(RecvTimeoutError::Disconnected) => break,
            }
        }
        // Final check after the wait loop exits.
        if path.exists() {
            Ok(path)
        } else {
            Err(anyhow!("timed out waiting for {path:?}"))
        }
    }

    fn blocking_find_matching_file(
        root: PathBuf,
        timeout: Duration,
        predicate: &mut impl FnMut(&Path) -> bool,
    ) -> Result<PathBuf> {
        // First make sure the root itself exists (shares the same deadline).
        let root = wait_for_path_exists_blocking(root, timeout)?;
        if let Some(found) = scan_for_match(&root, predicate) {
            return Ok(found);
        }
        let (tx, rx) = mpsc::channel();
        let mut watcher = notify::recommended_watcher(move |res| {
            let _ = tx.send(res);
        })?;
        watcher.watch(&root, RecursiveMode::Recursive)?;
        let deadline = Instant::now() + timeout;
        while Instant::now() < deadline {
            let remaining = deadline.saturating_duration_since(Instant::now());
            match rx.recv_timeout(remaining) {
                Ok(Ok(_event)) => {
                    // Rescan on every notification; events may coalesce.
                    if let Some(found) = scan_for_match(&root, predicate) {
                        return Ok(found);
                    }
                }
                Ok(Err(err)) => return Err(err.into()),
                Err(RecvTimeoutError::Timeout) => break,
                Err(RecvTimeoutError::Disconnected) => break,
            }
        }
        // One last scan after the deadline, then give up.
        if let Some(found) = scan_for_match(&root, predicate) {
            Ok(found)
        } else {
            Err(anyhow!("timed out waiting for matching file in {root:?}"))
        }
    }

    // Walks `root` and returns the first regular file accepted by `predicate`.
    fn scan_for_match(root: &Path, predicate: &mut impl FnMut(&Path) -> bool) -> Option<PathBuf> {
        for entry in WalkDir::new(root).into_iter().filter_map(Result::ok) {
            let path = entry.path();
            if !entry.file_type().is_file() {
                continue;
            }
            if predicate(path) {
                return Some(path.to_path_buf());
            }
        }
        None
    }

    // Climbs toward the filesystem root until an existing ancestor is found;
    // falls back to "." if none of the ancestors exist.
    fn nearest_existing_ancestor(path: &Path) -> PathBuf {
        let mut current = path;
        loop {
            if current.exists() {
                return current.to_path_buf();
            }
            match current.parent() {
                Some(parent) => current = parent,
                None => return PathBuf::from("."),
            }
        }
    }
}

/// Early-returns from the enclosing test when the seatbelt sandbox is active.
#[macro_export]
macro_rules! skip_if_sandbox {
    () => {{
        if ::std::env::var($crate::sandbox_env_var())
            == ::core::result::Result::Ok("seatbelt".to_string())
        {
            eprintln!(
                "{} is set to 'seatbelt', skipping test.",
                $crate::sandbox_env_var()
            );
            return;
        }
    }};
    ($return_value:expr $(,)?) => {{
        if ::std::env::var($crate::sandbox_env_var())
            == ::core::result::Result::Ok("seatbelt".to_string())
        {
            eprintln!(
                "{} is set to 'seatbelt', skipping test.",
                $crate::sandbox_env_var()
            );
            return $return_value;
        }
    }};
}

/// Early-returns from the enclosing test when sandbox networking is disabled.
#[macro_export]
macro_rules! skip_if_no_network {
    () => {{
        if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
            println!(
                "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
            );
            return;
        }
    }};
    ($return_value:expr $(,)?) => {{
        if ::std::env::var($crate::sandbox_network_env_var()).is_ok() {
            println!(
                "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
            );
            return $return_value;
        }
    }};
}

/// Early-returns from the enclosing test on Windows.
#[macro_export]
macro_rules! skip_if_windows {
    ($return_value:expr $(,)?) => {{
        if cfg!(target_os = "windows") {
            println!("Skipping test because it cannot execute on Windows.");
            return $return_value;
        }
    }};
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/test_codex_exec.rs
codex-rs/core/tests/common/test_codex_exec.rs
#![allow(clippy::expect_used)] use codex_core::auth::CODEX_API_KEY_ENV_VAR; use std::path::Path; use tempfile::TempDir; use wiremock::MockServer; pub struct TestCodexExecBuilder { home: TempDir, cwd: TempDir, } impl TestCodexExecBuilder { pub fn cmd(&self) -> assert_cmd::Command { let mut cmd = assert_cmd::Command::new( codex_utils_cargo_bin::cargo_bin("codex-exec") .expect("should find binary for codex-exec"), ); cmd.current_dir(self.cwd.path()) .env("CODEX_HOME", self.home.path()) .env(CODEX_API_KEY_ENV_VAR, "dummy"); cmd } pub fn cmd_with_server(&self, server: &MockServer) -> assert_cmd::Command { let mut cmd = self.cmd(); let base = format!("{}/v1", server.uri()); cmd.env("OPENAI_BASE_URL", base); cmd } pub fn cwd_path(&self) -> &Path { self.cwd.path() } pub fn home_path(&self) -> &Path { self.home.path() } } pub fn test_codex_exec() -> TestCodexExecBuilder { TestCodexExecBuilder { home: TempDir::new().expect("create temp home"), cwd: TempDir::new().expect("create temp cwd"), } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/process.rs
codex-rs/core/tests/common/process.rs
use anyhow::Context; use std::fs; use std::path::Path; use std::time::Duration; pub async fn wait_for_pid_file(path: &Path) -> anyhow::Result<String> { let pid = tokio::time::timeout(Duration::from_secs(2), async { loop { if let Ok(contents) = fs::read_to_string(path) { let trimmed = contents.trim(); if !trimmed.is_empty() { return trimmed.to_string(); } } tokio::time::sleep(Duration::from_millis(25)).await; } }) .await .context("timed out waiting for pid file")?; Ok(pid) } pub fn process_is_alive(pid: &str) -> anyhow::Result<bool> { let status = std::process::Command::new("kill") .args(["-0", pid]) .status() .context("failed to probe process liveness with kill -0")?; Ok(status.success()) } async fn wait_for_process_exit_inner(pid: String) -> anyhow::Result<()> { loop { if !process_is_alive(&pid)? { return Ok(()); } tokio::time::sleep(Duration::from_millis(25)).await; } } pub async fn wait_for_process_exit(pid: &str) -> anyhow::Result<()> { let pid = pid.to_string(); tokio::time::timeout(Duration::from_secs(2), wait_for_process_exit_inner(pid)) .await .context("timed out waiting for process to exit")??; Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/streaming_sse.rs
codex-rs/core/tests/common/streaming_sse.rs
use std::collections::VecDeque; use std::sync::Arc; use std::time::SystemTime; use std::time::UNIX_EPOCH; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::net::TcpListener; use tokio::sync::Mutex as TokioMutex; use tokio::sync::oneshot; /// Streaming SSE chunk payload gated by a per-chunk signal. #[derive(Debug)] pub struct StreamingSseChunk { pub gate: Option<oneshot::Receiver<()>>, pub body: String, } /// Minimal streaming SSE server for tests that need gated per-chunk delivery. pub struct StreamingSseServer { uri: String, shutdown: oneshot::Sender<()>, task: tokio::task::JoinHandle<()>, } impl StreamingSseServer { pub fn uri(&self) -> &str { &self.uri } pub async fn shutdown(self) { let _ = self.shutdown.send(()); let _ = self.task.await; } } /// Starts a lightweight HTTP server that supports: /// - GET /v1/models -> empty models response /// - POST /v1/responses -> SSE stream gated per-chunk, served in order /// /// Returns the server handle and a list of receivers that fire when each /// response stream finishes sending its final chunk. pub async fn start_streaming_sse_server( responses: Vec<Vec<StreamingSseChunk>>, ) -> (StreamingSseServer, Vec<oneshot::Receiver<i64>>) { let listener = TcpListener::bind("127.0.0.1:0") .await .expect("bind streaming SSE server"); let addr = listener.local_addr().expect("streaming SSE server address"); let uri = format!("http://{addr}"); let mut completion_senders = Vec::with_capacity(responses.len()); let mut completion_receivers = Vec::with_capacity(responses.len()); for _ in 0..responses.len() { let (tx, rx) = oneshot::channel(); completion_senders.push(tx); completion_receivers.push(rx); } let state = Arc::new(TokioMutex::new(StreamingSseState { responses: VecDeque::from(responses), completions: VecDeque::from(completion_senders), })); let (shutdown_tx, mut shutdown_rx) = oneshot::channel(); let task = tokio::spawn(async move { loop { tokio::select! 
{ _ = &mut shutdown_rx => break, accept_res = listener.accept() => { let (mut stream, _) = accept_res.expect("accept streaming SSE connection"); let state = Arc::clone(&state); tokio::spawn(async move { let (request, body_prefix) = read_http_request(&mut stream).await; let Some((method, path)) = parse_request_line(&request) else { let _ = write_http_response(&mut stream, 400, "bad request", "text/plain").await; return; }; if method == "GET" && path == "/v1/models" { if drain_request_body(&mut stream, &request, body_prefix) .await .is_err() { let _ = write_http_response(&mut stream, 400, "bad request", "text/plain").await; return; } let body = serde_json::json!({ "data": [], "object": "list" }) .to_string(); let _ = write_http_response(&mut stream, 200, &body, "application/json").await; return; } if method == "POST" && path == "/v1/responses" { if drain_request_body(&mut stream, &request, body_prefix) .await .is_err() { let _ = write_http_response(&mut stream, 400, "bad request", "text/plain").await; return; } let Some((chunks, completion)) = take_next_stream(&state).await else { let _ = write_http_response(&mut stream, 500, "no responses queued", "text/plain").await; return; }; if write_sse_headers(&mut stream).await.is_err() { return; } for chunk in chunks { if let Some(gate) = chunk.gate && gate.await.is_err() { return; } if stream.write_all(chunk.body.as_bytes()).await.is_err() { return; } let _ = stream.flush().await; } let _ = completion.send(unix_ms_now()); let _ = stream.shutdown().await; return; } let _ = write_http_response(&mut stream, 404, "not found", "text/plain").await; }); } } } }); ( StreamingSseServer { uri, shutdown: shutdown_tx, task, }, completion_receivers, ) } struct StreamingSseState { responses: VecDeque<Vec<StreamingSseChunk>>, completions: VecDeque<oneshot::Sender<i64>>, } async fn take_next_stream( state: &TokioMutex<StreamingSseState>, ) -> Option<(Vec<StreamingSseChunk>, oneshot::Sender<i64>)> { let mut guard = state.lock().await; let 
chunks = guard.responses.pop_front()?; let completion = guard.completions.pop_front()?; Some((chunks, completion)) } async fn read_http_request(stream: &mut tokio::net::TcpStream) -> (String, Vec<u8>) { let mut buf = Vec::new(); let mut scratch = [0u8; 1024]; loop { let read = stream.read(&mut scratch).await.unwrap_or(0); if read == 0 { break; } buf.extend_from_slice(&scratch[..read]); if let Some(end) = header_terminator_index(&buf) { let header_end = end + 4; let header = String::from_utf8_lossy(&buf[..header_end]).into_owned(); let rest = buf[header_end..].to_vec(); return (header, rest); } } (String::from_utf8_lossy(&buf).into_owned(), Vec::new()) } fn parse_request_line(request: &str) -> Option<(&str, &str)> { let line = request.lines().next()?; let mut parts = line.split_whitespace(); let method = parts.next()?; let path = parts.next()?; Some((method, path)) } fn header_terminator_index(buf: &[u8]) -> Option<usize> { buf.windows(4).position(|w| w == b"\r\n\r\n") } fn content_length(headers: &str) -> Option<usize> { headers.lines().skip(1).find_map(|line| { let mut parts = line.splitn(2, ':'); let name = parts.next()?.trim(); let value = parts.next()?.trim(); if name.eq_ignore_ascii_case("content-length") { value.parse::<usize>().ok() } else { None } }) } async fn drain_request_body( stream: &mut tokio::net::TcpStream, headers: &str, mut body_prefix: Vec<u8>, ) -> std::io::Result<()> { let Some(content_len) = content_length(headers) else { return Ok(()); }; if body_prefix.len() > content_len { body_prefix.truncate(content_len); } let remaining = content_len.saturating_sub(body_prefix.len()); if remaining == 0 { return Ok(()); } let mut rest = vec![0u8; remaining]; stream.read_exact(&mut rest).await?; Ok(()) } async fn write_sse_headers(stream: &mut tokio::net::TcpStream) -> std::io::Result<()> { let headers = "HTTP/1.1 200 OK\r\ncontent-type: text/event-stream\r\ncache-control: no-cache\r\nconnection: close\r\n\r\n"; stream.write_all(headers.as_bytes()).await 
} async fn write_http_response( stream: &mut tokio::net::TcpStream, status: i64, body: &str, content_type: &str, ) -> std::io::Result<()> { let body_len = body.len(); let headers = format!( "HTTP/1.1 {status} OK\r\ncontent-type: {content_type}\r\ncontent-length: {body_len}\r\nconnection: close\r\n\r\n" ); stream.write_all(headers.as_bytes()).await?; stream.write_all(body.as_bytes()).await?; stream.shutdown().await } fn unix_ms_now() -> i64 { SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or_default() .as_millis() as i64 } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; use reqwest::StatusCode; use tokio::net::TcpStream; use tokio::time::Duration; use tokio::time::timeout; fn split_response(response: &str) -> (&str, &str) { response .split_once("\r\n\r\n") .expect("response missing header separator") } fn status_code(headers: &str) -> u16 { let line = headers.lines().next().expect("status line"); let mut parts = line.split_whitespace(); let _ = parts.next(); let status = parts.next().expect("status code"); status.parse().expect("parse status code") } fn header_value<'a>(headers: &'a str, name: &str) -> Option<&'a str> { headers.lines().skip(1).find_map(|line| { let mut parts = line.splitn(2, ':'); let key = parts.next()?.trim(); let value = parts.next()?.trim(); if key.eq_ignore_ascii_case(name) { Some(value) } else { None } }) } async fn connect(uri: &str) -> TcpStream { let addr = uri.strip_prefix("http://").expect("uri should be http"); TcpStream::connect(addr) .await .expect("connect to streaming SSE server") } async fn read_to_end(stream: &mut TcpStream) -> String { let mut buf = Vec::new(); stream.read_to_end(&mut buf).await.expect("read response"); String::from_utf8_lossy(&buf).into_owned() } async fn read_until(stream: &mut TcpStream, needle: &str) -> (String, String) { let mut buf = Vec::new(); let mut scratch = [0u8; 256]; let needle_bytes = needle.as_bytes(); loop { let read = stream.read(&mut scratch).await.expect("read 
response"); if read == 0 { break; } buf.extend_from_slice(&scratch[..read]); if let Some(pos) = buf .windows(needle_bytes.len()) .position(|window| window == needle_bytes) { let end = pos + needle_bytes.len(); let headers = String::from_utf8_lossy(&buf[..end]).into_owned(); let remainder = String::from_utf8_lossy(&buf[end..]).into_owned(); return (headers, remainder); } } (String::from_utf8_lossy(&buf).into_owned(), String::new()) } async fn send_request(stream: &mut TcpStream, request: &str) { stream .write_all(request.as_bytes()) .await .expect("write request"); } #[tokio::test] async fn get_models_returns_empty_list() { let (server, _) = start_streaming_sse_server(Vec::new()).await; let mut stream = connect(server.uri()).await; send_request( &mut stream, "GET /v1/models HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n", ) .await; let response = read_to_end(&mut stream).await; let (headers, body) = split_response(&response); assert_eq!(status_code(headers), 200); assert_eq!( header_value(headers, "content-type"), Some("application/json") ); let parsed: serde_json::Value = serde_json::from_str(body).expect("parse json body"); assert_eq!( parsed, serde_json::json!({ "data": [], "object": "list" }) ); server.shutdown().await; } #[tokio::test] async fn post_responses_streams_in_order_and_closes() { let chunks = vec![ StreamingSseChunk { gate: None, body: "event: one\n\n".to_string(), }, StreamingSseChunk { gate: None, body: "event: two\n\n".to_string(), }, ]; let (server, mut completions) = start_streaming_sse_server(vec![chunks]).await; let mut stream = connect(server.uri()).await; send_request( &mut stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let response = read_to_end(&mut stream).await; let (headers, body) = split_response(&response); assert_eq!(status_code(headers), 200); assert_eq!( header_value(headers, "content-type"), Some("text/event-stream") ); assert_eq!(body, "event: one\n\nevent: two\n\n"); let mut extra = [0u8; 
1]; let read = stream.read(&mut extra).await.expect("read after eof"); assert_eq!(read, 0); let completion = completions.pop().expect("completion receiver"); let timestamp = completion.await.expect("completion timestamp"); assert!(timestamp > 0); server.shutdown().await; } #[tokio::test] async fn none_gate_streams_immediately() { let chunks = vec![StreamingSseChunk { gate: None, body: "event: immediate\n\n".to_string(), }]; let (server, _) = start_streaming_sse_server(vec![chunks]).await; let mut stream = connect(server.uri()).await; send_request( &mut stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let (headers, remainder) = read_until(&mut stream, "\r\n\r\n").await; let (headers, _) = split_response(&headers); assert_eq!(status_code(headers), 200); let immediate = format!("{remainder}{}", read_to_end(&mut stream).await); assert_eq!(immediate, "event: immediate\n\n"); server.shutdown().await; } #[tokio::test] async fn post_responses_with_no_queue_returns_500() { let (server, _) = start_streaming_sse_server(Vec::new()).await; let mut stream = connect(server.uri()).await; send_request( &mut stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let response = read_to_end(&mut stream).await; let (headers, body) = split_response(&response); assert_eq!(status_code(headers), 500); assert_eq!(header_value(headers, "content-type"), Some("text/plain")); assert_eq!(body, "no responses queued"); server.shutdown().await; } #[tokio::test] async fn gated_chunks_wait_for_signal_and_preserve_order() { let (gate_one_tx, gate_one_rx) = oneshot::channel(); let (gate_two_tx, gate_two_rx) = oneshot::channel(); let chunks = vec![ StreamingSseChunk { gate: Some(gate_one_rx), body: "event: one\n\n".to_string(), }, StreamingSseChunk { gate: Some(gate_two_rx), body: "event: two\n\n".to_string(), }, ]; let (server, _) = start_streaming_sse_server(vec![chunks]).await; let mut stream = 
connect(server.uri()).await; send_request( &mut stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let (headers, remainder) = read_until(&mut stream, "\r\n\r\n").await; let (headers, _) = split_response(&headers); assert_eq!(status_code(headers), 200); assert_eq!( header_value(headers, "content-type"), Some("text/event-stream") ); assert!( remainder.is_empty(), "unexpected body before gate: {remainder:?}" ); let mut scratch = [0u8; 32]; let pending = timeout(Duration::from_millis(200), stream.read(&mut scratch)).await; assert!(pending.is_err()); let _ = gate_one_tx.send(()); let mut first_chunk = vec![0u8; "event: one\n\n".len()]; stream .read_exact(&mut first_chunk) .await .expect("read first chunk"); assert_eq!(String::from_utf8_lossy(&first_chunk), "event: one\n\n"); let pending = timeout(Duration::from_millis(200), stream.read(&mut scratch)).await; assert!(pending.is_err()); let _ = gate_two_tx.send(()); let remaining = read_to_end(&mut stream).await; assert_eq!(remaining, "event: two\n\n"); server.shutdown().await; } #[tokio::test] async fn multiple_responses_are_fifo_and_completion_timestamps_monotonic() { let first_chunks = vec![StreamingSseChunk { gate: None, body: "event: first\n\n".to_string(), }]; let second_chunks = vec![StreamingSseChunk { gate: None, body: "event: second\n\n".to_string(), }]; let (server, mut completions) = start_streaming_sse_server(vec![first_chunks, second_chunks]).await; let mut first_stream = connect(server.uri()).await; send_request( &mut first_stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let first_response = read_to_end(&mut first_stream).await; let (_, first_body) = split_response(&first_response); assert_eq!(first_body, "event: first\n\n"); let mut second_stream = connect(server.uri()).await; send_request( &mut second_stream, "POST /v1/responses HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Length: 0\r\n\r\n", ) .await; let 
second_response = read_to_end(&mut second_stream).await; let (_, second_body) = split_response(&second_response); assert_eq!(second_body, "event: second\n\n"); let first_completion = completions.remove(0); let second_completion = completions.remove(0); let first_timestamp = first_completion.await.expect("first completion"); let second_timestamp = second_completion.await.expect("second completion"); assert!(first_timestamp > 0); assert!(second_timestamp > 0); assert!(first_timestamp <= second_timestamp); assert!(completions.is_empty()); server.shutdown().await; } #[tokio::test] async fn unknown_route_returns_404() { let (server, _) = start_streaming_sse_server(Vec::new()).await; let mut stream = connect(server.uri()).await; send_request( &mut stream, "GET /v1/unknown HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n", ) .await; let response = read_to_end(&mut stream).await; let (headers, body) = split_response(&response); assert_eq!(status_code(headers), 404); assert_eq!(header_value(headers, "content-type"), Some("text/plain")); assert_eq!(body, "not found"); server.shutdown().await; } #[tokio::test] async fn malformed_request_returns_400() { let (server, _) = start_streaming_sse_server(Vec::new()).await; let mut stream = connect(server.uri()).await; send_request(&mut stream, "BAD\r\n\r\n").await; let response = read_to_end(&mut stream).await; let (headers, body) = split_response(&response); assert_eq!(status_code(headers), 400); assert_eq!(header_value(headers, "content-type"), Some("text/plain")); assert_eq!(body, "bad request"); server.shutdown().await; } #[tokio::test] async fn responses_post_drains_request_body() { let response_body = r#"event: response.completed data: {"type":"response.completed","response":{"id":"resp-1"}} "#; let (server, mut completions) = start_streaming_sse_server(vec![vec![StreamingSseChunk { gate: None, body: response_body.to_string(), }]]) .await; let url = format!("{}/v1/responses", server.uri()); let payload = serde_json::json!({ "model": 
"gpt-5.1", "instructions": "test", "input": [{"type": "message", "role": "user", "content": [{"type": "input_text", "text": "hello"}]}], "stream": true }); let resp = reqwest::Client::new() .post(url) .json(&payload) .send() .await .expect("send request"); assert_eq!(resp.status(), StatusCode::OK); let bytes = resp.bytes().await.expect("read response body"); assert_eq!(bytes, response_body.as_bytes()); let completion = completions.remove(0); let completed_at = completion.await.expect("completion timestamp"); assert!(completed_at > 0); server.shutdown().await; } #[tokio::test] async fn read_http_request_returns_after_header_terminator() { let listener = TcpListener::bind("127.0.0.1:0") .await .expect("bind test listener"); let addr = listener.local_addr().expect("listener address"); let (tx, rx) = oneshot::channel(); let server_task = tokio::spawn(async move { let (mut stream, _) = listener.accept().await.expect("accept client"); let (request, body) = read_http_request(&mut stream).await; let _ = tx.send((request, body)); }); let mut client = TcpStream::connect(addr) .await .expect("connect to test listener"); let request = "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n"; client .write_all(request.as_bytes()) .await .expect("write request"); let (received, body) = timeout(Duration::from_millis(200), rx) .await .expect("read_http_request timed out") .expect("receive request"); assert_eq!(received, request); assert!(body.is_empty()); drop(client); let _ = server_task.await; } #[test] fn parse_request_line_handles_valid_and_invalid() { assert_eq!(parse_request_line(""), None); assert_eq!(parse_request_line("BAD"), None); assert_eq!( parse_request_line("GET /v1/models HTTP/1.1"), Some(("GET", "/v1/models")) ); } #[tokio::test] async fn take_next_stream_consumes_in_lockstep() { let (first_tx, first_rx) = oneshot::channel(); let (second_tx, second_rx) = oneshot::channel(); let state = TokioMutex::new(StreamingSseState { responses: VecDeque::from(vec![ vec![StreamingSseChunk { 
gate: None, body: "first".to_string(), }], vec![StreamingSseChunk { gate: None, body: "second".to_string(), }], ]), completions: VecDeque::from(vec![first_tx, second_tx]), }); let (first_chunks, first_completion) = take_next_stream(&state).await.expect("first stream"); assert_eq!(first_chunks[0].body, "first"); let _ = first_completion.send(11); assert_eq!(first_rx.await.expect("first completion"), 11); let (second_chunks, second_completion) = take_next_stream(&state).await.expect("second stream"); assert_eq!(second_chunks[0].body, "second"); let _ = second_completion.send(22); assert_eq!(second_rx.await.expect("second completion"), 22); let third = take_next_stream(&state).await; assert!(third.is_none()); } #[tokio::test] async fn shutdown_terminates_accept_loop() { let (server, _) = start_streaming_sse_server(Vec::new()).await; let shutdown = timeout(Duration::from_millis(200), server.shutdown()).await; assert!(shutdown.is_ok()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/tests/common/test_codex.rs
codex-rs/core/tests/common/test_codex.rs
use std::mem::swap; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use anyhow::Result; use codex_core::CodexAuth; use codex_core::CodexConversation; use codex_core::ConversationManager; use codex_core::ModelProviderInfo; use codex_core::built_in_model_providers; use codex_core::config::Config; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_core::protocol::SessionConfiguredEvent; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::user_input::UserInput; use serde_json::Value; use tempfile::TempDir; use wiremock::MockServer; use crate::load_default_config_for_test; use crate::responses::get_responses_request_bodies; use crate::responses::start_mock_server; use crate::streaming_sse::StreamingSseServer; use crate::wait_for_event; type ConfigMutator = dyn FnOnce(&mut Config) + Send; type PreBuildHook = dyn FnOnce(&Path) + Send + 'static; /// A collection of different ways the model can output an apply_patch call #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum ApplyPatchModelOutput { Freeform, Function, Shell, ShellViaHeredoc, ShellCommandViaHeredoc, } /// A collection of different ways the model can output an apply_patch call #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum ShellModelOutput { Shell, ShellCommand, LocalShell, // UnifiedExec has its own set of tests } pub struct TestCodexBuilder { config_mutators: Vec<Box<ConfigMutator>>, auth: CodexAuth, pre_build_hooks: Vec<Box<PreBuildHook>>, } impl TestCodexBuilder { pub fn with_config<T>(mut self, mutator: T) -> Self where T: FnOnce(&mut Config) + Send + 'static, { self.config_mutators.push(Box::new(mutator)); self } pub fn with_auth(mut self, auth: CodexAuth) -> Self { self.auth = auth; self } pub fn with_model(self, model: &str) -> Self { let new_model = model.to_string(); self.with_config(move |config| { 
config.model = Some(new_model.clone()); }) } pub fn with_pre_build_hook<F>(mut self, hook: F) -> Self where F: FnOnce(&Path) + Send + 'static, { self.pre_build_hooks.push(Box::new(hook)); self } pub async fn build(&mut self, server: &wiremock::MockServer) -> anyhow::Result<TestCodex> { let home = Arc::new(TempDir::new()?); self.build_with_home(server, home, None).await } pub async fn build_with_streaming_server( &mut self, server: &StreamingSseServer, ) -> anyhow::Result<TestCodex> { let base_url = server.uri(); let home = Arc::new(TempDir::new()?); self.build_with_home_and_base_url(format!("{base_url}/v1"), home, None) .await } pub async fn resume( &mut self, server: &wiremock::MockServer, home: Arc<TempDir>, rollout_path: PathBuf, ) -> anyhow::Result<TestCodex> { self.build_with_home(server, home, Some(rollout_path)).await } async fn build_with_home( &mut self, server: &wiremock::MockServer, home: Arc<TempDir>, resume_from: Option<PathBuf>, ) -> anyhow::Result<TestCodex> { let base_url = format!("{}/v1", server.uri()); let (config, cwd) = self.prepare_config(base_url, &home).await?; self.build_from_config(config, cwd, home, resume_from).await } async fn build_with_home_and_base_url( &mut self, base_url: String, home: Arc<TempDir>, resume_from: Option<PathBuf>, ) -> anyhow::Result<TestCodex> { let (config, cwd) = self.prepare_config(base_url, &home).await?; self.build_from_config(config, cwd, home, resume_from).await } async fn build_from_config( &mut self, config: Config, cwd: Arc<TempDir>, home: Arc<TempDir>, resume_from: Option<PathBuf>, ) -> anyhow::Result<TestCodex> { let auth = self.auth.clone(); let conversation_manager = ConversationManager::with_models_provider_and_home( auth.clone(), config.model_provider.clone(), config.codex_home.clone(), ); let new_conversation = match resume_from { Some(path) => { let auth_manager = codex_core::AuthManager::from_auth_for_testing(auth); conversation_manager .resume_conversation_from_rollout(config.clone(), path, 
auth_manager) .await? } None => { conversation_manager .new_conversation(config.clone()) .await? } }; Ok(TestCodex { home, cwd, config, codex: new_conversation.conversation, session_configured: new_conversation.session_configured, conversation_manager: Arc::new(conversation_manager), }) } async fn prepare_config( &mut self, base_url: String, home: &TempDir, ) -> anyhow::Result<(Config, Arc<TempDir>)> { let model_provider = ModelProviderInfo { base_url: Some(base_url), ..built_in_model_providers()["openai"].clone() }; let cwd = Arc::new(TempDir::new()?); let mut config = load_default_config_for_test(home).await; config.cwd = cwd.path().to_path_buf(); config.model_provider = model_provider; for hook in self.pre_build_hooks.drain(..) { hook(home.path()); } if let Ok(path) = codex_utils_cargo_bin::cargo_bin("codex") { config.codex_linux_sandbox_exe = Some(path); } let mut mutators = vec![]; swap(&mut self.config_mutators, &mut mutators); for mutator in mutators { mutator(&mut config); } if config.include_apply_patch_tool { config.features.enable(Feature::ApplyPatchFreeform); } else { config.features.disable(Feature::ApplyPatchFreeform); } Ok((config, cwd)) } } pub struct TestCodex { pub home: Arc<TempDir>, pub cwd: Arc<TempDir>, pub codex: Arc<CodexConversation>, pub session_configured: SessionConfiguredEvent, pub config: Config, pub conversation_manager: Arc<ConversationManager>, } impl TestCodex { pub fn cwd_path(&self) -> &Path { self.cwd.path() } pub fn codex_home_path(&self) -> &Path { self.config.codex_home.as_path() } pub fn workspace_path(&self, rel: impl AsRef<Path>) -> PathBuf { self.cwd_path().join(rel) } pub async fn submit_turn(&self, prompt: &str) -> Result<()> { self.submit_turn_with_policies( prompt, AskForApproval::Never, SandboxPolicy::DangerFullAccess, ) .await } pub async fn submit_turn_with_policy( &self, prompt: &str, sandbox_policy: SandboxPolicy, ) -> Result<()> { self.submit_turn_with_policies(prompt, AskForApproval::Never, sandbox_policy) 
.await } pub async fn submit_turn_with_policies( &self, prompt: &str, approval_policy: AskForApproval, sandbox_policy: SandboxPolicy, ) -> Result<()> { let session_model = self.session_configured.model.clone(); self.codex .submit(Op::UserTurn { items: vec![UserInput::Text { text: prompt.into(), }], final_output_json_schema: None, cwd: self.cwd.path().to_path_buf(), approval_policy, sandbox_policy, model: session_model, effort: None, summary: ReasoningSummary::Auto, }) .await?; wait_for_event(&self.codex, |event| { matches!(event, EventMsg::TaskComplete(_)) }) .await; Ok(()) } } pub struct TestCodexHarness { server: MockServer, test: TestCodex, } impl TestCodexHarness { pub async fn new() -> Result<Self> { Self::with_builder(test_codex()).await } pub async fn with_config(mutator: impl FnOnce(&mut Config) + Send + 'static) -> Result<Self> { Self::with_builder(test_codex().with_config(mutator)).await } pub async fn with_builder(mut builder: TestCodexBuilder) -> Result<Self> { let server = start_mock_server().await; let test = builder.build(&server).await?; Ok(Self { server, test }) } pub fn server(&self) -> &MockServer { &self.server } pub fn test(&self) -> &TestCodex { &self.test } pub fn cwd(&self) -> &Path { self.test.cwd_path() } pub fn path(&self, rel: impl AsRef<Path>) -> PathBuf { self.test.workspace_path(rel) } pub async fn submit(&self, prompt: &str) -> Result<()> { self.test.submit_turn(prompt).await } pub async fn submit_with_policy( &self, prompt: &str, sandbox_policy: SandboxPolicy, ) -> Result<()> { self.test .submit_turn_with_policy(prompt, sandbox_policy) .await } pub async fn request_bodies(&self) -> Vec<Value> { get_responses_request_bodies(&self.server).await } pub async fn function_call_output_value(&self, call_id: &str) -> Value { let bodies = self.request_bodies().await; function_call_output(&bodies, call_id).clone() } pub async fn function_call_stdout(&self, call_id: &str) -> String { self.function_call_output_value(call_id) .await 
.get("output") .and_then(Value::as_str) .expect("output string") .to_string() } pub async fn custom_tool_call_output(&self, call_id: &str) -> String { let bodies = self.request_bodies().await; custom_tool_call_output(&bodies, call_id) .get("output") .and_then(Value::as_str) .expect("output string") .to_string() } pub async fn apply_patch_output( &self, call_id: &str, output_type: ApplyPatchModelOutput, ) -> String { match output_type { ApplyPatchModelOutput::Freeform => self.custom_tool_call_output(call_id).await, ApplyPatchModelOutput::Function | ApplyPatchModelOutput::Shell | ApplyPatchModelOutput::ShellViaHeredoc | ApplyPatchModelOutput::ShellCommandViaHeredoc => { self.function_call_stdout(call_id).await } } } } fn custom_tool_call_output<'a>(bodies: &'a [Value], call_id: &str) -> &'a Value { for body in bodies { if let Some(items) = body.get("input").and_then(Value::as_array) { for item in items { if item.get("type").and_then(Value::as_str) == Some("custom_tool_call_output") && item.get("call_id").and_then(Value::as_str) == Some(call_id) { return item; } } } } panic!("custom_tool_call_output {call_id} not found"); } fn function_call_output<'a>(bodies: &'a [Value], call_id: &str) -> &'a Value { for body in bodies { if let Some(items) = body.get("input").and_then(Value::as_array) { for item in items { if item.get("type").and_then(Value::as_str) == Some("function_call_output") && item.get("call_id").and_then(Value::as_str) == Some(call_id) { return item; } } } } panic!("function_call_output {call_id} not found"); } pub fn test_codex() -> TestCodexBuilder { TestCodexBuilder { config_mutators: vec![], auth: CodexAuth::from_api_key("dummy"), pre_build_hooks: vec![], } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/keyring-store/src/lib.rs
codex-rs/keyring-store/src/lib.rs
use keyring::Entry; use keyring::Error as KeyringError; use std::error::Error; use std::fmt; use std::fmt::Debug; use tracing::trace; #[derive(Debug)] pub enum CredentialStoreError { Other(KeyringError), } impl CredentialStoreError { pub fn new(error: KeyringError) -> Self { Self::Other(error) } pub fn message(&self) -> String { match self { Self::Other(error) => error.to_string(), } } pub fn into_error(self) -> KeyringError { match self { Self::Other(error) => error, } } } impl fmt::Display for CredentialStoreError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Other(error) => write!(f, "{error}"), } } } impl Error for CredentialStoreError {} /// Shared credential store abstraction for keyring-backed implementations. pub trait KeyringStore: Debug + Send + Sync { fn load(&self, service: &str, account: &str) -> Result<Option<String>, CredentialStoreError>; fn save(&self, service: &str, account: &str, value: &str) -> Result<(), CredentialStoreError>; fn delete(&self, service: &str, account: &str) -> Result<bool, CredentialStoreError>; } #[derive(Debug)] pub struct DefaultKeyringStore; impl KeyringStore for DefaultKeyringStore { fn load(&self, service: &str, account: &str) -> Result<Option<String>, CredentialStoreError> { trace!("keyring.load start, service={service}, account={account}"); let entry = Entry::new(service, account).map_err(CredentialStoreError::new)?; match entry.get_password() { Ok(password) => { trace!("keyring.load success, service={service}, account={account}"); Ok(Some(password)) } Err(keyring::Error::NoEntry) => { trace!("keyring.load no entry, service={service}, account={account}"); Ok(None) } Err(error) => { trace!("keyring.load error, service={service}, account={account}, error={error}"); Err(CredentialStoreError::new(error)) } } } fn save(&self, service: &str, account: &str, value: &str) -> Result<(), CredentialStoreError> { trace!( "keyring.save start, service={service}, account={account}, value_len={}", 
value.len() ); let entry = Entry::new(service, account).map_err(CredentialStoreError::new)?; match entry.set_password(value) { Ok(()) => { trace!("keyring.save success, service={service}, account={account}"); Ok(()) } Err(error) => { trace!("keyring.save error, service={service}, account={account}, error={error}"); Err(CredentialStoreError::new(error)) } } } fn delete(&self, service: &str, account: &str) -> Result<bool, CredentialStoreError> { trace!("keyring.delete start, service={service}, account={account}"); let entry = Entry::new(service, account).map_err(CredentialStoreError::new)?; match entry.delete_credential() { Ok(()) => { trace!("keyring.delete success, service={service}, account={account}"); Ok(true) } Err(keyring::Error::NoEntry) => { trace!("keyring.delete no entry, service={service}, account={account}"); Ok(false) } Err(error) => { trace!("keyring.delete error, service={service}, account={account}, error={error}"); Err(CredentialStoreError::new(error)) } } } } pub mod tests { use super::CredentialStoreError; use super::KeyringStore; use keyring::Error as KeyringError; use keyring::credential::CredentialApi as _; use keyring::mock::MockCredential; use std::collections::HashMap; use std::sync::Arc; use std::sync::Mutex; use std::sync::PoisonError; #[derive(Default, Clone, Debug)] pub struct MockKeyringStore { credentials: Arc<Mutex<HashMap<String, Arc<MockCredential>>>>, } impl MockKeyringStore { pub fn credential(&self, account: &str) -> Arc<MockCredential> { let mut guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard .entry(account.to_string()) .or_insert_with(|| Arc::new(MockCredential::default())) .clone() } pub fn saved_value(&self, account: &str) -> Option<String> { let credential = { let guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard.get(account).cloned() }?; credential.get_password().ok() } pub fn set_error(&self, account: &str, error: KeyringError) { let credential = 
self.credential(account); credential.set_error(error); } pub fn contains(&self, account: &str) -> bool { let guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard.contains_key(account) } } impl KeyringStore for MockKeyringStore { fn load( &self, _service: &str, account: &str, ) -> Result<Option<String>, CredentialStoreError> { let credential = { let guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard.get(account).cloned() }; let Some(credential) = credential else { return Ok(None); }; match credential.get_password() { Ok(password) => Ok(Some(password)), Err(KeyringError::NoEntry) => Ok(None), Err(error) => Err(CredentialStoreError::new(error)), } } fn save( &self, _service: &str, account: &str, value: &str, ) -> Result<(), CredentialStoreError> { let credential = self.credential(account); credential .set_password(value) .map_err(CredentialStoreError::new) } fn delete(&self, _service: &str, account: &str) -> Result<bool, CredentialStoreError> { let credential = { let guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard.get(account).cloned() }; let Some(credential) = credential else { return Ok(false); }; let removed = match credential.delete_credential() { Ok(()) => Ok(true), Err(KeyringError::NoEntry) => Ok(false), Err(error) => Err(CredentialStoreError::new(error)), }?; let mut guard = self .credentials .lock() .unwrap_or_else(PoisonError::into_inner); guard.remove(account); Ok(removed) } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/lib.rs
codex-rs/cli/src/lib.rs
pub mod debug_sandbox; mod exit_status; pub mod login; use clap::Parser; use codex_common::CliConfigOverrides; #[derive(Debug, Parser)] pub struct SeatbeltCommand { /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) #[arg(long = "full-auto", default_value_t = false)] pub full_auto: bool, /// While the command runs, capture macOS sandbox denials via `log stream` and print them after exit #[arg(long = "log-denials", default_value_t = false)] pub log_denials: bool, #[clap(skip)] pub config_overrides: CliConfigOverrides, /// Full command args to run under seatbelt. #[arg(trailing_var_arg = true)] pub command: Vec<String>, } #[derive(Debug, Parser)] pub struct LandlockCommand { /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) #[arg(long = "full-auto", default_value_t = false)] pub full_auto: bool, #[clap(skip)] pub config_overrides: CliConfigOverrides, /// Full command args to run under landlock. #[arg(trailing_var_arg = true)] pub command: Vec<String>, } #[derive(Debug, Parser)] pub struct WindowsCommand { /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) #[arg(long = "full-auto", default_value_t = false)] pub full_auto: bool, #[clap(skip)] pub config_overrides: CliConfigOverrides, /// Full command args to run under Windows restricted token sandbox. #[arg(trailing_var_arg = true)] pub command: Vec<String>, }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/wsl_paths.rs
codex-rs/cli/src/wsl_paths.rs
use std::ffi::OsStr; /// Returns true if the current process is running under WSL. pub use codex_core::env::is_wsl; /// Convert a Windows absolute path (`C:\foo\bar` or `C:/foo/bar`) to a WSL mount path (`/mnt/c/foo/bar`). /// Returns `None` if the input does not look like a Windows drive path. pub fn win_path_to_wsl(path: &str) -> Option<String> { let bytes = path.as_bytes(); if bytes.len() < 3 || bytes[1] != b':' || !(bytes[2] == b'\\' || bytes[2] == b'/') || !bytes[0].is_ascii_alphabetic() { return None; } let drive = (bytes[0] as char).to_ascii_lowercase(); let tail = path[3..].replace('\\', "/"); if tail.is_empty() { return Some(format!("/mnt/{drive}")); } Some(format!("/mnt/{drive}/{tail}")) } /// If under WSL and given a Windows-style path, return the equivalent `/mnt/<drive>/…` path. /// Otherwise returns the input unchanged. pub fn normalize_for_wsl<P: AsRef<OsStr>>(path: P) -> String { let value = path.as_ref().to_string_lossy().to_string(); if !is_wsl() { return value; } if let Some(mapped) = win_path_to_wsl(&value) { return mapped; } value } #[cfg(test)] mod tests { use super::*; #[test] fn win_to_wsl_basic() { assert_eq!( win_path_to_wsl(r"C:\Temp\codex.zip").as_deref(), Some("/mnt/c/Temp/codex.zip") ); assert_eq!( win_path_to_wsl("D:/Work/codex.tgz").as_deref(), Some("/mnt/d/Work/codex.tgz") ); assert!(win_path_to_wsl("/home/user/codex").is_none()); } #[test] fn normalize_is_noop_on_unix_paths() { assert_eq!(normalize_for_wsl("/home/u/x"), "/home/u/x"); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/login.rs
codex-rs/cli/src/login.rs
use codex_app_server_protocol::AuthMode; use codex_common::CliConfigOverrides; use codex_core::CodexAuth; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::auth::CLIENT_ID; use codex_core::auth::login_with_api_key; use codex_core::auth::logout; use codex_core::config::Config; use codex_login::ServerOptions; use codex_login::run_device_code_login; use codex_login::run_login_server; use codex_protocol::config_types::ForcedLoginMethod; use std::io::IsTerminal; use std::io::Read; use std::path::PathBuf; pub async fn login_with_chatgpt( codex_home: PathBuf, forced_chatgpt_workspace_id: Option<String>, cli_auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<()> { let opts = ServerOptions::new( codex_home, CLIENT_ID.to_string(), forced_chatgpt_workspace_id, cli_auth_credentials_store_mode, ); let server = run_login_server(opts)?; eprintln!( "Starting local login server on http://localhost:{}.\nIf your browser did not open, navigate to this URL to authenticate:\n\n{}", server.actual_port, server.auth_url, ); server.block_until_done().await } pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! { let config = load_config_or_exit(cli_config_overrides).await; if matches!(config.forced_login_method, Some(ForcedLoginMethod::Api)) { eprintln!("ChatGPT login is disabled. Use API key login instead."); std::process::exit(1); } let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); match login_with_chatgpt( config.codex_home, forced_chatgpt_workspace_id, config.cli_auth_credentials_store_mode, ) .await { Ok(_) => { eprintln!("Successfully logged in"); std::process::exit(0); } Err(e) => { eprintln!("Error logging in: {e}"); std::process::exit(1); } } } pub async fn run_login_with_api_key( cli_config_overrides: CliConfigOverrides, api_key: String, ) -> ! 
{ let config = load_config_or_exit(cli_config_overrides).await; if matches!(config.forced_login_method, Some(ForcedLoginMethod::Chatgpt)) { eprintln!("API key login is disabled. Use ChatGPT login instead."); std::process::exit(1); } match login_with_api_key( &config.codex_home, &api_key, config.cli_auth_credentials_store_mode, ) { Ok(_) => { eprintln!("Successfully logged in"); std::process::exit(0); } Err(e) => { eprintln!("Error logging in: {e}"); std::process::exit(1); } } } pub fn read_api_key_from_stdin() -> String { let mut stdin = std::io::stdin(); if stdin.is_terminal() { eprintln!( "--with-api-key expects the API key on stdin. Try piping it, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`." ); std::process::exit(1); } eprintln!("Reading API key from stdin..."); let mut buffer = String::new(); if let Err(err) = stdin.read_to_string(&mut buffer) { eprintln!("Failed to read API key from stdin: {err}"); std::process::exit(1); } let api_key = buffer.trim().to_string(); if api_key.is_empty() { eprintln!("No API key provided via stdin."); std::process::exit(1); } api_key } /// Login using the OAuth device code flow. pub async fn run_login_with_device_code( cli_config_overrides: CliConfigOverrides, issuer_base_url: Option<String>, client_id: Option<String>, ) -> ! { let config = load_config_or_exit(cli_config_overrides).await; if matches!(config.forced_login_method, Some(ForcedLoginMethod::Api)) { eprintln!("ChatGPT login is disabled. 
Use API key login instead."); std::process::exit(1); } let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); let mut opts = ServerOptions::new( config.codex_home, client_id.unwrap_or(CLIENT_ID.to_string()), forced_chatgpt_workspace_id, config.cli_auth_credentials_store_mode, ); if let Some(iss) = issuer_base_url { opts.issuer = iss; } match run_device_code_login(opts).await { Ok(()) => { eprintln!("Successfully logged in"); std::process::exit(0); } Err(e) => { eprintln!("Error logging in with device code: {e}"); std::process::exit(1); } } } pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! { let config = load_config_or_exit(cli_config_overrides).await; match CodexAuth::from_auth_storage(&config.codex_home, config.cli_auth_credentials_store_mode) { Ok(Some(auth)) => match auth.mode { AuthMode::ApiKey => match auth.get_token().await { Ok(api_key) => { eprintln!("Logged in using an API key - {}", safe_format_key(&api_key)); std::process::exit(0); } Err(e) => { eprintln!("Unexpected error retrieving API key: {e}"); std::process::exit(1); } }, AuthMode::ChatGPT => { eprintln!("Logged in using ChatGPT"); std::process::exit(0); } }, Ok(None) => { eprintln!("Not logged in"); std::process::exit(1); } Err(e) => { eprintln!("Error checking login status: {e}"); std::process::exit(1); } } } pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! 
{ let config = load_config_or_exit(cli_config_overrides).await; match logout(&config.codex_home, config.cli_auth_credentials_store_mode) { Ok(true) => { eprintln!("Successfully logged out"); std::process::exit(0); } Ok(false) => { eprintln!("Not logged in"); std::process::exit(0); } Err(e) => { eprintln!("Error logging out: {e}"); std::process::exit(1); } } } async fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config { let cli_overrides = match cli_config_overrides.parse_overrides() { Ok(v) => v, Err(e) => { eprintln!("Error parsing -c overrides: {e}"); std::process::exit(1); } }; match Config::load_with_cli_overrides(cli_overrides).await { Ok(config) => config, Err(e) => { eprintln!("Error loading configuration: {e}"); std::process::exit(1); } } } fn safe_format_key(key: &str) -> String { if key.len() <= 13 { return "***".to_string(); } let prefix = &key[..8]; let suffix = &key[key.len() - 5..]; format!("{prefix}***{suffix}") } #[cfg(test)] mod tests { use super::safe_format_key; #[test] fn formats_long_key() { let key = "sk-proj-1234567890ABCDE"; assert_eq!(safe_format_key(key), "sk-proj-***ABCDE"); } #[test] fn short_key_returns_stars() { let key = "sk-proj-12345"; assert_eq!(safe_format_key(key), "***"); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/debug_sandbox.rs
codex-rs/cli/src/debug_sandbox.rs
#[cfg(target_os = "macos")] mod pid_tracker; #[cfg(target_os = "macos")] mod seatbelt; use std::path::PathBuf; use codex_common::CliConfigOverrides; use codex_core::config::Config; use codex_core::config::ConfigOverrides; use codex_core::exec_env::create_env; use codex_core::landlock::spawn_command_under_linux_sandbox; #[cfg(target_os = "macos")] use codex_core::seatbelt::spawn_command_under_seatbelt; use codex_core::spawn::StdioPolicy; use codex_protocol::config_types::SandboxMode; use crate::LandlockCommand; use crate::SeatbeltCommand; use crate::WindowsCommand; use crate::exit_status::handle_exit_status; #[cfg(target_os = "macos")] use seatbelt::DenialLogger; #[cfg(target_os = "macos")] pub async fn run_command_under_seatbelt( command: SeatbeltCommand, codex_linux_sandbox_exe: Option<PathBuf>, ) -> anyhow::Result<()> { let SeatbeltCommand { full_auto, log_denials, config_overrides, command, } = command; run_command_under_sandbox( full_auto, command, config_overrides, codex_linux_sandbox_exe, SandboxType::Seatbelt, log_denials, ) .await } #[cfg(not(target_os = "macos"))] pub async fn run_command_under_seatbelt( _command: SeatbeltCommand, _codex_linux_sandbox_exe: Option<PathBuf>, ) -> anyhow::Result<()> { anyhow::bail!("Seatbelt sandbox is only available on macOS"); } pub async fn run_command_under_landlock( command: LandlockCommand, codex_linux_sandbox_exe: Option<PathBuf>, ) -> anyhow::Result<()> { let LandlockCommand { full_auto, config_overrides, command, } = command; run_command_under_sandbox( full_auto, command, config_overrides, codex_linux_sandbox_exe, SandboxType::Landlock, false, ) .await } pub async fn run_command_under_windows( command: WindowsCommand, codex_linux_sandbox_exe: Option<PathBuf>, ) -> anyhow::Result<()> { let WindowsCommand { full_auto, config_overrides, command, } = command; run_command_under_sandbox( full_auto, command, config_overrides, codex_linux_sandbox_exe, SandboxType::Windows, false, ) .await } enum SandboxType { #[cfg(target_os 
= "macos")] Seatbelt, Landlock, Windows, } async fn run_command_under_sandbox( full_auto: bool, command: Vec<String>, config_overrides: CliConfigOverrides, codex_linux_sandbox_exe: Option<PathBuf>, sandbox_type: SandboxType, log_denials: bool, ) -> anyhow::Result<()> { let sandbox_mode = create_sandbox_mode(full_auto); let config = Config::load_with_cli_overrides_and_harness_overrides( config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?, ConfigOverrides { sandbox_mode: Some(sandbox_mode), codex_linux_sandbox_exe, ..Default::default() }, ) .await?; // In practice, this should be `std::env::current_dir()` because this CLI // does not support `--cwd`, but let's use the config value for consistency. let cwd = config.cwd.clone(); // For now, we always use the same cwd for both the command and the // sandbox policy. In the future, we could add a CLI option to set them // separately. let sandbox_policy_cwd = cwd.clone(); let stdio_policy = StdioPolicy::Inherit; let env = create_env(&config.shell_environment_policy); // Special-case Windows sandbox: execute and exit the process to emulate inherited stdio. if let SandboxType::Windows = sandbox_type { #[cfg(target_os = "windows")] { use codex_core::features::Feature; use codex_windows_sandbox::run_windows_sandbox_capture; use codex_windows_sandbox::run_windows_sandbox_capture_elevated; let policy_str = serde_json::to_string(config.sandbox_policy.get())?; let sandbox_cwd = sandbox_policy_cwd.clone(); let cwd_clone = cwd.clone(); let env_map = env.clone(); let command_vec = command.clone(); let base_dir = config.codex_home.clone(); let use_elevated = config.features.enabled(Feature::WindowsSandbox) && config.features.enabled(Feature::WindowsSandboxElevated); // Preflight audit is invoked elsewhere at the appropriate times. 
let res = tokio::task::spawn_blocking(move || { if use_elevated { run_windows_sandbox_capture_elevated( policy_str.as_str(), &sandbox_cwd, base_dir.as_path(), command_vec, &cwd_clone, env_map, None, ) } else { run_windows_sandbox_capture( policy_str.as_str(), &sandbox_cwd, base_dir.as_path(), command_vec, &cwd_clone, env_map, None, ) } }) .await; let capture = match res { Ok(Ok(v)) => v, Ok(Err(err)) => { eprintln!("windows sandbox failed: {err}"); std::process::exit(1); } Err(join_err) => { eprintln!("windows sandbox join error: {join_err}"); std::process::exit(1); } }; if !capture.stdout.is_empty() { use std::io::Write; let _ = std::io::stdout().write_all(&capture.stdout); } if !capture.stderr.is_empty() { use std::io::Write; let _ = std::io::stderr().write_all(&capture.stderr); } std::process::exit(capture.exit_code); } #[cfg(not(target_os = "windows"))] { anyhow::bail!("Windows sandbox is only available on Windows"); } } #[cfg(target_os = "macos")] let mut denial_logger = log_denials.then(DenialLogger::new).flatten(); #[cfg(not(target_os = "macos"))] let _ = log_denials; let mut child = match sandbox_type { #[cfg(target_os = "macos")] SandboxType::Seatbelt => { spawn_command_under_seatbelt( command, cwd, config.sandbox_policy.get(), sandbox_policy_cwd.as_path(), stdio_policy, env, ) .await? } SandboxType::Landlock => { #[expect(clippy::expect_used)] let codex_linux_sandbox_exe = config .codex_linux_sandbox_exe .expect("codex-linux-sandbox executable not found"); spawn_command_under_linux_sandbox( codex_linux_sandbox_exe, command, cwd, config.sandbox_policy.get(), sandbox_policy_cwd.as_path(), stdio_policy, env, ) .await? 
} SandboxType::Windows => { unreachable!("Windows sandbox should have been handled above"); } }; #[cfg(target_os = "macos")] if let Some(denial_logger) = &mut denial_logger { denial_logger.on_child_spawn(&child); } let status = child.wait().await?; #[cfg(target_os = "macos")] if let Some(denial_logger) = denial_logger { let denials = denial_logger.finish().await; eprintln!("\n=== Sandbox denials ==="); if denials.is_empty() { eprintln!("None found."); } else { for seatbelt::SandboxDenial { name, capability } in denials { eprintln!("({name}) {capability}"); } } } handle_exit_status(status); } pub fn create_sandbox_mode(full_auto: bool) -> SandboxMode { if full_auto { SandboxMode::WorkspaceWrite } else { SandboxMode::ReadOnly } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/exit_status.rs
codex-rs/cli/src/exit_status.rs
#[cfg(unix)] pub(crate) fn handle_exit_status(status: std::process::ExitStatus) -> ! { use std::os::unix::process::ExitStatusExt; // Use ExitStatus to derive the exit code. if let Some(code) = status.code() { std::process::exit(code); } else if let Some(signal) = status.signal() { std::process::exit(128 + signal); } else { std::process::exit(1); } } #[cfg(windows)] pub(crate) fn handle_exit_status(status: std::process::ExitStatus) -> ! { if let Some(code) = status.code() { std::process::exit(code); } else { // Rare on Windows, but if it happens: use fallback code. std::process::exit(1); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/main.rs
codex-rs/cli/src/main.rs
use clap::Args; use clap::CommandFactory; use clap::Parser; use clap_complete::Shell; use clap_complete::generate; use codex_arg0::arg0_dispatch_or_else; use codex_chatgpt::apply_command::ApplyCommand; use codex_chatgpt::apply_command::run_apply_command; use codex_cli::LandlockCommand; use codex_cli::SeatbeltCommand; use codex_cli::WindowsCommand; use codex_cli::login::read_api_key_from_stdin; use codex_cli::login::run_login_status; use codex_cli::login::run_login_with_api_key; use codex_cli::login::run_login_with_chatgpt; use codex_cli::login::run_login_with_device_code; use codex_cli::login::run_logout; use codex_cloud_tasks::Cli as CloudTasksCli; use codex_common::CliConfigOverrides; use codex_exec::Cli as ExecCli; use codex_exec::Command as ExecCommand; use codex_exec::ReviewArgs; use codex_execpolicy::ExecPolicyCheckCommand; use codex_responses_api_proxy::Args as ResponsesApiProxyArgs; use codex_tui::AppExitInfo; use codex_tui::Cli as TuiCli; use codex_tui::update_action::UpdateAction; use codex_tui2 as tui2; use owo_colors::OwoColorize; use std::path::PathBuf; use supports_color::Stream; mod mcp_cmd; #[cfg(not(windows))] mod wsl_paths; use crate::mcp_cmd::McpCli; use codex_core::config::Config; use codex_core::config::ConfigOverrides; use codex_core::config::find_codex_home; use codex_core::config::load_config_as_toml_with_cli_overrides; use codex_core::features::Feature; use codex_core::features::FeatureOverrides; use codex_core::features::Features; use codex_core::features::is_known_feature_key; use codex_utils_absolute_path::AbsolutePathBuf; /// Codex CLI /// /// If no subcommand is specified, options will be forwarded to the interactive CLI. #[derive(Debug, Parser)] #[clap( author, version, // If a sub‑command is given, ignore requirements of the default args. 
subcommand_negates_reqs = true, // The executable is sometimes invoked via a platform‑specific name like // `codex-x86_64-unknown-linux-musl`, but the help output should always use // the generic `codex` command name that users run. bin_name = "codex", override_usage = "codex [OPTIONS] [PROMPT]\n codex [OPTIONS] <COMMAND> [ARGS]" )] struct MultitoolCli { #[clap(flatten)] pub config_overrides: CliConfigOverrides, #[clap(flatten)] pub feature_toggles: FeatureToggles, #[clap(flatten)] interactive: TuiCli, #[clap(subcommand)] subcommand: Option<Subcommand>, } #[derive(Debug, clap::Subcommand)] enum Subcommand { /// Run Codex non-interactively. #[clap(visible_alias = "e")] Exec(ExecCli), /// Run a code review non-interactively. Review(ReviewArgs), /// Manage login. Login(LoginCommand), /// Remove stored authentication credentials. Logout(LogoutCommand), /// [experimental] Run Codex as an MCP server and manage MCP servers. Mcp(McpCli), /// [experimental] Run the Codex MCP server (stdio transport). McpServer, /// [experimental] Run the app server or related tooling. AppServer(AppServerCommand), /// Generate shell completion scripts. Completion(CompletionCommand), /// Run commands within a Codex-provided sandbox. #[clap(visible_alias = "debug")] Sandbox(SandboxArgs), /// Execpolicy tooling. #[clap(hide = true)] Execpolicy(ExecpolicyCommand), /// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree. #[clap(visible_alias = "a")] Apply(ApplyCommand), /// Resume a previous interactive session (picker by default; use --last to continue the most recent). Resume(ResumeCommand), /// [EXPERIMENTAL] Browse tasks from Codex Cloud and apply changes locally. #[clap(name = "cloud", alias = "cloud-tasks")] Cloud(CloudTasksCli), /// Internal: run the responses API proxy. #[clap(hide = true)] ResponsesApiProxy(ResponsesApiProxyArgs), /// Internal: relay stdio to a Unix domain socket. 
#[clap(hide = true, name = "stdio-to-uds")] StdioToUds(StdioToUdsCommand), /// Inspect feature flags. Features(FeaturesCli), } #[derive(Debug, Parser)] struct CompletionCommand { /// Shell to generate completions for #[clap(value_enum, default_value_t = Shell::Bash)] shell: Shell, } #[derive(Debug, Parser)] struct ResumeCommand { /// Conversation/session id (UUID). When provided, resumes this session. /// If omitted, use --last to pick the most recent recorded session. #[arg(value_name = "SESSION_ID")] session_id: Option<String>, /// Continue the most recent session without showing the picker. #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")] last: bool, /// Show all sessions (disables cwd filtering and shows CWD column). #[arg(long = "all", default_value_t = false)] all: bool, #[clap(flatten)] config_overrides: TuiCli, } #[derive(Debug, Parser)] struct SandboxArgs { #[command(subcommand)] cmd: SandboxCommand, } #[derive(Debug, clap::Subcommand)] enum SandboxCommand { /// Run a command under Seatbelt (macOS only). #[clap(visible_alias = "seatbelt")] Macos(SeatbeltCommand), /// Run a command under Landlock+seccomp (Linux only). #[clap(visible_alias = "landlock")] Linux(LandlockCommand), /// Run a command under Windows restricted token (Windows only). Windows(WindowsCommand), } #[derive(Debug, Parser)] struct ExecpolicyCommand { #[command(subcommand)] sub: ExecpolicySubcommand, } #[derive(Debug, clap::Subcommand)] enum ExecpolicySubcommand { /// Check execpolicy files against a command. #[clap(name = "check")] Check(ExecPolicyCheckCommand), } #[derive(Debug, Parser)] struct LoginCommand { #[clap(skip)] config_overrides: CliConfigOverrides, #[arg( long = "with-api-key", help = "Read the API key from stdin (e.g. 
`printenv OPENAI_API_KEY | codex login --with-api-key`)" )] with_api_key: bool, #[arg( long = "api-key", value_name = "API_KEY", help = "(deprecated) Previously accepted the API key directly; now exits with guidance to use --with-api-key", hide = true )] api_key: Option<String>, #[arg(long = "device-auth")] use_device_code: bool, /// EXPERIMENTAL: Use custom OAuth issuer base URL (advanced) /// Override the OAuth issuer base URL (advanced) #[arg(long = "experimental_issuer", value_name = "URL", hide = true)] issuer_base_url: Option<String>, /// EXPERIMENTAL: Use custom OAuth client ID (advanced) #[arg(long = "experimental_client-id", value_name = "CLIENT_ID", hide = true)] client_id: Option<String>, #[command(subcommand)] action: Option<LoginSubcommand>, } #[derive(Debug, clap::Subcommand)] enum LoginSubcommand { /// Show login status. Status, } #[derive(Debug, Parser)] struct LogoutCommand { #[clap(skip)] config_overrides: CliConfigOverrides, } #[derive(Debug, Parser)] struct AppServerCommand { /// Omit to run the app server; specify a subcommand for tooling. #[command(subcommand)] subcommand: Option<AppServerSubcommand>, } #[derive(Debug, clap::Subcommand)] enum AppServerSubcommand { /// [experimental] Generate TypeScript bindings for the app server protocol. GenerateTs(GenerateTsCommand), /// [experimental] Generate JSON Schema for the app server protocol. 
GenerateJsonSchema(GenerateJsonSchemaCommand), } #[derive(Debug, Args)] struct GenerateTsCommand { /// Output directory where .ts files will be written #[arg(short = 'o', long = "out", value_name = "DIR")] out_dir: PathBuf, /// Optional path to the Prettier executable to format generated files #[arg(short = 'p', long = "prettier", value_name = "PRETTIER_BIN")] prettier: Option<PathBuf>, } #[derive(Debug, Args)] struct GenerateJsonSchemaCommand { /// Output directory where the schema bundle will be written #[arg(short = 'o', long = "out", value_name = "DIR")] out_dir: PathBuf, } #[derive(Debug, Parser)] struct StdioToUdsCommand { /// Path to the Unix domain socket to connect to. #[arg(value_name = "SOCKET_PATH")] socket_path: PathBuf, } fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<String> { let AppExitInfo { token_usage, conversation_id, .. } = exit_info; if token_usage.is_zero() { return Vec::new(); } let mut lines = vec![format!( "{}", codex_core::protocol::FinalOutput::from(token_usage) )]; if let Some(session_id) = conversation_id { let resume_cmd = format!("codex resume {session_id}"); let command = if color_enabled { resume_cmd.cyan().to_string() } else { resume_cmd }; lines.push(format!("To continue this session, run {command}")); } lines } /// Handle the app exit and print the results. Optionally run the update action. fn handle_app_exit(exit_info: AppExitInfo) -> anyhow::Result<()> { let update_action = exit_info.update_action; let color_enabled = supports_color::on(Stream::Stdout).is_some(); for line in format_exit_messages(exit_info, color_enabled) { println!("{line}"); } if let Some(action) = update_action { run_update_action(action)?; } Ok(()) } /// Run the update action and print the result. 
fn run_update_action(action: UpdateAction) -> anyhow::Result<()> { println!(); let cmd_str = action.command_str(); println!("Updating Codex via `{cmd_str}`..."); let status = { #[cfg(windows)] { // On Windows, run via cmd.exe so .CMD/.BAT are correctly resolved (PATHEXT semantics). std::process::Command::new("cmd") .args(["/C", &cmd_str]) .status()? } #[cfg(not(windows))] { let (cmd, args) = action.command_args(); let command_path = crate::wsl_paths::normalize_for_wsl(cmd); let normalized_args: Vec<String> = args .iter() .map(crate::wsl_paths::normalize_for_wsl) .collect(); std::process::Command::new(&command_path) .args(&normalized_args) .status()? } }; if !status.success() { anyhow::bail!("`{cmd_str}` failed with status {status}"); } println!(); println!("🎉 Update ran successfully! Please restart Codex."); Ok(()) } fn run_execpolicycheck(cmd: ExecPolicyCheckCommand) -> anyhow::Result<()> { cmd.run() } #[derive(Debug, Default, Parser, Clone)] struct FeatureToggles { /// Enable a feature (repeatable). Equivalent to `-c features.<name>=true`. #[arg(long = "enable", value_name = "FEATURE", action = clap::ArgAction::Append, global = true)] enable: Vec<String>, /// Disable a feature (repeatable). Equivalent to `-c features.<name>=false`. 
#[arg(long = "disable", value_name = "FEATURE", action = clap::ArgAction::Append, global = true)] disable: Vec<String>, } impl FeatureToggles { fn to_overrides(&self) -> anyhow::Result<Vec<String>> { let mut v = Vec::new(); for feature in &self.enable { Self::validate_feature(feature)?; v.push(format!("features.{feature}=true")); } for feature in &self.disable { Self::validate_feature(feature)?; v.push(format!("features.{feature}=false")); } Ok(v) } fn validate_feature(feature: &str) -> anyhow::Result<()> { if is_known_feature_key(feature) { Ok(()) } else { anyhow::bail!("Unknown feature flag: {feature}") } } } #[derive(Debug, Parser)] struct FeaturesCli { #[command(subcommand)] sub: FeaturesSubcommand, } #[derive(Debug, Parser)] enum FeaturesSubcommand { /// List known features with their stage and effective state. List, } fn stage_str(stage: codex_core::features::Stage) -> &'static str { use codex_core::features::Stage; match stage { Stage::Experimental => "experimental", Stage::Beta { .. } => "beta", Stage::Stable => "stable", Stage::Deprecated => "deprecated", Stage::Removed => "removed", } } /// As early as possible in the process lifecycle, apply hardening measures. We /// skip this in debug builds to avoid interfering with debugging. #[ctor::ctor] #[cfg(not(debug_assertions))] fn pre_main_hardening() { codex_process_hardening::pre_main_hardening(); } fn main() -> anyhow::Result<()> { arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move { cli_main(codex_linux_sandbox_exe).await?; Ok(()) }) } async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> { let MultitoolCli { config_overrides: mut root_config_overrides, feature_toggles, mut interactive, subcommand, } = MultitoolCli::parse(); // Fold --enable/--disable into config overrides so they flow to all subcommands. 
let toggle_overrides = feature_toggles.to_overrides()?; root_config_overrides.raw_overrides.extend(toggle_overrides); match subcommand { None => { prepend_config_flags( &mut interactive.config_overrides, root_config_overrides.clone(), ); let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?; handle_app_exit(exit_info)?; } Some(Subcommand::Exec(mut exec_cli)) => { prepend_config_flags( &mut exec_cli.config_overrides, root_config_overrides.clone(), ); codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?; } Some(Subcommand::Review(review_args)) => { let mut exec_cli = ExecCli::try_parse_from(["codex", "exec"])?; exec_cli.command = Some(ExecCommand::Review(review_args)); prepend_config_flags( &mut exec_cli.config_overrides, root_config_overrides.clone(), ); codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?; } Some(Subcommand::McpServer) => { codex_mcp_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?; } Some(Subcommand::Mcp(mut mcp_cli)) => { // Propagate any root-level config overrides (e.g. `-c key=value`). 
prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone()); mcp_cli.run().await?; } Some(Subcommand::AppServer(app_server_cli)) => match app_server_cli.subcommand { None => { codex_app_server::run_main(codex_linux_sandbox_exe, root_config_overrides).await?; } Some(AppServerSubcommand::GenerateTs(gen_cli)) => { codex_app_server_protocol::generate_ts( &gen_cli.out_dir, gen_cli.prettier.as_deref(), )?; } Some(AppServerSubcommand::GenerateJsonSchema(gen_cli)) => { codex_app_server_protocol::generate_json(&gen_cli.out_dir)?; } }, Some(Subcommand::Resume(ResumeCommand { session_id, last, all, config_overrides, })) => { interactive = finalize_resume_interactive( interactive, root_config_overrides.clone(), session_id, last, all, config_overrides, ); let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?; handle_app_exit(exit_info)?; } Some(Subcommand::Login(mut login_cli)) => { prepend_config_flags( &mut login_cli.config_overrides, root_config_overrides.clone(), ); match login_cli.action { Some(LoginSubcommand::Status) => { run_login_status(login_cli.config_overrides).await; } None => { if login_cli.use_device_code { run_login_with_device_code( login_cli.config_overrides, login_cli.issuer_base_url, login_cli.client_id, ) .await; } else if login_cli.api_key.is_some() { eprintln!( "The --api-key flag is no longer supported. Pipe the key instead, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`." 
); std::process::exit(1); } else if login_cli.with_api_key { let api_key = read_api_key_from_stdin(); run_login_with_api_key(login_cli.config_overrides, api_key).await; } else { run_login_with_chatgpt(login_cli.config_overrides).await; } } } } Some(Subcommand::Logout(mut logout_cli)) => { prepend_config_flags( &mut logout_cli.config_overrides, root_config_overrides.clone(), ); run_logout(logout_cli.config_overrides).await; } Some(Subcommand::Completion(completion_cli)) => { print_completion(completion_cli); } Some(Subcommand::Cloud(mut cloud_cli)) => { prepend_config_flags( &mut cloud_cli.config_overrides, root_config_overrides.clone(), ); codex_cloud_tasks::run_main(cloud_cli, codex_linux_sandbox_exe).await?; } Some(Subcommand::Sandbox(sandbox_args)) => match sandbox_args.cmd { SandboxCommand::Macos(mut seatbelt_cli) => { prepend_config_flags( &mut seatbelt_cli.config_overrides, root_config_overrides.clone(), ); codex_cli::debug_sandbox::run_command_under_seatbelt( seatbelt_cli, codex_linux_sandbox_exe, ) .await?; } SandboxCommand::Linux(mut landlock_cli) => { prepend_config_flags( &mut landlock_cli.config_overrides, root_config_overrides.clone(), ); codex_cli::debug_sandbox::run_command_under_landlock( landlock_cli, codex_linux_sandbox_exe, ) .await?; } SandboxCommand::Windows(mut windows_cli) => { prepend_config_flags( &mut windows_cli.config_overrides, root_config_overrides.clone(), ); codex_cli::debug_sandbox::run_command_under_windows( windows_cli, codex_linux_sandbox_exe, ) .await?; } }, Some(Subcommand::Execpolicy(ExecpolicyCommand { sub })) => match sub { ExecpolicySubcommand::Check(cmd) => run_execpolicycheck(cmd)?, }, Some(Subcommand::Apply(mut apply_cli)) => { prepend_config_flags( &mut apply_cli.config_overrides, root_config_overrides.clone(), ); run_apply_command(apply_cli, None).await?; } Some(Subcommand::ResponsesApiProxy(args)) => { tokio::task::spawn_blocking(move || codex_responses_api_proxy::run_main(args)) .await??; } 
Some(Subcommand::StdioToUds(cmd)) => { let socket_path = cmd.socket_path; tokio::task::spawn_blocking(move || codex_stdio_to_uds::run(socket_path.as_path())) .await??; } Some(Subcommand::Features(FeaturesCli { sub })) => match sub { FeaturesSubcommand::List => { // Respect root-level `-c` overrides plus top-level flags like `--profile`. let mut cli_kv_overrides = root_config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; // Honor `--search` via the new feature toggle. if interactive.web_search { cli_kv_overrides.push(( "features.web_search_request".to_string(), toml::Value::Boolean(true), )); } // Thread through relevant top-level flags (at minimum, `--profile`). let overrides = ConfigOverrides { config_profile: interactive.config_profile.clone(), ..Default::default() }; let config = Config::load_with_cli_overrides_and_harness_overrides( cli_kv_overrides, overrides, ) .await?; for def in codex_core::features::FEATURES.iter() { let name = def.key; let stage = stage_str(def.stage); let enabled = config.features.enabled(def.id); println!("{name}\t{stage}\t{enabled}"); } } }, } Ok(()) } /// Prepend root-level overrides so they have lower precedence than /// CLI-specific ones specified after the subcommand (if any). fn prepend_config_flags( subcommand_config_overrides: &mut CliConfigOverrides, cli_config_overrides: CliConfigOverrides, ) { subcommand_config_overrides .raw_overrides .splice(0..0, cli_config_overrides.raw_overrides); } /// Run the interactive Codex TUI, dispatching to either the legacy implementation or the /// experimental TUI v2 shim based on feature flags resolved from config. async fn run_interactive_tui( interactive: TuiCli, codex_linux_sandbox_exe: Option<PathBuf>, ) -> std::io::Result<AppExitInfo> { if is_tui2_enabled(&interactive).await? 
{ let result = tui2::run_main(interactive.into(), codex_linux_sandbox_exe).await?; Ok(result.into()) } else { codex_tui::run_main(interactive, codex_linux_sandbox_exe).await } } /// Returns `Ok(true)` when the resolved configuration enables the `tui2` feature flag. /// /// This performs a lightweight config load (honoring the same precedence as the lower-level TUI /// bootstrap: `$CODEX_HOME`, config.toml, profile, and CLI `-c` overrides) solely to decide which /// TUI frontend to launch. The full configuration is still loaded later by the interactive TUI. async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result<bool> { let raw_overrides = cli.config_overrides.raw_overrides.clone(); let overrides_cli = codex_common::CliConfigOverrides { raw_overrides }; let cli_kv_overrides = overrides_cli .parse_overrides() .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?; let codex_home = find_codex_home()?; let cwd = cli.cwd.clone(); let config_cwd = match cwd.as_deref() { Some(path) => AbsolutePathBuf::from_absolute_path(path)?, None => AbsolutePathBuf::current_dir()?, }; let config_toml = load_config_as_toml_with_cli_overrides(&codex_home, &config_cwd, cli_kv_overrides).await?; let config_profile = config_toml.get_config_profile(cli.config_profile.clone())?; let overrides = FeatureOverrides::default(); let features = Features::from_config(&config_toml, &config_profile, overrides); Ok(features.enabled(Feature::Tui2)) } /// Build the final `TuiCli` for a `codex resume` invocation. fn finalize_resume_interactive( mut interactive: TuiCli, root_config_overrides: CliConfigOverrides, session_id: Option<String>, last: bool, show_all: bool, resume_cli: TuiCli, ) -> TuiCli { // Start with the parsed interactive CLI so resume shares the same // configuration surface area as `codex` without additional flags. 
let resume_session_id = session_id; interactive.resume_picker = resume_session_id.is_none() && !last; interactive.resume_last = last; interactive.resume_session_id = resume_session_id; interactive.resume_show_all = show_all; // Merge resume-scoped flags and overrides with highest precedence. merge_resume_cli_flags(&mut interactive, resume_cli); // Propagate any root-level config overrides (e.g. `-c key=value`). prepend_config_flags(&mut interactive.config_overrides, root_config_overrides); interactive } /// Merge flags provided to `codex resume` so they take precedence over any /// root-level flags. Only overrides fields explicitly set on the resume-scoped /// CLI. Also appends `-c key=value` overrides with highest precedence. fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) { if let Some(model) = resume_cli.model { interactive.model = Some(model); } if resume_cli.oss { interactive.oss = true; } if let Some(profile) = resume_cli.config_profile { interactive.config_profile = Some(profile); } if let Some(sandbox) = resume_cli.sandbox_mode { interactive.sandbox_mode = Some(sandbox); } if let Some(approval) = resume_cli.approval_policy { interactive.approval_policy = Some(approval); } if resume_cli.full_auto { interactive.full_auto = true; } if resume_cli.dangerously_bypass_approvals_and_sandbox { interactive.dangerously_bypass_approvals_and_sandbox = true; } if let Some(cwd) = resume_cli.cwd { interactive.cwd = Some(cwd); } if resume_cli.web_search { interactive.web_search = true; } if !resume_cli.images.is_empty() { interactive.images = resume_cli.images; } if !resume_cli.add_dir.is_empty() { interactive.add_dir.extend(resume_cli.add_dir); } if let Some(prompt) = resume_cli.prompt { interactive.prompt = Some(prompt); } interactive .config_overrides .raw_overrides .extend(resume_cli.config_overrides.raw_overrides); } fn print_completion(cmd: CompletionCommand) { let mut app = MultitoolCli::command(); let name = "codex"; generate(cmd.shell, &mut 
app, name, &mut std::io::stdout()); } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use codex_core::protocol::TokenUsage; use codex_protocol::ConversationId; use pretty_assertions::assert_eq; fn finalize_from_args(args: &[&str]) -> TuiCli { let cli = MultitoolCli::try_parse_from(args).expect("parse"); let MultitoolCli { interactive, config_overrides: root_overrides, subcommand, feature_toggles: _, } = cli; let Subcommand::Resume(ResumeCommand { session_id, last, all, config_overrides: resume_cli, }) = subcommand.expect("resume present") else { unreachable!() }; finalize_resume_interactive( interactive, root_overrides, session_id, last, all, resume_cli, ) } fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo { let token_usage = TokenUsage { output_tokens: 2, total_tokens: 2, ..Default::default() }; AppExitInfo { token_usage, conversation_id: conversation .map(ConversationId::from_string) .map(Result::unwrap), update_action: None, } } #[test] fn format_exit_messages_skips_zero_usage() { let exit_info = AppExitInfo { token_usage: TokenUsage::default(), conversation_id: None, update_action: None, }; let lines = format_exit_messages(exit_info, false); assert!(lines.is_empty()); } #[test] fn format_exit_messages_includes_resume_hint_without_color() { let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000")); let lines = format_exit_messages(exit_info, false); assert_eq!( lines, vec![ "Token usage: total=2 input=0 output=2".to_string(), "To continue this session, run codex resume 123e4567-e89b-12d3-a456-426614174000" .to_string(), ] ); } #[test] fn format_exit_messages_applies_color_when_enabled() { let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000")); let lines = format_exit_messages(exit_info, true); assert_eq!(lines.len(), 2); assert!(lines[1].contains("\u{1b}[36m")); } #[test] fn resume_model_flag_applies_when_no_root_flags() { let interactive = finalize_from_args(["codex", 
"resume", "-m", "gpt-5.1-test"].as_ref()); assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test")); assert!(interactive.resume_picker); assert!(!interactive.resume_last); assert_eq!(interactive.resume_session_id, None); } #[test] fn resume_picker_logic_none_and_not_last() { let interactive = finalize_from_args(["codex", "resume"].as_ref()); assert!(interactive.resume_picker); assert!(!interactive.resume_last); assert_eq!(interactive.resume_session_id, None); assert!(!interactive.resume_show_all); } #[test] fn resume_picker_logic_last() { let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref()); assert!(!interactive.resume_picker); assert!(interactive.resume_last); assert_eq!(interactive.resume_session_id, None); assert!(!interactive.resume_show_all); } #[test] fn resume_picker_logic_with_session_id() { let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref()); assert!(!interactive.resume_picker); assert!(!interactive.resume_last); assert_eq!(interactive.resume_session_id.as_deref(), Some("1234")); assert!(!interactive.resume_show_all); } #[test] fn resume_all_flag_sets_show_all() { let interactive = finalize_from_args(["codex", "resume", "--all"].as_ref()); assert!(interactive.resume_picker); assert!(interactive.resume_show_all); } #[test] fn resume_merges_option_flags_and_full_auto() { let interactive = finalize_from_args( [ "codex", "resume", "sid", "--oss", "--full-auto", "--search", "--sandbox", "workspace-write", "--ask-for-approval", "on-request", "-m", "gpt-5.1-test", "-p", "my-profile", "-C", "/tmp", "-i", "/tmp/a.png,/tmp/b.png", ] .as_ref(), ); assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test")); assert!(interactive.oss); assert_eq!(interactive.config_profile.as_deref(), Some("my-profile")); assert_matches!( interactive.sandbox_mode, Some(codex_common::SandboxModeCliArg::WorkspaceWrite) ); assert_matches!( interactive.approval_policy, Some(codex_common::ApprovalModeCliArg::OnRequest) ); 
assert!(interactive.full_auto); assert_eq!( interactive.cwd.as_deref(), Some(std::path::Path::new("/tmp")) ); assert!(interactive.web_search); let has_a = interactive .images .iter() .any(|p| p == std::path::Path::new("/tmp/a.png")); let has_b = interactive .images .iter() .any(|p| p == std::path::Path::new("/tmp/b.png")); assert!(has_a && has_b); assert!(!interactive.resume_picker); assert!(!interactive.resume_last);
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/mcp_cmd.rs
codex-rs/cli/src/mcp_cmd.rs
use std::collections::HashMap; use anyhow::Context; use anyhow::Result; use anyhow::anyhow; use anyhow::bail; use clap::ArgGroup; use codex_common::CliConfigOverrides; use codex_common::format_env_display::format_env_display; use codex_core::config::Config; use codex_core::config::edit::ConfigEditsBuilder; use codex_core::config::find_codex_home; use codex_core::config::load_global_mcp_servers; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; use codex_core::mcp::auth::compute_auth_statuses; use codex_core::protocol::McpAuthStatus; use codex_rmcp_client::delete_oauth_tokens; use codex_rmcp_client::perform_oauth_login; use codex_rmcp_client::supports_oauth_login; /// Subcommands: /// - `serve` — run the MCP server on stdio /// - `list` — list configured servers (with `--json`) /// - `get` — show a single server (with `--json`) /// - `add` — add a server launcher entry to `~/.codex/config.toml` /// - `remove` — delete a server entry #[derive(Debug, clap::Parser)] pub struct McpCli { #[clap(flatten)] pub config_overrides: CliConfigOverrides, #[command(subcommand)] pub subcommand: McpSubcommand, } #[derive(Debug, clap::Subcommand)] pub enum McpSubcommand { List(ListArgs), Get(GetArgs), Add(AddArgs), Remove(RemoveArgs), Login(LoginArgs), Logout(LogoutArgs), } #[derive(Debug, clap::Parser)] pub struct ListArgs { /// Output the configured servers as JSON. #[arg(long)] pub json: bool, } #[derive(Debug, clap::Parser)] pub struct GetArgs { /// Name of the MCP server to display. pub name: String, /// Output the server configuration as JSON. #[arg(long)] pub json: bool, } #[derive(Debug, clap::Parser)] #[command(override_usage = "codex mcp add [OPTIONS] <NAME> (--url <URL> | -- <COMMAND>...)")] pub struct AddArgs { /// Name for the MCP server configuration. 
pub name: String, #[command(flatten)] pub transport_args: AddMcpTransportArgs, } #[derive(Debug, clap::Args)] #[command( group( ArgGroup::new("transport") .args(["command", "url"]) .required(true) .multiple(false) ) )] pub struct AddMcpTransportArgs { #[command(flatten)] pub stdio: Option<AddMcpStdioArgs>, #[command(flatten)] pub streamable_http: Option<AddMcpStreamableHttpArgs>, } #[derive(Debug, clap::Args)] pub struct AddMcpStdioArgs { /// Command to launch the MCP server. /// Use --url for a streamable HTTP server. #[arg( trailing_var_arg = true, num_args = 0.., )] pub command: Vec<String>, /// Environment variables to set when launching the server. /// Only valid with stdio servers. #[arg( long, value_parser = parse_env_pair, value_name = "KEY=VALUE", )] pub env: Vec<(String, String)>, } #[derive(Debug, clap::Args)] pub struct AddMcpStreamableHttpArgs { /// URL for a streamable HTTP MCP server. #[arg(long)] pub url: String, /// Optional environment variable to read for a bearer token. /// Only valid with streamable HTTP servers. #[arg( long = "bearer-token-env-var", value_name = "ENV_VAR", requires = "url" )] pub bearer_token_env_var: Option<String>, } #[derive(Debug, clap::Parser)] pub struct RemoveArgs { /// Name of the MCP server configuration to remove. pub name: String, } #[derive(Debug, clap::Parser)] pub struct LoginArgs { /// Name of the MCP server to authenticate with oauth. pub name: String, /// Comma-separated list of OAuth scopes to request. #[arg(long, value_delimiter = ',', value_name = "SCOPE,SCOPE")] pub scopes: Vec<String>, } #[derive(Debug, clap::Parser)] pub struct LogoutArgs { /// Name of the MCP server to deauthenticate. 
pub name: String, } impl McpCli { pub async fn run(self) -> Result<()> { let McpCli { config_overrides, subcommand, } = self; match subcommand { McpSubcommand::List(args) => { run_list(&config_overrides, args).await?; } McpSubcommand::Get(args) => { run_get(&config_overrides, args).await?; } McpSubcommand::Add(args) => { run_add(&config_overrides, args).await?; } McpSubcommand::Remove(args) => { run_remove(&config_overrides, args).await?; } McpSubcommand::Login(args) => { run_login(&config_overrides, args).await?; } McpSubcommand::Logout(args) => { run_logout(&config_overrides, args).await?; } } Ok(()) } } async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> { // Validate any provided overrides even though they are not currently applied. let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; let AddArgs { name, transport_args, } = add_args; validate_server_name(&name)?; let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; let mut servers = load_global_mcp_servers(&codex_home) .await .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; let transport = match transport_args { AddMcpTransportArgs { stdio: Some(stdio), .. } => { let mut command_parts = stdio.command.into_iter(); let command_bin = command_parts .next() .ok_or_else(|| anyhow!("command is required"))?; let command_args: Vec<String> = command_parts.collect(); let env_map = if stdio.env.is_empty() { None } else { Some(stdio.env.into_iter().collect::<HashMap<_, _>>()) }; McpServerTransportConfig::Stdio { command: command_bin, args: command_args, env: env_map, env_vars: Vec::new(), cwd: None, } } AddMcpTransportArgs { streamable_http: Some(AddMcpStreamableHttpArgs { url, bearer_token_env_var, }), .. 
} => McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers: None, env_http_headers: None, }, AddMcpTransportArgs { .. } => bail!("exactly one of --command or --url must be provided"), }; let new_entry = McpServerConfig { transport: transport.clone(), enabled: true, startup_timeout_sec: None, tool_timeout_sec: None, enabled_tools: None, disabled_tools: None, }; servers.insert(name.clone(), new_entry); ConfigEditsBuilder::new(&codex_home) .replace_mcp_servers(&servers) .apply() .await .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?; println!("Added global MCP server '{name}'."); if let McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var: None, http_headers, env_http_headers, } = transport { match supports_oauth_login(&url).await { Ok(true) => { println!("Detected OAuth support. Starting OAuth flow…"); perform_oauth_login( &name, &url, config.mcp_oauth_credentials_store_mode, http_headers.clone(), env_http_headers.clone(), &Vec::new(), ) .await?; println!("Successfully logged in."); } Ok(false) => {} Err(_) => println!( "MCP server may or may not require login. Run `codex mcp login {name}` to login." 
), } } Ok(()) } async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> { config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let RemoveArgs { name } = remove_args; validate_server_name(&name)?; let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; let mut servers = load_global_mcp_servers(&codex_home) .await .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; let removed = servers.remove(&name).is_some(); if removed { ConfigEditsBuilder::new(&codex_home) .replace_mcp_servers(&servers) .apply() .await .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?; } if removed { println!("Removed global MCP server '{name}'."); } else { println!("No MCP server named '{name}' found."); } Ok(()) } async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) -> Result<()> { let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; let LoginArgs { name, scopes } = login_args; let Some(server) = config.mcp_servers.get(&name) else { bail!("No MCP server named '{name}' found."); }; let (url, http_headers, env_http_headers) = match &server.transport { McpServerTransportConfig::StreamableHttp { url, http_headers, env_http_headers, .. 
} => (url.clone(), http_headers.clone(), env_http_headers.clone()), _ => bail!("OAuth login is only supported for streamable HTTP servers."), }; perform_oauth_login( &name, &url, config.mcp_oauth_credentials_store_mode, http_headers, env_http_headers, &scopes, ) .await?; println!("Successfully logged in to MCP server '{name}'."); Ok(()) } async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutArgs) -> Result<()> { let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; let LogoutArgs { name } = logout_args; let server = config .mcp_servers .get(&name) .ok_or_else(|| anyhow!("No MCP server named '{name}' found in configuration."))?; let url = match &server.transport { McpServerTransportConfig::StreamableHttp { url, .. } => url.clone(), _ => bail!("OAuth logout is only supported for streamable_http transports."), }; match delete_oauth_tokens(&name, &url, config.mcp_oauth_credentials_store_mode) { Ok(true) => println!("Removed OAuth credentials for '{name}'."), Ok(false) => println!("No OAuth credentials stored for '{name}'."), Err(err) => return Err(anyhow!("failed to delete OAuth credentials: {err}")), } Ok(()) } async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> { let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; let mut entries: Vec<_> = config.mcp_servers.iter().collect(); entries.sort_by(|(a, _), (b, _)| a.cmp(b)); let auth_statuses = compute_auth_statuses( config.mcp_servers.iter(), config.mcp_oauth_credentials_store_mode, ) .await; if list_args.json { let json_entries: Vec<_> = entries .into_iter() .map(|(name, cfg)| { let auth_status = auth_statuses .get(name.as_str()) .map(|entry| entry.auth_status) 
.unwrap_or(McpAuthStatus::Unsupported); let transport = match &cfg.transport { McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => serde_json::json!({ "type": "stdio", "command": command, "args": args, "env": env, "env_vars": env_vars, "cwd": cwd, }), McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, env_http_headers, } => { serde_json::json!({ "type": "streamable_http", "url": url, "bearer_token_env_var": bearer_token_env_var, "http_headers": http_headers, "env_http_headers": env_http_headers, }) } }; serde_json::json!({ "name": name, "enabled": cfg.enabled, "transport": transport, "startup_timeout_sec": cfg .startup_timeout_sec .map(|timeout| timeout.as_secs_f64()), "tool_timeout_sec": cfg .tool_timeout_sec .map(|timeout| timeout.as_secs_f64()), "auth_status": auth_status, }) }) .collect(); let output = serde_json::to_string_pretty(&json_entries)?; println!("{output}"); return Ok(()); } if entries.is_empty() { println!("No MCP servers configured yet. 
Try `codex mcp add my-tool -- my-command`."); return Ok(()); } let mut stdio_rows: Vec<[String; 7]> = Vec::new(); let mut http_rows: Vec<[String; 5]> = Vec::new(); for (name, cfg) in entries { match &cfg.transport { McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => { let args_display = if args.is_empty() { "-".to_string() } else { args.join(" ") }; let env_display = format_env_display(env.as_ref(), env_vars); let cwd_display = cwd .as_ref() .map(|path| path.display().to_string()) .filter(|value| !value.is_empty()) .unwrap_or_else(|| "-".to_string()); let status = if cfg.enabled { "enabled".to_string() } else { "disabled".to_string() }; let auth_status = auth_statuses .get(name.as_str()) .map(|entry| entry.auth_status) .unwrap_or(McpAuthStatus::Unsupported) .to_string(); stdio_rows.push([ name.clone(), command.clone(), args_display, env_display, cwd_display, status, auth_status, ]); } McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, .. } => { let status = if cfg.enabled { "enabled".to_string() } else { "disabled".to_string() }; let auth_status = auth_statuses .get(name.as_str()) .map(|entry| entry.auth_status) .unwrap_or(McpAuthStatus::Unsupported) .to_string(); let bearer_token_display = bearer_token_env_var.as_deref().unwrap_or("-").to_string(); http_rows.push([ name.clone(), url.clone(), bearer_token_display, status, auth_status, ]); } } } if !stdio_rows.is_empty() { let mut widths = [ "Name".len(), "Command".len(), "Args".len(), "Env".len(), "Cwd".len(), "Status".len(), "Auth".len(), ]; for row in &stdio_rows { for (i, cell) in row.iter().enumerate() { widths[i] = widths[i].max(cell.len()); } } println!( "{name:<name_w$} {command:<cmd_w$} {args:<args_w$} {env:<env_w$} {cwd:<cwd_w$} {status:<status_w$} {auth:<auth_w$}", name = "Name", command = "Command", args = "Args", env = "Env", cwd = "Cwd", status = "Status", auth = "Auth", name_w = widths[0], cmd_w = widths[1], args_w = widths[2], env_w = widths[3], cwd_w = 
widths[4], status_w = widths[5], auth_w = widths[6], ); for row in &stdio_rows { println!( "{name:<name_w$} {command:<cmd_w$} {args:<args_w$} {env:<env_w$} {cwd:<cwd_w$} {status:<status_w$} {auth:<auth_w$}", name = row[0].as_str(), command = row[1].as_str(), args = row[2].as_str(), env = row[3].as_str(), cwd = row[4].as_str(), status = row[5].as_str(), auth = row[6].as_str(), name_w = widths[0], cmd_w = widths[1], args_w = widths[2], env_w = widths[3], cwd_w = widths[4], status_w = widths[5], auth_w = widths[6], ); } } if !stdio_rows.is_empty() && !http_rows.is_empty() { println!(); } if !http_rows.is_empty() { let mut widths = [ "Name".len(), "Url".len(), "Bearer Token Env Var".len(), "Status".len(), "Auth".len(), ]; for row in &http_rows { for (i, cell) in row.iter().enumerate() { widths[i] = widths[i].max(cell.len()); } } println!( "{name:<name_w$} {url:<url_w$} {token:<token_w$} {status:<status_w$} {auth:<auth_w$}", name = "Name", url = "Url", token = "Bearer Token Env Var", status = "Status", auth = "Auth", name_w = widths[0], url_w = widths[1], token_w = widths[2], status_w = widths[3], auth_w = widths[4], ); for row in &http_rows { println!( "{name:<name_w$} {url:<url_w$} {token:<token_w$} {status:<status_w$} {auth:<auth_w$}", name = row[0].as_str(), url = row[1].as_str(), token = row[2].as_str(), status = row[3].as_str(), auth = row[4].as_str(), name_w = widths[0], url_w = widths[1], token_w = widths[2], status_w = widths[3], auth_w = widths[4], ); } } Ok(()) } async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> { let overrides = config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?; let config = Config::load_with_cli_overrides(overrides) .await .context("failed to load configuration")?; let Some(server) = config.mcp_servers.get(&get_args.name) else { bail!("No MCP server named '{name}' found.", name = get_args.name); }; if get_args.json { let transport = match &server.transport { 
McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => serde_json::json!({ "type": "stdio", "command": command, "args": args, "env": env, "env_vars": env_vars, "cwd": cwd, }), McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, env_http_headers, } => serde_json::json!({ "type": "streamable_http", "url": url, "bearer_token_env_var": bearer_token_env_var, "http_headers": http_headers, "env_http_headers": env_http_headers, }), }; let output = serde_json::to_string_pretty(&serde_json::json!({ "name": get_args.name, "enabled": server.enabled, "transport": transport, "enabled_tools": server.enabled_tools.clone(), "disabled_tools": server.disabled_tools.clone(), "startup_timeout_sec": server .startup_timeout_sec .map(|timeout| timeout.as_secs_f64()), "tool_timeout_sec": server .tool_timeout_sec .map(|timeout| timeout.as_secs_f64()), }))?; println!("{output}"); return Ok(()); } if !server.enabled { println!("{} (disabled)", get_args.name); return Ok(()); } println!("{}", get_args.name); println!(" enabled: {}", server.enabled); let format_tool_list = |tools: &Option<Vec<String>>| -> String { match tools { Some(list) if list.is_empty() => "[]".to_string(), Some(list) => list.join(", "), None => "-".to_string(), } }; if server.enabled_tools.is_some() { let enabled_tools_display = format_tool_list(&server.enabled_tools); println!(" enabled_tools: {enabled_tools_display}"); } if server.disabled_tools.is_some() { let disabled_tools_display = format_tool_list(&server.disabled_tools); println!(" disabled_tools: {disabled_tools_display}"); } match &server.transport { McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => { println!(" transport: stdio"); println!(" command: {command}"); let args_display = if args.is_empty() { "-".to_string() } else { args.join(" ") }; println!(" args: {args_display}"); let cwd_display = cwd .as_ref() .map(|path| path.display().to_string()) .filter(|value| !value.is_empty()) 
.unwrap_or_else(|| "-".to_string()); println!(" cwd: {cwd_display}"); let env_display = format_env_display(env.as_ref(), env_vars); println!(" env: {env_display}"); } McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, env_http_headers, } => { println!(" transport: streamable_http"); println!(" url: {url}"); let bearer_token_display = bearer_token_env_var.as_deref().unwrap_or("-"); println!(" bearer_token_env_var: {bearer_token_display}"); let headers_display = match http_headers { Some(map) if !map.is_empty() => { let mut pairs: Vec<_> = map.iter().collect(); pairs.sort_by(|(a, _), (b, _)| a.cmp(b)); pairs .into_iter() .map(|(k, _)| format!("{k}=*****")) .collect::<Vec<_>>() .join(", ") } _ => "-".to_string(), }; println!(" http_headers: {headers_display}"); let env_headers_display = match env_http_headers { Some(map) if !map.is_empty() => { let mut pairs: Vec<_> = map.iter().collect(); pairs.sort_by(|(a, _), (b, _)| a.cmp(b)); pairs .into_iter() .map(|(k, var)| format!("{k}={var}")) .collect::<Vec<_>>() .join(", ") } _ => "-".to_string(), }; println!(" env_http_headers: {env_headers_display}"); } } if let Some(timeout) = server.startup_timeout_sec { println!(" startup_timeout_sec: {}", timeout.as_secs_f64()); } if let Some(timeout) = server.tool_timeout_sec { println!(" tool_timeout_sec: {}", timeout.as_secs_f64()); } println!(" remove: codex mcp remove {}", get_args.name); Ok(()) } fn parse_env_pair(raw: &str) -> Result<(String, String), String> { let mut parts = raw.splitn(2, '='); let key = parts .next() .map(str::trim) .filter(|s| !s.is_empty()) .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?; let value = parts .next() .map(str::to_string) .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?; Ok((key.to_string(), value)) } fn validate_server_name(name: &str) -> Result<()> { let is_valid = !name.is_empty() && name .chars() .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == 
'_'); if is_valid { Ok(()) } else { bail!("invalid server name '{name}' (use letters, numbers, '-', '_')"); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/debug_sandbox/seatbelt.rs
codex-rs/cli/src/debug_sandbox/seatbelt.rs
use std::collections::HashSet; use tokio::io::AsyncBufReadExt; use tokio::process::Child; use tokio::task::JoinHandle; use super::pid_tracker::PidTracker; pub struct SandboxDenial { pub name: String, pub capability: String, } pub struct DenialLogger { log_stream: Child, pid_tracker: Option<PidTracker>, log_reader: Option<JoinHandle<Vec<u8>>>, } impl DenialLogger { pub(crate) fn new() -> Option<Self> { let mut log_stream = start_log_stream()?; let stdout = log_stream.stdout.take()?; let log_reader = tokio::spawn(async move { let mut reader = tokio::io::BufReader::new(stdout); let mut logs = Vec::new(); let mut chunk = Vec::new(); loop { match reader.read_until(b'\n', &mut chunk).await { Ok(0) | Err(_) => break, Ok(_) => { logs.extend_from_slice(&chunk); chunk.clear(); } } } logs }); Some(Self { log_stream, pid_tracker: None, log_reader: Some(log_reader), }) } pub(crate) fn on_child_spawn(&mut self, child: &Child) { if let Some(root_pid) = child.id() { self.pid_tracker = PidTracker::new(root_pid as i32); } } pub(crate) async fn finish(mut self) -> Vec<SandboxDenial> { let pid_set = match self.pid_tracker { Some(tracker) => tracker.stop().await, None => Default::default(), }; if pid_set.is_empty() { return Vec::new(); } let _ = self.log_stream.kill().await; let _ = self.log_stream.wait().await; let logs_bytes = match self.log_reader.take() { Some(handle) => handle.await.unwrap_or_default(), None => Vec::new(), }; let logs = String::from_utf8_lossy(&logs_bytes); let mut seen: HashSet<(String, String)> = HashSet::new(); let mut denials: Vec<SandboxDenial> = Vec::new(); for line in logs.lines() { if let Ok(json) = serde_json::from_str::<serde_json::Value>(line) && let Some(msg) = json.get("eventMessage").and_then(|v| v.as_str()) && let Some((pid, name, capability)) = parse_message(msg) && pid_set.contains(&pid) && seen.insert((name.clone(), capability.clone())) { denials.push(SandboxDenial { name, capability }); } } denials } } fn start_log_stream() -> Option<Child> { 
use std::process::Stdio; const PREDICATE: &str = r#"(((processID == 0) AND (senderImagePath CONTAINS "/Sandbox")) OR (subsystem == "com.apple.sandbox.reporting"))"#; tokio::process::Command::new("log") .args(["stream", "--style", "ndjson", "--predicate", PREDICATE]) .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .kill_on_drop(true) .spawn() .ok() } fn parse_message(msg: &str) -> Option<(i32, String, String)> { // Example message: // Sandbox: processname(1234) deny(1) capability-name args... static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new(); let re = RE.get_or_init(|| { #[expect(clippy::unwrap_used)] regex_lite::Regex::new(r"^Sandbox:\s*(.+?)\((\d+)\)\s+deny\(.*?\)\s*(.+)$").unwrap() }); let (_, [name, pid_str, capability]) = re.captures(msg)?.extract(); let pid = pid_str.trim().parse::<i32>().ok()?; Some((pid, name.to_string(), capability.to_string())) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cli/src/debug_sandbox/pid_tracker.rs
codex-rs/cli/src/debug_sandbox/pid_tracker.rs
use std::collections::HashSet; use tokio::task::JoinHandle; use tracing::warn; /// Tracks the (recursive) descendants of a process by using `kqueue` to watch for fork events, and /// `proc_listchildpids` to list the children of a process. pub(crate) struct PidTracker { kq: libc::c_int, handle: JoinHandle<HashSet<i32>>, } impl PidTracker { pub(crate) fn new(root_pid: i32) -> Option<Self> { if root_pid <= 0 { return None; } let kq = unsafe { libc::kqueue() }; let handle = tokio::task::spawn_blocking(move || track_descendants(kq, root_pid)); Some(Self { kq, handle }) } pub(crate) async fn stop(self) -> HashSet<i32> { trigger_stop_event(self.kq); self.handle.await.unwrap_or_default() } } unsafe extern "C" { fn proc_listchildpids( ppid: libc::c_int, buffer: *mut libc::c_void, buffersize: libc::c_int, ) -> libc::c_int; } /// Wrap proc_listchildpids. fn list_child_pids(parent: i32) -> Vec<i32> { unsafe { let mut capacity: usize = 16; loop { let mut buf: Vec<i32> = vec![0; capacity]; let count = proc_listchildpids( parent as libc::c_int, buf.as_mut_ptr() as *mut libc::c_void, (buf.len() * std::mem::size_of::<i32>()) as libc::c_int, ); if count <= 0 { return Vec::new(); } let returned = count as usize; if returned < capacity { buf.truncate(returned); return buf; } capacity = capacity.saturating_mul(2).max(returned + 16); } } } fn pid_is_alive(pid: i32) -> bool { if pid <= 0 { return false; } let res = unsafe { libc::kill(pid as libc::pid_t, 0) }; if res == 0 { true } else { matches!( std::io::Error::last_os_error().raw_os_error(), Some(libc::EPERM) ) } } enum WatchPidError { ProcessGone, Other(std::io::Error), } /// Add `pid` to the watch list in `kq`. 
fn watch_pid(kq: libc::c_int, pid: i32) -> Result<(), WatchPidError> { if pid <= 0 { return Err(WatchPidError::ProcessGone); } let kev = libc::kevent { ident: pid as libc::uintptr_t, filter: libc::EVFILT_PROC, flags: libc::EV_ADD | libc::EV_CLEAR, fflags: libc::NOTE_FORK | libc::NOTE_EXEC | libc::NOTE_EXIT, data: 0, udata: std::ptr::null_mut(), }; let res = unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) }; if res < 0 { let err = std::io::Error::last_os_error(); if err.raw_os_error() == Some(libc::ESRCH) { Err(WatchPidError::ProcessGone) } else { Err(WatchPidError::Other(err)) } } else { Ok(()) } } fn watch_children( kq: libc::c_int, parent: i32, seen: &mut HashSet<i32>, active: &mut HashSet<i32>, ) { for child_pid in list_child_pids(parent) { add_pid_watch(kq, child_pid, seen, active); } } /// Watch `pid` and its children, updating `seen` and `active` sets. fn add_pid_watch(kq: libc::c_int, pid: i32, seen: &mut HashSet<i32>, active: &mut HashSet<i32>) { if pid <= 0 { return; } let newly_seen = seen.insert(pid); let mut should_recurse = newly_seen; if active.insert(pid) { match watch_pid(kq, pid) { Ok(()) => { should_recurse = true; } Err(WatchPidError::ProcessGone) => { active.remove(&pid); return; } Err(WatchPidError::Other(err)) => { warn!("failed to watch pid {pid}: {err}"); active.remove(&pid); return; } } } if should_recurse { watch_children(kq, pid, seen, active); } } const STOP_IDENT: libc::uintptr_t = 1; fn register_stop_event(kq: libc::c_int) -> bool { let kev = libc::kevent { ident: STOP_IDENT, filter: libc::EVFILT_USER, flags: libc::EV_ADD | libc::EV_CLEAR, fflags: 0, data: 0, udata: std::ptr::null_mut(), }; let res = unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) }; res >= 0 } fn trigger_stop_event(kq: libc::c_int) { if kq < 0 { return; } let kev = libc::kevent { ident: STOP_IDENT, filter: libc::EVFILT_USER, flags: 0, fflags: libc::NOTE_TRIGGER, data: 0, udata: std::ptr::null_mut(), }; let _ 
= unsafe { libc::kevent(kq, &kev, 1, std::ptr::null_mut(), 0, std::ptr::null()) }; } /// Put all of the above together to track all the descendants of `root_pid`. fn track_descendants(kq: libc::c_int, root_pid: i32) -> HashSet<i32> { if kq < 0 { let mut seen = HashSet::new(); seen.insert(root_pid); return seen; } if !register_stop_event(kq) { let mut seen = HashSet::new(); seen.insert(root_pid); let _ = unsafe { libc::close(kq) }; return seen; } let mut seen: HashSet<i32> = HashSet::new(); let mut active: HashSet<i32> = HashSet::new(); add_pid_watch(kq, root_pid, &mut seen, &mut active); const EVENTS_CAP: usize = 32; let mut events: [libc::kevent; EVENTS_CAP] = unsafe { std::mem::MaybeUninit::zeroed().assume_init() }; let mut stop_requested = false; loop { if active.is_empty() { if !pid_is_alive(root_pid) { break; } add_pid_watch(kq, root_pid, &mut seen, &mut active); if active.is_empty() { continue; } } let nev = unsafe { libc::kevent( kq, std::ptr::null::<libc::kevent>(), 0, events.as_mut_ptr(), EVENTS_CAP as libc::c_int, std::ptr::null(), ) }; if nev < 0 { let err = std::io::Error::last_os_error(); if err.kind() == std::io::ErrorKind::Interrupted { continue; } break; } if nev == 0 { continue; } for ev in events.iter().take(nev as usize) { let pid = ev.ident as i32; if ev.filter == libc::EVFILT_USER && ev.ident == STOP_IDENT { stop_requested = true; break; } if (ev.flags & libc::EV_ERROR) != 0 { if ev.data == libc::ESRCH as isize { active.remove(&pid); } continue; } if (ev.fflags & libc::NOTE_FORK) != 0 { watch_children(kq, pid, &mut seen, &mut active); } if (ev.fflags & libc::NOTE_EXIT) != 0 { active.remove(&pid); } } if stop_requested { break; } } let _ = unsafe { libc::close(kq) }; seen } #[cfg(test)] mod tests { use super::*; use std::process::Command; use std::process::Stdio; use std::time::Duration; #[test] fn pid_is_alive_detects_current_process() { let pid = std::process::id() as i32; assert!(pid_is_alive(pid)); } #[cfg(target_os = "macos")] #[test] fn 
list_child_pids_includes_spawned_child() { let mut child = Command::new("/bin/sleep") .arg("5") .stdin(Stdio::null()) .spawn() .expect("failed to spawn child process"); let child_pid = child.id() as i32; let parent_pid = std::process::id() as i32; let mut found = false; for _ in 0..100 { if list_child_pids(parent_pid).contains(&child_pid) { found = true; break; } std::thread::sleep(Duration::from_millis(10)); } let _ = child.kill(); let _ = child.wait(); assert!(found, "expected to find child pid {child_pid} in list"); } #[cfg(target_os = "macos")] #[tokio::test] async fn pid_tracker_collects_spawned_children() { let tracker = PidTracker::new(std::process::id() as i32).expect("failed to create tracker"); let mut child = Command::new("/bin/sleep") .arg("0.1") .stdin(Stdio::null()) .spawn() .expect("failed to spawn child process"); let child_pid = child.id() as i32; let parent_pid = std::process::id() as i32; let _ = child.wait(); let seen = tracker.stop().await; assert!( seen.contains(&parent_pid), "expected tracker to include parent pid {parent_pid}" ); assert!( seen.contains(&child_pid), "expected tracker to include child pid {child_pid}" ); } #[cfg(target_os = "macos")] #[tokio::test] async fn pid_tracker_collects_bash_subshell_descendants() { let tracker = PidTracker::new(std::process::id() as i32).expect("failed to create tracker"); let child = Command::new("/bin/bash") .arg("-c") .arg("(sleep 0.1 & echo $!; wait)") .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() .expect("failed to spawn bash"); let output = child.wait_with_output().unwrap().stdout; let subshell_pid = String::from_utf8_lossy(&output) .trim() .parse::<i32>() .expect("failed to parse subshell pid"); let seen = tracker.stop().await; assert!( seen.contains(&subshell_pid), "expected tracker to include subshell pid {subshell_pid}" ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false