repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/send_message.rs
codex-rs/app-server/tests/suite/send_message.rs
//! Integration tests for the `sendUserMessage` wire API of the app server:
//! happy path, opt-in raw response-item notifications, and the
//! unknown-conversation error path. Each test drives a real MCP server
//! process against a mock chat-completions HTTP server.

use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_protocol::ConversationId;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RawResponseItemEvent;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

/// Upper bound for any single read from the MCP process's stdout stream.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

/// Happy path: create a conversation, subscribe to its events, and send two
/// user messages, each of which must complete a full model turn.
#[tokio::test]
async fn test_send_message_success() -> Result<()> {
    // Spin up a mock completions server that immediately ends the Codex turn.
    // Two Codex turns hit the mock model (session start + send-user-message). Provide two SSE responses.
    let responses = vec![
        create_final_assistant_message_sse_response("Done")?,
        create_final_assistant_message_sse_response("Done")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;

    // Create a temporary Codex home with config pointing at the mock server.
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    // Start MCP server process and initialize.
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Start a conversation using the new wire API.
    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id, ..
    } = to_response::<_>(new_conv_resp)?;

    // 2) addConversationListener — raw events deliberately NOT opted into here;
    // send_message() below asserts that none arrive.
    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    let add_listener_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
    )
    .await??;
    let AddConversationSubscriptionResponse { subscription_id: _ } =
        to_response::<_>(add_listener_resp)?;

    // Now exercise sendUserMessage twice.
    send_message("Hello", conversation_id, &mut mcp).await?;
    send_message("Hello again", conversation_id, &mut mcp).await?;
    Ok(())
}

/// Sends `message` on `conversation_id`, waits for the turn to complete, and
/// asserts that no raw response-item notification leaks through when the
/// listener did not opt into `experimental_raw_events`.
#[expect(clippy::expect_used)]
async fn send_message(
    message: &str,
    conversation_id: ConversationId,
    mcp: &mut McpProcess,
) -> Result<()> {
    // Now exercise sendUserMessage.
    let send_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: message.to_string(),
            }],
        })
        .await?;
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
    )
    .await??;
    let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;

    // Verify the task_finished notification is received.
    // Note this also ensures that the final request to the server was made.
    let task_finished_notification: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    let serde_json::Value::Object(map) = task_finished_notification
        .params
        .expect("notification should have params")
    else {
        panic!("task_finished_notification should have params");
    };
    // The notification must be attributed to the conversation we created.
    assert_eq!(
        map.get("conversationId")
            .expect("should have conversationId"),
        &serde_json::Value::String(conversation_id.to_string())
    );
    // Negative check: without the raw-events opt-in, waiting briefly for a
    // raw_response_item notification must time out (Err from `timeout`).
    let raw_attempt = tokio::time::timeout(
        std::time::Duration::from_millis(200),
        mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
    )
    .await;
    assert!(
        raw_attempt.is_err(),
        "unexpected raw item notification when not opted in"
    );
    Ok(())
}

/// Opt-in path: with `experimental_raw_events: true`, the listener should see
/// the developer, instructions, environment, user, and assistant raw items,
/// in that order (ghost snapshots excepted — see `read_raw_response_item`).
#[tokio::test]
async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
    let server = create_mock_chat_completions_server(responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            developer_instructions: Some("Use the test harness tools.".to_string()),
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id, ..
    } = to_response::<_>(new_conv_resp)?;
    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: true,
        })
        .await?;
    let add_listener_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
    )
    .await??;
    let AddConversationSubscriptionResponse { subscription_id: _ } =
        to_response::<_>(add_listener_resp)?;
    let send_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "Hello".to_string(),
            }],
        })
        .await?;
    // Prompt-assembly items arrive before the sendUserMessage response below.
    let developer = read_raw_response_item(&mut mcp, conversation_id).await;
    assert_developer_message(&developer, "Use the test harness tools.");
    let instructions = read_raw_response_item(&mut mcp, conversation_id).await;
    assert_instructions_message(&instructions);
    let environment = read_raw_response_item(&mut mcp, conversation_id).await;
    assert_environment_message(&environment);
    let response: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
    )
    .await??;
    let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(response)?;
    let user_message = read_raw_response_item(&mut mcp, conversation_id).await;
    assert_user_message(&user_message, "Hello");
    let assistant_message = read_raw_response_item(&mut mcp, conversation_id).await;
    assert_assistant_message(&assistant_message, "Done");
    // Best-effort drain of the trailing task_complete; result intentionally ignored.
    let _ = tokio::time::timeout(
        std::time::Duration::from_millis(250),
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await;
    Ok(())
}

/// Sending to a conversation id that was never created must yield a JSON-RPC
/// error response carrying the original request id.
#[tokio::test]
async fn test_send_message_session_not_found() -> Result<()> {
    // Start MCP without creating a Codex session
    let codex_home = TempDir::new()?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let unknown = ConversationId::new();
    let req_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id: unknown,
            items: vec![InputItem::Text {
                text: "ping".to_string(),
            }],
        })
        .await?;
    // Expect an error response for unknown conversation.
    let err = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(req_id)),
    )
    .await??;
    assert_eq!(err.id, RequestId::Integer(req_id));
    Ok(())
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/// Writes a minimal `config.toml` into `codex_home` that routes all model
/// traffic to the mock provider at `server_uri` and disables approvals and
/// sandboxing so turns run unattended.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}

/// Reads the next `codex/event/raw_response_item` notification for
/// `conversation_id` and returns its decoded item, skipping ghost snapshots.
/// Panics (test failure) on timeout, malformed payloads, or a conversation-id
/// mismatch.
#[expect(clippy::expect_used)]
async fn read_raw_response_item(
    mcp: &mut McpProcess,
    conversation_id: ConversationId,
) -> ResponseItem {
    loop {
        let raw_notification: JSONRPCNotification = timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_notification_message("codex/event/raw_response_item"),
        )
        .await
        .expect("codex/event/raw_response_item notification timeout")
        .expect("codex/event/raw_response_item notification resp");
        let serde_json::Value::Object(params) = raw_notification
            .params
            .expect("codex/event/raw_response_item should have params")
        else {
            panic!("codex/event/raw_response_item should have params");
        };
        let conversation_id_value = params
            .get("conversationId")
            .and_then(|value| value.as_str())
            .expect("raw response item should include conversationId");
        assert_eq!(
            conversation_id_value,
            conversation_id.to_string(),
            "raw response item conversation mismatch"
        );
        let msg_value = params
            .get("msg")
            .cloned()
            .expect("raw response item should include msg payload");
        // Ghost snapshots are produced concurrently and may arrive before the model reply.
        let event: RawResponseItemEvent =
            serde_json::from_value(msg_value).expect("deserialize raw response item");
        if !matches!(event.item, ResponseItem::GhostSnapshot { .. }) {
            return event.item;
        }
    }
}

/// Asserts `item` is the AGENTS.md-instructions user message (identified by
/// its "# AGENTS.md instructions for " text prefix).
fn assert_instructions_message(item: &ResponseItem) {
    match item {
        ResponseItem::Message { role, content, .. } => {
            assert_eq!(role, "user");
            let texts = content_texts(content);
            let is_instructions = texts
                .iter()
                .any(|text| text.starts_with("# AGENTS.md instructions for "));
            assert!(
                is_instructions,
                "expected instructions message, got {texts:?}"
            );
        }
        other => panic!("expected instructions message, got {other:?}"),
    }
}

/// Asserts `item` is a developer-role message whose sole text equals
/// `expected_text`.
fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
    match item {
        ResponseItem::Message { role, content, .. } => {
            assert_eq!(role, "developer");
            let texts = content_texts(content);
            assert_eq!(
                texts,
                vec![expected_text],
                "expected developer instructions message, got {texts:?}"
            );
        }
        other => panic!("expected developer instructions message, got {other:?}"),
    }
}

/// Asserts `item` is the environment-context user message (contains an
/// `<environment_context>` tag somewhere in its text).
fn assert_environment_message(item: &ResponseItem) {
    match item {
        ResponseItem::Message { role, content, .. } => {
            assert_eq!(role, "user");
            let texts = content_texts(content);
            assert!(
                texts
                    .iter()
                    .any(|text| text.contains("<environment_context>")),
                "expected environment context message, got {texts:?}"
            );
        }
        other => panic!("expected environment message, got {other:?}"),
    }
}

/// Asserts `item` is a user-role message whose sole text equals `expected_text`.
fn assert_user_message(item: &ResponseItem, expected_text: &str) {
    match item {
        ResponseItem::Message { role, content, .. } => {
            assert_eq!(role, "user");
            let texts = content_texts(content);
            assert_eq!(texts, vec![expected_text]);
        }
        other => panic!("expected user message, got {other:?}"),
    }
}

/// Asserts `item` is an assistant-role message whose sole text equals
/// `expected_text`.
fn assert_assistant_message(item: &ResponseItem, expected_text: &str) {
    match item {
        ResponseItem::Message { role, content, .. } => {
            assert_eq!(role, "assistant");
            let texts = content_texts(content);
            assert_eq!(texts, vec![expected_text]);
        }
        other => panic!("expected assistant message, got {other:?}"),
    }
}

/// Collects the text of every `InputText`/`OutputText` content item; other
/// content variants are skipped.
fn content_texts(content: &[ContentItem]) -> Vec<&str> {
    content
        .iter()
        .filter_map(|item| match item {
            ContentItem::InputText { text } | ContentItem::OutputText { text } => {
                Some(text.as_str())
            }
            _ => None,
        })
        .collect()
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/codex_message_processor_flow.rs
codex-rs/app-server/tests/suite/codex_message_processor_flow.rs
//! End-to-end JSON-RPC flows for the Codex message processor: the full
//! newConversation → listener → sendUserMessage → removeListener round trip,
//! and `sendUserTurn` overrides of approval policy, sandbox policy, and cwd
//! between turns. All tests drive a real MCP process against a mock
//! chat-completions server and skip themselves when network access is
//! disabled inside a Codex sandbox.

use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ExecCommandApprovalParams;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RemoveConversationListenerParams;
use codex_app_server_protocol::RemoveConversationSubscriptionResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::SendUserTurnParams;
use codex_app_server_protocol::SendUserTurnResponse;
use codex_app_server_protocol::ServerRequest;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use pretty_assertions::assert_eq;
use std::env;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

/// Upper bound for any single read from the MCP process's stdout stream.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

/// Exercises the whole conversation lifecycle over JSON-RPC:
/// newConversation → addConversationListener → sendUserMessage →
/// task_complete notification → removeConversationListener.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_codex_jsonrpc_conversation_flow() -> Result<()> {
    if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return Ok(());
    }
    let tmp = TempDir::new()?;
    // Temporary Codex home with config pointing at the mock server.
    let codex_home = tmp.path().join("codex_home");
    std::fs::create_dir(&codex_home)?;
    let working_directory = tmp.path().join("workdir");
    std::fs::create_dir(&working_directory)?;

    // Create a mock model server that immediately ends each turn.
    // Two turns are expected: initial session configure + one user message.
    let responses = vec![
        create_shell_command_sse_response(
            vec!["ls".to_string()],
            Some(&working_directory),
            Some(5000),
            "call1234",
        )?,
        create_final_assistant_message_sse_response("Enjoy your new git repo!")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(&codex_home, &server.uri())?;

    // Start MCP server and initialize.
    let mut mcp = McpProcess::new(&codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // 1) newConversation
    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            cwd: Some(working_directory.to_string_lossy().into_owned()),
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let new_conv_resp = to_response::<NewConversationResponse>(new_conv_resp)?;
    let NewConversationResponse {
        conversation_id,
        model,
        reasoning_effort: _,
        rollout_path: _,
    } = new_conv_resp;
    // Model name comes from config.toml written by create_config_toml below.
    assert_eq!(model, "mock-model");

    // 2) addConversationListener
    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    let add_listener_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
    )
    .await??;
    let AddConversationSubscriptionResponse { subscription_id } =
        to_response::<AddConversationSubscriptionResponse>(add_listener_resp)?;

    // 3) sendUserMessage (should trigger notifications; we only validate an OK response)
    let send_user_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id,
            items: vec![codex_app_server_protocol::InputItem::Text {
                text: "text".to_string(),
            }],
        })
        .await?;
    let send_user_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
    )
    .await??;
    let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?;

    // Verify the task_finished notification is received.
    // Note this also ensures that the final request to the server was made.
    let task_finished_notification: JSONRPCNotification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    let serde_json::Value::Object(map) = task_finished_notification
        .params
        .expect("notification should have params")
    else {
        panic!("task_finished_notification should have params");
    };
    assert_eq!(
        map.get("conversationId")
            .expect("should have conversationId"),
        &serde_json::Value::String(conversation_id.to_string())
    );

    // 4) removeConversationListener
    let remove_listener_id = mcp
        .send_remove_conversation_listener_request(RemoveConversationListenerParams {
            subscription_id,
        })
        .await?;
    let remove_listener_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(remove_listener_id)),
    )
    .await??;
    let RemoveConversationSubscriptionResponse {} = to_response(remove_listener_resp)?;
    Ok(())
}

/// Verifies that a per-turn approval policy override changes elicitation
/// behavior: under the config default (`untrusted`) a shell call triggers an
/// ExecCommandApproval request, while a subsequent `sendUserTurn` with
/// `AskForApproval::Never` completes without one.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_changes_approval_policy_behavior() -> Result<()> {
    if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return Ok(());
    }
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");
    std::fs::create_dir(&codex_home)?;
    let working_directory = tmp.path().join("workdir");
    std::fs::create_dir(&working_directory)?;

    // Mock server will request a python shell call for the first and second turn, then finish.
    let responses = vec![
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            Some(&working_directory),
            Some(5000),
            "call1",
        )?,
        create_final_assistant_message_sse_response("done 1")?,
        create_shell_command_sse_response(
            vec![
                "python3".to_string(),
                "-c".to_string(),
                "print(42)".to_string(),
            ],
            Some(&working_directory),
            Some(5000),
            "call2",
        )?,
        create_final_assistant_message_sse_response("done 2")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(&codex_home, &server.uri())?;

    // Start MCP server and initialize.
    let mut mcp = McpProcess::new(&codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // 1) Start conversation with approval_policy=untrusted
    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            cwd: Some(working_directory.to_string_lossy().into_owned()),
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id, ..
    } = to_response::<NewConversationResponse>(new_conv_resp)?;

    // 2) addConversationListener
    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    let _: AddConversationSubscriptionResponse = to_response::<AddConversationSubscriptionResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
        )
        .await??,
    )?;

    // 3) sendUserMessage triggers a shell call; approval policy is Untrusted so we should get an elicitation
    let send_user_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id,
            items: vec![codex_app_server_protocol::InputItem::Text {
                text: "run python".to_string(),
            }],
        })
        .await?;
    let _send_user_resp: SendUserMessageResponse = to_response::<SendUserMessageResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)),
        )
        .await??,
    )?;

    // Expect an ExecCommandApproval request (elicitation)
    let request = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_request_message(),
    )
    .await??;
    let ServerRequest::ExecCommandApproval { request_id, params } = request else {
        panic!("expected ExecCommandApproval request, got: {request:?}");
    };
    assert_eq!(
        ExecCommandApprovalParams {
            conversation_id,
            call_id: "call1".to_string(),
            command: format_with_current_shell("python3 -c 'print(42)'"),
            cwd: working_directory.clone(),
            reason: None,
            parsed_cmd: vec![ParsedCommand::Unknown {
                cmd: "python3 -c 'print(42)'".to_string()
            }],
        },
        params
    );

    // Approve so the first turn can complete
    mcp.send_response(
        request_id,
        serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }),
    )
    .await?;

    // Wait for first TaskComplete
    let _ = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    // 4) sendUserTurn with approval_policy=never should run without elicitation
    let send_turn_id = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![codex_app_server_protocol::InputItem::Text {
                text: "run python again".to_string(),
            }],
            cwd: working_directory.clone(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::new_read_only_policy(),
            model: "mock-model".to_string(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
        })
        .await?;
    // Acknowledge sendUserTurn
    let _send_turn_resp: SendUserTurnResponse = to_response::<SendUserTurnResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(send_turn_id)),
        )
        .await??,
    )?;

    // Ensure we do NOT receive an ExecCommandApproval request before the task completes.
    // If any Request is seen while waiting for task_complete, the helper will error and the test fails.
    let _ = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}

/// Verifies that consecutive `sendUserTurn` calls can change the cwd and
/// sandbox policy: the second turn's exec_command_begin event must report the
/// updated cwd and the expected shell command.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_send_user_turn_updates_sandbox_and_cwd_between_turns() -> Result<()> {
    if env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
        println!(
            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
        );
        return Ok(());
    }
    let tmp = TempDir::new()?;
    let codex_home = tmp.path().join("codex_home");
    std::fs::create_dir(&codex_home)?;
    let workspace_root = tmp.path().join("workspace");
    std::fs::create_dir(&workspace_root)?;
    // Two distinct working directories, one per turn.
    let first_cwd = workspace_root.join("turn1");
    let second_cwd = workspace_root.join("turn2");
    std::fs::create_dir(&first_cwd)?;
    std::fs::create_dir(&second_cwd)?;

    let responses = vec![
        create_shell_command_sse_response(
            vec!["echo".to_string(), "first".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-first",
        )?,
        create_final_assistant_message_sse_response("done first")?,
        create_shell_command_sse_response(
            vec!["echo".to_string(), "second".to_string(), "turn".to_string()],
            None,
            Some(5000),
            "call-second",
        )?,
        create_final_assistant_message_sse_response("done second")?,
    ];
    let server = create_mock_chat_completions_server(responses).await;
    create_config_toml(&codex_home, &server.uri())?;

    let mut mcp = McpProcess::new(&codex_home).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            cwd: Some(first_cwd.to_string_lossy().into_owned()),
            approval_policy: Some(AskForApproval::Never),
            sandbox: Some(SandboxMode::WorkspaceWrite),
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id,
        model,
        ..
    } = to_response::<NewConversationResponse>(new_conv_resp)?;

    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
    )
    .await??;

    // First turn: workspace-write sandbox rooted at first_cwd.
    let first_turn_id = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "first turn".to_string(),
            }],
            cwd: first_cwd.clone(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![first_cwd.try_into()?],
                network_access: false,
                exclude_tmpdir_env_var: false,
                exclude_slash_tmp: false,
            },
            model: model.clone(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)),
    )
    .await??;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;

    // Second turn: different cwd and a full-access sandbox.
    let second_turn_id = mcp
        .send_send_user_turn_request(SendUserTurnParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "second turn".to_string(),
            }],
            cwd: second_cwd.clone(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::DangerFullAccess,
            model: model.clone(),
            effort: Some(ReasoningEffort::Medium),
            summary: ReasoningSummary::Auto,
        })
        .await?;
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)),
    )
    .await??;

    // The exec begin event for the second turn carries the effective cwd.
    let exec_begin_notification = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/exec_command_begin"),
    )
    .await??;
    let params = exec_begin_notification
        .params
        .clone()
        .expect("exec_command_begin params");
    let event: Event = serde_json::from_value(params).expect("deserialize exec begin event");
    let exec_begin = match event.msg {
        EventMsg::ExecCommandBegin(exec_begin) => exec_begin,
        other => panic!("expected ExecCommandBegin event, got {other:?}"),
    };
    assert_eq!(
        exec_begin.cwd, second_cwd,
        "exec turn should run from updated cwd"
    );
    let expected_command = format_with_current_shell("echo second turn");
    assert_eq!(
        exec_begin.command, expected_command,
        "exec turn should run expected command"
    );
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("codex/event/task_complete"),
    )
    .await??;
    Ok(())
}

// Helper: minimal config.toml pointing at mock provider.
/// Writes a minimal `config.toml` into `codex_home` routing model traffic to
/// the mock provider at `server_uri`; approval policy defaults to
/// "untrusted" so shell calls elicit approval unless overridden per turn.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "untrusted"
model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/create_conversation.rs
codex-rs/app-server/tests/suite/create_conversation.rs
//! Integration test for conversation creation over the JSON-RPC wire API,
//! including verification of the outbound Chat Completions request body that
//! the mock model server receives.

use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_final_assistant_message_sse_response;
use app_test_support::create_mock_chat_completions_server;
use app_test_support::to_response;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;

/// Upper bound for any single read from the MCP process's stdout stream.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

/// Creates a conversation with an explicit model override ("o3"), sends a
/// user message, and then inspects the raw HTTP request the mock model server
/// received to confirm the model name, streaming flag, and user content were
/// forwarded on the Chat Completions wire format.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_conversation_create_and_send_message_ok() -> Result<()> {
    // Mock server – we won't strictly rely on it, but provide one to satisfy any model wiring.
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
    let server = create_mock_chat_completions_server(responses).await;

    // Temporary Codex home with config pointing at the mock server.
    let codex_home = TempDir::new()?;
    create_config_toml(codex_home.path(), &server.uri())?;

    // Start MCP server process and initialize.
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    // Create a conversation via the new JSON-RPC API.
    let new_conv_id = mcp
        .send_new_conversation_request(NewConversationParams {
            model: Some("o3".to_string()),
            ..Default::default()
        })
        .await?;
    let new_conv_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)),
    )
    .await??;
    let NewConversationResponse {
        conversation_id,
        model,
        reasoning_effort: _,
        rollout_path: _,
    } = to_response::<NewConversationResponse>(new_conv_resp)?;
    // The per-request model override must win over config.toml's "mock-model".
    assert_eq!(model, "o3");

    // Add a listener so we receive notifications for this conversation (not strictly required for this test).
    let add_listener_id = mcp
        .send_add_conversation_listener_request(AddConversationListenerParams {
            conversation_id,
            experimental_raw_events: false,
        })
        .await?;
    let _sub: AddConversationSubscriptionResponse = to_response::<AddConversationSubscriptionResponse>(
        timeout(
            DEFAULT_READ_TIMEOUT,
            mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)),
        )
        .await??,
    )?;

    // Now send a user message via the wire API and expect an OK (empty object) result.
    let send_id = mcp
        .send_send_user_message_request(SendUserMessageParams {
            conversation_id,
            items: vec![InputItem::Text {
                text: "Hello".to_string(),
            }],
        })
        .await?;
    let send_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(send_id)),
    )
    .await??;
    let _ok: SendUserMessageResponse = to_response::<SendUserMessageResponse>(send_resp)?;

    // avoid race condition by waiting for the mock server to receive the chat.completions request
    let deadline = std::time::Instant::now() + DEFAULT_READ_TIMEOUT;
    let requests = loop {
        let requests = server.received_requests().await.unwrap_or_default();
        if !requests.is_empty() {
            break requests;
        }
        if std::time::Instant::now() >= deadline {
            panic!("mock server did not receive the chat.completions request in time");
        }
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    };

    // Verify the outbound request body matches expectations for Chat Completions.
    let request = requests
        .first()
        .expect("mock server should have received at least one request");
    let body = request.body_json::<serde_json::Value>()?;
    assert_eq!(body["model"], json!("o3"));
    assert!(body["stream"].as_bool().unwrap_or(false));
    let messages = body["messages"]
        .as_array()
        .expect("messages should be array");
    // The user prompt is appended last after any system/context messages.
    let last = messages.last().expect("at least one message");
    assert_eq!(last["role"], json!("user"));
    assert_eq!(last["content"], json!("Hello"));
    drop(server);
    Ok(())
}

// Helper to create a config.toml pointing at the mock model server.
/// Approvals and sandboxing are disabled so turns run unattended.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "chat"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/archive_conversation.rs
codex-rs/app-server/tests/suite/archive_conversation.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::ArchiveConversationParams; use codex_app_server_protocol::ArchiveConversationResponse; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::NewConversationParams; use codex_app_server_protocol::NewConversationResponse; use codex_app_server_protocol::RequestId; use codex_core::ARCHIVED_SESSIONS_SUBDIR; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(20); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn archive_conversation_moves_rollout_into_archived_directory() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let new_request_id = mcp .send_new_conversation_request(NewConversationParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let new_response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(new_request_id)), ) .await??; let NewConversationResponse { conversation_id, rollout_path, .. 
} = to_response::<NewConversationResponse>(new_response)?; assert!( rollout_path.exists(), "expected rollout path {} to exist", rollout_path.display() ); let archive_request_id = mcp .send_archive_conversation_request(ArchiveConversationParams { conversation_id, rollout_path: rollout_path.clone(), }) .await?; let archive_response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(archive_request_id)), ) .await??; let _: ArchiveConversationResponse = to_response::<ArchiveConversationResponse>(archive_response)?; let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR); let archived_rollout_path = archived_directory.join(rollout_path.file_name().unwrap_or_else(|| { panic!("rollout path {} missing file name", rollout_path.display()) })); assert!( !rollout_path.exists(), "expected rollout path {} to be moved", rollout_path.display() ); assert!( archived_rollout_path.exists(), "expected archived rollout path {} to exist", archived_rollout_path.display() ); Ok(()) } fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write(config_toml, config_contents()) } fn config_contents() -> &'static str { r#"model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" "# }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/user_info.rs
codex-rs/app-server/tests/suite/user_info.rs
use anyhow::Result; use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::UserInfoResponse; use codex_core::auth::AuthCredentialsStoreMode; use pretty_assertions::assert_eq; use std::time::Duration; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn user_info_returns_email_from_auth_json() -> Result<()> { let codex_home = TempDir::new()?; write_chatgpt_auth( codex_home.path(), ChatGptAuthFixture::new("access") .refresh_token("refresh") .email("user@example.com"), AuthCredentialsStoreMode::File, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_user_info_request().await?; let response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let received: UserInfoResponse = to_response(response)?; let expected = UserInfoResponse { alleged_user_email: Some("user@example.com".to_string()), }; assert_eq!(received, expected); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/login.rs
codex-rs/app-server/tests/suite/login.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::GetAuthStatusParams; use codex_app_server_protocol::GetAuthStatusResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginChatGptResponse; use codex_app_server_protocol::LogoutChatGptResponse; use codex_app_server_protocol::RequestId; use codex_core::auth::AuthCredentialsStoreMode; use codex_login::login_with_api_key; use serial_test::serial; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); // Helper to create a config.toml; mirrors create_conversation.rs fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "http://127.0.0.1:0/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "#, ) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn logout_chatgpt_removes_auth() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; login_with_api_key( codex_home.path(), "sk-test-key", AuthCredentialsStoreMode::File, )?; assert!(codex_home.path().join("auth.json").exists()); let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let id = mcp.send_logout_chat_gpt_request().await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(id)), ) .await??; let _ok: LogoutChatGptResponse = to_response(resp)?; assert!( !codex_home.path().join("auth.json").exists(), "auth.json 
should be deleted" ); // Verify status reflects signed-out state. let status_id = mcp .send_get_auth_status_request(GetAuthStatusParams { include_token: Some(true), refresh_token: Some(false), }) .await?; let status_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(status_id)), ) .await??; let status: GetAuthStatusResponse = to_response(status_resp)?; assert_eq!(status.auth_method, None); assert_eq!(status.auth_token, None); Ok(()) } fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); let contents = format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" forced_login_method = "{forced_method}" "# ); std::fs::write(config_toml, contents) } fn create_config_toml_forced_workspace( codex_home: &Path, workspace_id: &str, ) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); let contents = format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" forced_chatgpt_workspace_id = "{workspace_id}" "# ); std::fs::write(config_toml, contents) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn login_chatgpt_rejected_when_forced_api() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml_forced_login(codex_home.path(), "api")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_login_chat_gpt_request().await?; let err: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!( err.error.message, "ChatGPT login is disabled. Use API key login instead." ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] // Serialize tests that launch the login server since it binds to a fixed port. 
#[serial(login_port)] async fn login_chatgpt_includes_forced_workspace_query_param() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml_forced_workspace(codex_home.path(), "ws-forced")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_login_chat_gpt_request().await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let login: LoginChatGptResponse = to_response(resp)?; assert!( login.auth_url.contains("allowed_workspace_id=ws-forced"), "auth URL should include forced workspace" ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/user_agent.rs
codex-rs/app-server/tests/suite/user_agent.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::GetUserAgentResponse; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_user_agent_returns_current_codex_user_agent() -> Result<()> { let codex_home = TempDir::new()?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_get_user_agent_request().await?; let response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let os_info = os_info::get(); let originator = codex_core::default_client::originator().value.as_str(); let os_type = os_info.os_type(); let os_version = os_info.version(); let architecture = os_info.architecture().unwrap_or("unknown"); let terminal_ua = codex_core::terminal::user_agent(); let user_agent = format!( "{originator}/0.0.0 ({os_type} {os_version}; {architecture}) {terminal_ua} (codex-app-server-tests; 0.1.0)" ); let received: GetUserAgentResponse = to_response(response)?; let expected = GetUserAgentResponse { user_agent }; assert_eq!(received, expected); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/list_resume.rs
codex-rs/app-server/tests/suite/list_resume.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_fake_rollout; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::ListConversationsParams; use codex_app_server_protocol::ListConversationsResponse; use codex_app_server_protocol::NewConversationParams; // reused for overrides shape use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ResumeConversationParams; use codex_app_server_protocol::ResumeConversationResponse; use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::SessionConfiguredNotification; use codex_core::protocol::EventMsg; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_list_and_resume_conversations() -> Result<()> { // Prepare a temporary CODEX_HOME with a few fake rollout files. 
let codex_home = TempDir::new()?; create_fake_rollout( codex_home.path(), "2025-01-02T12-00-00", "2025-01-02T12:00:00Z", "Hello A", Some("openai"), None, )?; create_fake_rollout( codex_home.path(), "2025-01-01T13-00-00", "2025-01-01T13:00:00Z", "Hello B", Some("openai"), None, )?; create_fake_rollout( codex_home.path(), "2025-01-01T12-00-00", "2025-01-01T12:00:00Z", "Hello C", None, None, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Request first page with size 2 let req_id = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(2), cursor: None, model_providers: None, }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id)), ) .await??; let ListConversationsResponse { items, next_cursor } = to_response::<ListConversationsResponse>(resp)?; assert_eq!(items.len(), 2); // Newest first; preview text should match assert_eq!(items[0].preview, "Hello A"); assert_eq!(items[1].preview, "Hello B"); assert_eq!(items[0].model_provider, "openai"); assert_eq!(items[1].model_provider, "openai"); assert!(items[0].path.is_absolute()); assert!(next_cursor.is_some()); // Request the next page using the cursor let req_id2 = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(2), cursor: next_cursor, model_providers: None, }) .await?; let resp2: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id2)), ) .await??; let ListConversationsResponse { items: items2, next_cursor: next2, .. } = to_response::<ListConversationsResponse>(resp2)?; assert_eq!(items2.len(), 1); assert_eq!(items2[0].preview, "Hello C"); assert_eq!(items2[0].model_provider, "openai"); assert_eq!(next2, None); // Add a conversation with an explicit non-OpenAI provider for filter tests. 
create_fake_rollout( codex_home.path(), "2025-01-01T11-30-00", "2025-01-01T11:30:00Z", "Hello TP", Some("test-provider"), None, )?; // Filtering by model provider should return only matching sessions. let filter_req_id = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(10), cursor: None, model_providers: Some(vec!["test-provider".to_string()]), }) .await?; let filter_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(filter_req_id)), ) .await??; let ListConversationsResponse { items: filtered_items, next_cursor: filtered_next, } = to_response::<ListConversationsResponse>(filter_resp)?; assert_eq!(filtered_items.len(), 1); assert_eq!(filtered_next, None); assert_eq!(filtered_items[0].preview, "Hello TP"); assert_eq!(filtered_items[0].model_provider, "test-provider"); // Empty filter should include every session regardless of provider metadata. let unfiltered_req_id = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(10), cursor: None, model_providers: Some(Vec::new()), }) .await?; let unfiltered_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(unfiltered_req_id)), ) .await??; let ListConversationsResponse { items: unfiltered_items, next_cursor: unfiltered_next, } = to_response::<ListConversationsResponse>(unfiltered_resp)?; assert_eq!(unfiltered_items.len(), 4); assert!(unfiltered_next.is_none()); let empty_req_id = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(10), cursor: None, model_providers: Some(vec!["other".to_string()]), }) .await?; let empty_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(empty_req_id)), ) .await??; let ListConversationsResponse { items: empty_items, next_cursor: empty_next, } = to_response::<ListConversationsResponse>(empty_resp)?; 
assert!(empty_items.is_empty()); assert!(empty_next.is_none()); let first_item = &items[0]; // Now resume one of the sessions from an explicit rollout path. let resume_req_id = mcp .send_resume_conversation_request(ResumeConversationParams { path: Some(first_item.path.clone()), conversation_id: None, history: None, overrides: Some(NewConversationParams { model: Some("o3".to_string()), ..Default::default() }), }) .await?; // Expect a codex/event notification with msg.type == sessionConfigured let notification: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("sessionConfigured"), ) .await??; let session_configured: ServerNotification = notification.try_into()?; let ServerNotification::SessionConfigured(SessionConfiguredNotification { model, rollout_path, initial_messages: session_initial_messages, .. }) = session_configured else { unreachable!("expected sessionConfigured notification"); }; assert_eq!(model, "o3"); assert_eq!(rollout_path, first_item.path.clone()); let session_initial_messages = session_initial_messages .expect("expected initial messages when resuming from rollout path"); match session_initial_messages.as_slice() { [EventMsg::UserMessage(message)] => { assert_eq!(message.message, first_item.preview.clone()); } other => panic!("unexpected initial messages from rollout resume: {other:#?}"), } // Then the response for resumeConversation let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_req_id)), ) .await??; let ResumeConversationResponse { conversation_id, model: resume_model, initial_messages: response_initial_messages, .. 
} = to_response::<ResumeConversationResponse>(resume_resp)?; // conversation id should be a valid UUID assert!(!conversation_id.to_string().is_empty()); assert_eq!(resume_model, "o3"); let response_initial_messages = response_initial_messages.expect("expected initial messages in resume response"); match response_initial_messages.as_slice() { [EventMsg::UserMessage(message)] => { assert_eq!(message.message, first_item.preview.clone()); } other => panic!("unexpected initial messages in resume response: {other:#?}"), } // Resuming with only a conversation id should locate the rollout automatically. let resume_by_id_req_id = mcp .send_resume_conversation_request(ResumeConversationParams { path: None, conversation_id: Some(first_item.conversation_id), history: None, overrides: Some(NewConversationParams { model: Some("o3".to_string()), ..Default::default() }), }) .await?; let notification: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("sessionConfigured"), ) .await??; let session_configured: ServerNotification = notification.try_into()?; let ServerNotification::SessionConfigured(SessionConfiguredNotification { model, rollout_path, initial_messages: session_initial_messages, .. 
}) = session_configured else { unreachable!("expected sessionConfigured notification"); }; assert_eq!(model, "o3"); assert_eq!(rollout_path, first_item.path.clone()); let session_initial_messages = session_initial_messages .expect("expected initial messages when resuming from conversation id"); match session_initial_messages.as_slice() { [EventMsg::UserMessage(message)] => { assert_eq!(message.message, first_item.preview.clone()); } other => panic!("unexpected initial messages from conversation id resume: {other:#?}"), } let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_by_id_req_id)), ) .await??; let ResumeConversationResponse { conversation_id: by_id_conversation_id, model: by_id_model, initial_messages: by_id_initial_messages, .. } = to_response::<ResumeConversationResponse>(resume_resp)?; assert!(!by_id_conversation_id.to_string().is_empty()); assert_eq!(by_id_model, "o3"); let by_id_initial_messages = by_id_initial_messages .expect("expected initial messages when resuming from conversation id response"); match by_id_initial_messages.as_slice() { [EventMsg::UserMessage(message)] => { assert_eq!(message.message, first_item.preview.clone()); } other => { panic!("unexpected initial messages in conversation id resume response: {other:#?}") } } // Resuming with explicit history should succeed even without a stored rollout. 
let fork_history_text = "Hello from history"; let history = vec![ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: fork_history_text.to_string(), }], }]; let resume_with_history_req_id = mcp .send_resume_conversation_request(ResumeConversationParams { path: None, conversation_id: None, history: Some(history), overrides: Some(NewConversationParams { model: Some("o3".to_string()), ..Default::default() }), }) .await?; let notification: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("sessionConfigured"), ) .await??; let session_configured: ServerNotification = notification.try_into()?; let ServerNotification::SessionConfigured(SessionConfiguredNotification { model, initial_messages: session_initial_messages, .. }) = session_configured else { unreachable!("expected sessionConfigured notification"); }; assert_eq!(model, "o3"); assert!( session_initial_messages.as_ref().is_none_or(Vec::is_empty), "expected no initial messages when resuming from explicit history but got {session_initial_messages:#?}" ); let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_with_history_req_id)), ) .await??; let ResumeConversationResponse { conversation_id: history_conversation_id, model: history_model, initial_messages: history_initial_messages, .. 
} = to_response::<ResumeConversationResponse>(resume_resp)?; assert!(!history_conversation_id.to_string().is_empty()); assert_eq!(history_model, "o3"); assert!( history_initial_messages.as_ref().is_none_or(Vec::is_empty), "expected no initial messages in resume response when history is provided but got {history_initial_messages:#?}" ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn list_conversations_fetches_through_filtered_pages() -> Result<()> { let codex_home = TempDir::new()?; // Only the last 3 conversations match the provider filter; request 3 and // ensure pagination keeps fetching past non-matching pages. let cases = [ ( "2025-03-04T12-00-00", "2025-03-04T12:00:00Z", "skip_provider", ), ( "2025-03-03T12-00-00", "2025-03-03T12:00:00Z", "skip_provider", ), ( "2025-03-02T12-00-00", "2025-03-02T12:00:00Z", "target_provider", ), ( "2025-03-01T12-00-00", "2025-03-01T12:00:00Z", "target_provider", ), ( "2025-02-28T12-00-00", "2025-02-28T12:00:00Z", "target_provider", ), ]; for (ts_file, ts_rfc, provider) in cases { create_fake_rollout( codex_home.path(), ts_file, ts_rfc, "Hello", Some(provider), None, )?; } let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let req_id = mcp .send_list_conversations_request(ListConversationsParams { page_size: Some(3), cursor: None, model_providers: Some(vec!["target_provider".to_string()]), }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id)), ) .await??; let ListConversationsResponse { items, next_cursor } = to_response::<ListConversationsResponse>(resp)?; assert_eq!( items.len(), 3, "should fetch across pages to satisfy the limit" ); assert!( items .iter() .all(|item| item.model_provider == "target_provider") ); assert_eq!(next_cursor, None); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/auth.rs
codex-rs/app-server/tests/suite/auth.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::AuthMode; use codex_app_server_protocol::GetAuthStatusParams; use codex_app_server_protocol::GetAuthStatusResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::LoginApiKeyResponse; use codex_app_server_protocol::RequestId; use pretty_assertions::assert_eq; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); fn create_config_toml_custom_provider( codex_home: &Path, requires_openai_auth: bool, ) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); let requires_line = if requires_openai_auth { "requires_openai_auth = true\n" } else { "" }; let contents = format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "http://127.0.0.1:0/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 {requires_line} "# ); std::fs::write(config_toml, contents) } fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" "#, ) } fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); let contents = format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" forced_login_method = "{forced_method}" "# ); std::fs::write(config_toml, contents) } async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) -> Result<()> { let request_id = mcp 
.send_login_api_key_request(LoginApiKeyParams { api_key: api_key.to_string(), }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let _: LoginApiKeyResponse = to_response(resp)?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_auth_status_no_auth() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_get_auth_status_request(GetAuthStatusParams { include_token: Some(true), refresh_token: Some(false), }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let status: GetAuthStatusResponse = to_response(resp)?; assert_eq!(status.auth_method, None, "expected no auth method"); assert_eq!(status.auth_token, None, "expected no token"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_auth_status_with_api_key() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; login_with_api_key_via_request(&mut mcp, "sk-test-key").await?; let request_id = mcp .send_get_auth_status_request(GetAuthStatusParams { include_token: Some(true), refresh_token: Some(false), }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let status: GetAuthStatusResponse = to_response(resp)?; assert_eq!(status.auth_method, Some(AuthMode::ApiKey)); assert_eq!(status.auth_token, Some("sk-test-key".to_string())); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 
async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml_custom_provider(codex_home.path(), false)?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; login_with_api_key_via_request(&mut mcp, "sk-test-key").await?; let request_id = mcp .send_get_auth_status_request(GetAuthStatusParams { include_token: Some(true), refresh_token: Some(false), }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let status: GetAuthStatusResponse = to_response(resp)?; assert_eq!(status.auth_method, None, "expected no auth method"); assert_eq!(status.auth_token, None, "expected no token"); assert_eq!( status.requires_openai_auth, Some(false), "requires_openai_auth should be false", ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_auth_status_with_api_key_no_include_token() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; login_with_api_key_via_request(&mut mcp, "sk-test-key").await?; // Build params via struct so None field is omitted in wire JSON. 
let params = GetAuthStatusParams { include_token: None, refresh_token: Some(false), }; let request_id = mcp.send_get_auth_status_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let status: GetAuthStatusResponse = to_response(resp)?; assert_eq!(status.auth_method, Some(AuthMode::ApiKey)); assert!(status.auth_token.is_none(), "token must be omitted"); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn login_api_key_rejected_when_forced_chatgpt() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml_forced_login(codex_home.path(), "chatgpt")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_login_api_key_request(LoginApiKeyParams { api_key: "sk-test-key".to_string(), }) .await?; let err: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!( err.error.message, "API key login is disabled. Use ChatGPT login instead." ); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/set_default_model.rs
codex-rs/app-server/tests/suite/set_default_model.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SetDefaultModelParams; use codex_app_server_protocol::SetDefaultModelResponse; use codex_core::config::ConfigToml; use pretty_assertions::assert_eq; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn set_default_model_persists_overrides() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let params = SetDefaultModelParams { model: Some("gpt-4.1".to_string()), reasoning_effort: None, }; let request_id = mcp.send_set_default_model_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let _: SetDefaultModelResponse = to_response(resp)?; let config_path = codex_home.path().join("config.toml"); let config_contents = tokio::fs::read_to_string(&config_path).await?; let config_toml: ConfigToml = toml::from_str(&config_contents)?; assert_eq!( ConfigToml { model: Some("gpt-4.1".to_string()), model_reasoning_effort: None, ..Default::default() }, config_toml, ); Ok(()) } // Helper to create a config.toml; mirrors create_conversation.rs fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, r#" model = "gpt-5.1-codex-max" model_reasoning_effort = "medium" "#, ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/mod.rs
codex-rs/app-server/tests/suite/mod.rs
mod archive_conversation; mod auth; mod codex_message_processor_flow; mod config; mod create_conversation; mod fuzzy_file_search; mod interrupt; mod list_resume; mod login; mod send_message; mod set_default_model; mod user_agent; mod user_info; mod v2;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/interrupt.rs
codex-rs/app-server/tests/suite/interrupt.rs
#![cfg(unix)] // Support code lives in the `app_test_support` crate under tests/common. use std::path::Path; use codex_app_server_protocol::AddConversationListenerParams; use codex_app_server_protocol::InterruptConversationParams; use codex_app_server_protocol::InterruptConversationResponse; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::NewConversationParams; use codex_app_server_protocol::NewConversationResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SendUserMessageParams; use codex_app_server_protocol::SendUserMessageResponse; use codex_core::protocol::TurnAbortReason; use core_test_support::skip_if_no_network; use tempfile::TempDir; use tokio::time::timeout; use app_test_support::McpProcess; use app_test_support::create_mock_chat_completions_server; use app_test_support::create_shell_command_sse_response; use app_test_support::to_response; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_shell_command_interruption() { skip_if_no_network!(); if let Err(err) = shell_command_interruption().await { panic!("failure: {err}"); } } async fn shell_command_interruption() -> anyhow::Result<()> { // Use a cross-platform blocking command. On Windows plain `sleep` is not guaranteed to exist // (MSYS/GNU coreutils may be absent) and the failure causes the tool call to finish immediately, // which triggers a second model request before the test sends the explicit follow-up. That // prematurely consumes the second mocked SSE response and leads to a third POST (panic: no response for 2). // Powershell Start-Sleep is always available on Windows runners. On Unix we keep using `sleep`. 
#[cfg(target_os = "windows")] let shell_command = vec![ "powershell".to_string(), "-Command".to_string(), "Start-Sleep -Seconds 10".to_string(), ]; #[cfg(not(target_os = "windows"))] let shell_command = vec!["sleep".to_string(), "10".to_string()]; let tmp = TempDir::new()?; // Temporary Codex home with config pointing at the mock server. let codex_home = tmp.path().join("codex_home"); std::fs::create_dir(&codex_home)?; let working_directory = tmp.path().join("workdir"); std::fs::create_dir(&working_directory)?; // Create mock server with a single SSE response: the long sleep command let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response( shell_command.clone(), Some(&working_directory), Some(10_000), // 10 seconds timeout in ms "call_sleep", )?]) .await; create_config_toml(&codex_home, server.uri())?; // Start MCP server and initialize. let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // 1) newConversation let new_conv_id = mcp .send_new_conversation_request(NewConversationParams { cwd: Some(working_directory.to_string_lossy().into_owned()), ..Default::default() }) .await?; let new_conv_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(new_conv_id)), ) .await??; let new_conv_resp = to_response::<NewConversationResponse>(new_conv_resp)?; let NewConversationResponse { conversation_id, .. 
} = new_conv_resp; // 2) addConversationListener let add_listener_id = mcp .send_add_conversation_listener_request(AddConversationListenerParams { conversation_id, experimental_raw_events: false, }) .await?; let _add_listener_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(add_listener_id)), ) .await??; // 3) sendUserMessage (should trigger notifications; we only validate an OK response) let send_user_id = mcp .send_send_user_message_request(SendUserMessageParams { conversation_id, items: vec![codex_app_server_protocol::InputItem::Text { text: "run first sleep command".to_string(), }], }) .await?; let send_user_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(send_user_id)), ) .await??; let SendUserMessageResponse {} = to_response::<SendUserMessageResponse>(send_user_resp)?; // Give the command a moment to start tokio::time::sleep(std::time::Duration::from_secs(1)).await; // 4) send interrupt request let interrupt_id = mcp .send_interrupt_conversation_request(InterruptConversationParams { conversation_id }) .await?; let interrupt_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)), ) .await??; let InterruptConversationResponse { abort_reason } = to_response::<InterruptConversationResponse>(interrupt_resp)?; assert_eq!(TurnAbortReason::Interrupted, abort_reason); Ok(()) } // --------------------------------------------------------------------------- // Helpers // --------------------------------------------------------------------------- fn create_config_toml(codex_home: &Path, server_uri: String) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock 
provider for test" base_url = "{server_uri}/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "# ), ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/fuzzy_file_search.rs
codex-rs/app-server/tests/suite/fuzzy_file_search.rs
use anyhow::Result; use anyhow::anyhow; use app_test_support::McpProcess; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use pretty_assertions::assert_eq; use serde_json::json; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fuzzy_file_search_sorts_and_includes_indices() -> Result<()> { // Prepare a temporary Codex home and a separate root with test files. let codex_home = TempDir::new()?; let root = TempDir::new()?; // Create files designed to have deterministic ordering for query "abe". std::fs::write(root.path().join("abc"), "x")?; std::fs::write(root.path().join("abcde"), "x")?; std::fs::write(root.path().join("abexy"), "x")?; std::fs::write(root.path().join("zzz.txt"), "x")?; let sub_dir = root.path().join("sub"); std::fs::create_dir_all(&sub_dir)?; let sub_abce_path = sub_dir.join("abce"); std::fs::write(&sub_abce_path, "x")?; let sub_abce_rel = sub_abce_path .strip_prefix(root.path())? .to_string_lossy() .to_string(); // Start MCP server and initialize. let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let root_path = root.path().to_string_lossy().to_string(); // Send fuzzyFileSearch request. let request_id = mcp .send_fuzzy_file_search_request("abe", vec![root_path.clone()], None) .await?; // Read response and verify shape and ordering. let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let value = resp.result; // The path separator on Windows affects the score. 
let expected_score = if cfg!(windows) { 69 } else { 72 }; assert_eq!( value, json!({ "files": [ { "root": root_path.clone(), "path": "abexy", "file_name": "abexy", "score": 88, "indices": [0, 1, 2], }, { "root": root_path.clone(), "path": "abcde", "file_name": "abcde", "score": 74, "indices": [0, 1, 4], }, { "root": root_path.clone(), "path": sub_abce_rel, "file_name": "abce", "score": expected_score, "indices": [4, 5, 7], }, ] }) ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_fuzzy_file_search_accepts_cancellation_token() -> Result<()> { let codex_home = TempDir::new()?; let root = TempDir::new()?; std::fs::write(root.path().join("alpha.txt"), "contents")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let root_path = root.path().to_string_lossy().to_string(); let request_id = mcp .send_fuzzy_file_search_request("alp", vec![root_path.clone()], None) .await?; let request_id_2 = mcp .send_fuzzy_file_search_request( "alp", vec![root_path.clone()], Some(request_id.to_string()), ) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id_2)), ) .await??; let files = resp .result .get("files") .ok_or_else(|| anyhow!("files key missing"))? .as_array() .ok_or_else(|| anyhow!("files not array"))? .clone(); assert_eq!(files.len(), 1); assert_eq!(files[0]["root"], root_path); assert_eq!(files[0]["path"], "alpha.txt"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/thread_archive.rs
codex-rs/app-server/tests/suite/v2/thread_archive.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ThreadArchiveParams; use codex_app_server_protocol::ThreadArchiveResponse; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_core::ARCHIVED_SESSIONS_SUBDIR; use codex_core::find_conversation_path_by_id_str; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] async fn thread_archive_moves_rollout_into_archived_directory() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a thread. let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?; assert!(!thread.id.is_empty()); // Locate the rollout path recorded for this thread id. let rollout_path = find_conversation_path_by_id_str(codex_home.path(), &thread.id) .await? .expect("expected rollout path for thread id to exist"); assert!( rollout_path.exists(), "expected {} to exist", rollout_path.display() ); // Archive the thread. 
let archive_id = mcp .send_thread_archive_request(ThreadArchiveParams { thread_id: thread.id.clone(), }) .await?; let archive_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(archive_id)), ) .await??; let _: ThreadArchiveResponse = to_response::<ThreadArchiveResponse>(archive_resp)?; // Verify file moved. let archived_directory = codex_home.path().join(ARCHIVED_SESSIONS_SUBDIR); // The archived file keeps the original filename (rollout-...-<id>.jsonl). let archived_rollout_path = archived_directory.join(rollout_path.file_name().expect("rollout file name")); assert!( !rollout_path.exists(), "expected rollout path {} to be moved", rollout_path.display() ); assert!( archived_rollout_path.exists(), "expected archived rollout path {} to exist", archived_rollout_path.display() ); Ok(()) } fn create_config_toml(codex_home: &Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write(config_toml, config_contents()) } fn config_contents() -> &'static str { r#"model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" "# }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/thread_resume.rs
codex-rs/app-server/tests/suite/v2/thread_resume.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_fake_rollout; use app_test_support::create_mock_chat_completions_server; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SessionSource; use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadResumeParams; use codex_app_server_protocol::ThreadResumeResponse; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use std::path::PathBuf; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] async fn thread_resume_returns_original_thread() -> Result<()> { let server = create_mock_chat_completions_server(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a thread. let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?; // Resume it via v2 API. 
let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { thread_id: thread.id.clone(), ..Default::default() }) .await?; let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), ) .await??; let ThreadResumeResponse { thread: resumed, .. } = to_response::<ThreadResumeResponse>(resume_resp)?; assert_eq!(resumed, thread); Ok(()) } #[tokio::test] async fn thread_resume_returns_rollout_history() -> Result<()> { let server = create_mock_chat_completions_server(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let preview = "Saved user message"; let conversation_id = create_fake_rollout( codex_home.path(), "2025-01-05T12-00-00", "2025-01-05T12:00:00Z", preview, Some("mock_provider"), None, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { thread_id: conversation_id.clone(), ..Default::default() }) .await?; let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), ) .await??; let ThreadResumeResponse { thread, .. } = to_response::<ThreadResumeResponse>(resume_resp)?; assert_eq!(thread.id, conversation_id); assert_eq!(thread.preview, preview); assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.path.is_absolute()); assert_eq!(thread.cwd, PathBuf::from("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); assert_eq!( thread.turns.len(), 1, "expected rollouts to include one turn" ); let turn = &thread.turns[0]; assert_eq!(turn.status, TurnStatus::Completed); assert_eq!(turn.items.len(), 1, "expected user message item"); match &turn.items[0] { ThreadItem::UserMessage { content, .. 
} => { assert_eq!( content, &vec![UserInput::Text { text: preview.to_string() }] ); } other => panic!("expected user message item, got {other:?}"), } Ok(()) } #[tokio::test] async fn thread_resume_prefers_path_over_thread_id() -> Result<()> { let server = create_mock_chat_completions_server(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?; let thread_path = thread.path.clone(); let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { thread_id: "not-a-valid-thread-id".to_string(), path: Some(thread_path), ..Default::default() }) .await?; let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), ) .await??; let ThreadResumeResponse { thread: resumed, .. } = to_response::<ThreadResumeResponse>(resume_resp)?; assert_eq!(resumed, thread); Ok(()) } #[tokio::test] async fn thread_resume_supports_history_and_overrides() -> Result<()> { let server = create_mock_chat_completions_server(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a thread. 
let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("gpt-5.1-codex-max".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?; let history_text = "Hello from history"; let history = vec![ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: history_text.to_string(), }], }]; // Resume with explicit history and override the model. let resume_id = mcp .send_thread_resume_request(ThreadResumeParams { thread_id: thread.id, history: Some(history), model: Some("mock-model".to_string()), model_provider: Some("mock_provider".to_string()), ..Default::default() }) .await?; let resume_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(resume_id)), ) .await??; let ThreadResumeResponse { thread: resumed, model_provider, .. } = to_response::<ThreadResumeResponse>(resume_resp)?; assert!(!resumed.id.is_empty()); assert_eq!(model_provider, "mock_provider"); assert_eq!(resumed.preview, history_text); Ok(()) } // Helper to create a config.toml pointing at the mock model server. fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "{server_uri}/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "# ), ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs
codex-rs/app-server/tests/suite/v2/turn_interrupt.rs
#![cfg(unix)] use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_mock_chat_completions_server; use app_test_support::create_shell_command_sse_response; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::TurnCompletedNotification; use codex_app_server_protocol::TurnInterruptParams; use codex_app_server_protocol::TurnInterruptResponse; use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput as V2UserInput; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] async fn turn_interrupt_aborts_running_turn() -> Result<()> { // Use a portable sleep command to keep the turn running. #[cfg(target_os = "windows")] let shell_command = vec![ "powershell".to_string(), "-Command".to_string(), "Start-Sleep -Seconds 10".to_string(), ]; #[cfg(not(target_os = "windows"))] let shell_command = vec!["sleep".to_string(), "10".to_string()]; let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); std::fs::create_dir(&codex_home)?; let working_directory = tmp.path().join("workdir"); std::fs::create_dir(&working_directory)?; // Mock server: long-running shell command then (after abort) nothing else needed. 
let server = create_mock_chat_completions_server(vec![create_shell_command_sse_response( shell_command.clone(), Some(&working_directory), Some(10_000), "call_sleep", )?]) .await; create_config_toml(&codex_home, &server.uri())?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a v2 thread and capture its id. let thread_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let thread_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?; // Start a turn that triggers a long-running command. let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "run sleep".to_string(), }], cwd: Some(working_directory.clone()), ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; // Give the command a brief moment to start. tokio::time::sleep(std::time::Duration::from_secs(1)).await; let thread_id = thread.id.clone(); // Interrupt the in-progress turn by id (v2 API). 
let interrupt_id = mcp .send_turn_interrupt_request(TurnInterruptParams { thread_id: thread_id.clone(), turn_id: turn.id, }) .await?; let interrupt_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(interrupt_id)), ) .await??; let _resp: TurnInterruptResponse = to_response::<TurnInterruptResponse>(interrupt_resp)?; let completed_notif: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/completed"), ) .await??; let completed: TurnCompletedNotification = serde_json::from_value( completed_notif .params .expect("turn/completed params must be present"), )?; assert_eq!(completed.thread_id, thread_id); assert_eq!(completed.turn.status, TurnStatus::Interrupted); Ok(()) } // Helper to create a config.toml pointing at the mock model server. fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "workspace-write" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "{server_uri}/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "# ), ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/model_list.rs
codex-rs/app-server/tests/suite/v2/model_list.rs
use std::time::Duration; use anyhow::Result; use anyhow::anyhow; use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::write_models_cache; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::Model; use codex_app_server_protocol::ModelListParams; use codex_app_server_protocol::ModelListResponse; use codex_app_server_protocol::ReasoningEffortOption; use codex_app_server_protocol::RequestId; use codex_protocol::openai_models::ReasoningEffort; use pretty_assertions::assert_eq; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); const INVALID_REQUEST_ERROR_CODE: i64 = -32600; #[tokio::test] async fn list_models_returns_all_models_with_large_limit() -> Result<()> { let codex_home = TempDir::new()?; write_models_cache(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_list_models_request(ModelListParams { limit: Some(100), cursor: None, }) .await?; let response: JSONRPCResponse = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let ModelListResponse { data: items, next_cursor, } = to_response::<ModelListResponse>(response)?; let expected_models = vec![ Model { id: "gpt-5.2".to_string(), model: "gpt-5.2".to_string(), display_name: "gpt-5.2".to_string(), description: "Latest frontier model with improvements across knowledge, reasoning and coding" .to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { reasoning_effort: ReasoningEffort::Low, description: "Balances speed with some reasoning; useful for straightforward \ queries and short explanations" .to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::Medium, description: "Provides a solid balance of reasoning depth and latency for \ general-purpose tasks" 
.to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems" .to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::XHigh, description: "Extra high reasoning for complex problems".to_string(), }, ], default_reasoning_effort: ReasoningEffort::Medium, is_default: true, }, Model { id: "gpt-5.1-codex-mini".to_string(), model: "gpt-5.1-codex-mini".to_string(), display_name: "gpt-5.1-codex-mini".to_string(), description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { reasoning_effort: ReasoningEffort::Medium, description: "Dynamically adjusts reasoning based on the task".to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems" .to_string(), }, ], default_reasoning_effort: ReasoningEffort::Medium, is_default: false, }, Model { id: "gpt-5.1-codex-max".to_string(), model: "gpt-5.1-codex-max".to_string(), display_name: "gpt-5.1-codex-max".to_string(), description: "Codex-optimized flagship for deep and fast reasoning.".to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { reasoning_effort: ReasoningEffort::Low, description: "Fast responses with lighter reasoning".to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::Medium, description: "Balances speed and reasoning depth for everyday tasks" .to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::High, description: "Greater reasoning depth for complex problems".to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::XHigh, description: "Extra high reasoning depth for complex problems".to_string(), }, ], default_reasoning_effort: ReasoningEffort::Medium, is_default: false, }, Model { id: "gpt-5.2-codex".to_string(), model: "gpt-5.2-codex".to_string(), 
display_name: "gpt-5.2-codex".to_string(), description: "Latest frontier agentic coding model.".to_string(), supported_reasoning_efforts: vec![ ReasoningEffortOption { reasoning_effort: ReasoningEffort::Low, description: "Fast responses with lighter reasoning".to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::Medium, description: "Balances speed and reasoning depth for everyday tasks" .to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::High, description: "Greater reasoning depth for complex problems".to_string(), }, ReasoningEffortOption { reasoning_effort: ReasoningEffort::XHigh, description: "Extra high reasoning depth for complex problems".to_string(), }, ], default_reasoning_effort: ReasoningEffort::Medium, is_default: false, }, ]; assert_eq!(items, expected_models); assert!(next_cursor.is_none()); Ok(()) } #[tokio::test] async fn list_models_pagination_works() -> Result<()> { let codex_home = TempDir::new()?; write_models_cache(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; let first_request = mcp .send_list_models_request(ModelListParams { limit: Some(1), cursor: None, }) .await?; let first_response: JSONRPCResponse = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(first_request)), ) .await??; let ModelListResponse { data: first_items, next_cursor: first_cursor, } = to_response::<ModelListResponse>(first_response)?; assert_eq!(first_items.len(), 1); assert_eq!(first_items[0].id, "gpt-5.2"); let next_cursor = first_cursor.ok_or_else(|| anyhow!("cursor for second page"))?; let second_request = mcp .send_list_models_request(ModelListParams { limit: Some(1), cursor: Some(next_cursor.clone()), }) .await?; let second_response: JSONRPCResponse = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(second_request)), ) .await??; let ModelListResponse { data: second_items, 
next_cursor: second_cursor, } = to_response::<ModelListResponse>(second_response)?; assert_eq!(second_items.len(), 1); assert_eq!(second_items[0].id, "gpt-5.1-codex-mini"); let third_cursor = second_cursor.ok_or_else(|| anyhow!("cursor for third page"))?; let third_request = mcp .send_list_models_request(ModelListParams { limit: Some(1), cursor: Some(third_cursor.clone()), }) .await?; let third_response: JSONRPCResponse = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(third_request)), ) .await??; let ModelListResponse { data: third_items, next_cursor: third_cursor, } = to_response::<ModelListResponse>(third_response)?; assert_eq!(third_items.len(), 1); assert_eq!(third_items[0].id, "gpt-5.1-codex-max"); let fourth_cursor = third_cursor.ok_or_else(|| anyhow!("cursor for fourth page"))?; let fourth_request = mcp .send_list_models_request(ModelListParams { limit: Some(1), cursor: Some(fourth_cursor.clone()), }) .await?; let fourth_response: JSONRPCResponse = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(fourth_request)), ) .await??; let ModelListResponse { data: fourth_items, next_cursor: fourth_cursor, } = to_response::<ModelListResponse>(fourth_response)?; assert_eq!(fourth_items.len(), 1); assert_eq!(fourth_items[0].id, "gpt-5.2-codex"); assert!(fourth_cursor.is_none()); Ok(()) } #[tokio::test] async fn list_models_rejects_invalid_cursor() -> Result<()> { let codex_home = TempDir::new()?; write_models_cache(codex_home.path())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_list_models_request(ModelListParams { limit: None, cursor: Some("invalid".to_string()), }) .await?; let error: JSONRPCError = timeout( DEFAULT_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.id, RequestId::Integer(request_id)); assert_eq!(error.error.code, 
INVALID_REQUEST_ERROR_CODE); assert_eq!(error.error.message, "invalid cursor: invalid"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/thread_start.rs
codex-rs/app-server/tests/suite/v2/thread_start.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_mock_chat_completions_server; use app_test_support::to_response; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::ThreadStartedNotification; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); #[tokio::test] async fn thread_start_creates_thread_and_emits_started() -> Result<()> { // Provide a mock server and config so model wiring is valid. let server = create_mock_chat_completions_server(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; // Start server and initialize. let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a v2 thread with an explicit model override. let req_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("gpt-5.1".to_string()), ..Default::default() }) .await?; // Expect a proper JSON-RPC response with a thread id. let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id)), ) .await??; let ThreadStartResponse { thread, model_provider, .. } = to_response::<ThreadStartResponse>(resp)?; assert!(!thread.id.is_empty(), "thread id should not be empty"); assert!( thread.preview.is_empty(), "new threads should start with an empty preview" ); assert_eq!(model_provider, "mock_provider"); assert!( thread.created_at > 0, "created_at should be a positive UNIX timestamp" ); // A corresponding thread/started notification should arrive. 
let notif: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("thread/started"), ) .await??; let started: ThreadStartedNotification = serde_json::from_value(notif.params.expect("params must be present"))?; assert_eq!(started.thread, thread); Ok(()) } // Helper to create a config.toml pointing at the mock model server. fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "{server_uri}/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "# ), ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/mod.rs
codex-rs/app-server/tests/suite/v2/mod.rs
mod account; mod config_rpc; mod model_list; mod rate_limits; mod review; mod thread_archive; mod thread_list; mod thread_resume; mod thread_start; mod turn_interrupt; mod turn_start;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/config_rpc.rs
codex-rs/app-server/tests/suite/v2/config_rpc.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::test_path_buf_with_windows; use app_test_support::test_tmp_path_buf; use app_test_support::to_response; use codex_app_server_protocol::AskForApproval; use codex_app_server_protocol::ConfigBatchWriteParams; use codex_app_server_protocol::ConfigEdit; use codex_app_server_protocol::ConfigLayerSource; use codex_app_server_protocol::ConfigReadParams; use codex_app_server_protocol::ConfigReadResponse; use codex_app_server_protocol::ConfigValueWriteParams; use codex_app_server_protocol::ConfigWriteResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::MergeStrategy; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SandboxMode; use codex_app_server_protocol::ToolsV2; use codex_app_server_protocol::WriteStatus; use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use serde_json::json; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); fn write_config(codex_home: &TempDir, contents: &str) -> Result<()> { Ok(std::fs::write( codex_home.path().join("config.toml"), contents, )?) 
} #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_read_returns_effective_and_layers() -> Result<()> { let codex_home = TempDir::new()?; write_config( &codex_home, r#" model = "gpt-user" sandbox_mode = "workspace-write" "#, )?; let codex_home_path = codex_home.path().canonicalize()?; let user_file = AbsolutePathBuf::try_from(codex_home_path.join("config.toml"))?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_config_read_request(ConfigReadParams { include_layers: true, }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let ConfigReadResponse { config, origins, layers, } = to_response(resp)?; assert_eq!(config.model.as_deref(), Some("gpt-user")); assert_eq!( origins.get("model").expect("origin").name, ConfigLayerSource::User { file: user_file.clone(), } ); let layers = layers.expect("layers present"); assert_layers_user_then_optional_system(&layers, user_file)?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_read_includes_tools() -> Result<()> { let codex_home = TempDir::new()?; write_config( &codex_home, r#" model = "gpt-user" [tools] web_search = true view_image = false "#, )?; let codex_home_path = codex_home.path().canonicalize()?; let user_file = AbsolutePathBuf::try_from(codex_home_path.join("config.toml"))?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_config_read_request(ConfigReadParams { include_layers: true, }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let ConfigReadResponse { config, origins, layers, } = to_response(resp)?; let tools = config.tools.expect("tools present"); assert_eq!( tools, 
ToolsV2 { web_search: Some(true), view_image: Some(false), } ); assert_eq!( origins.get("tools.web_search").expect("origin").name, ConfigLayerSource::User { file: user_file.clone(), } ); assert_eq!( origins.get("tools.view_image").expect("origin").name, ConfigLayerSource::User { file: user_file.clone(), } ); let layers = layers.expect("layers present"); assert_layers_user_then_optional_system(&layers, user_file)?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_read_includes_system_layer_and_overrides() -> Result<()> { let codex_home = TempDir::new()?; let user_dir = test_path_buf_with_windows("/user", Some(r"C:\Users\user")); let system_dir = test_path_buf_with_windows("/system", Some(r"C:\System")); write_config( &codex_home, &format!( r#" model = "gpt-user" approval_policy = "on-request" sandbox_mode = "workspace-write" [sandbox_workspace_write] writable_roots = [{}] network_access = true "#, serde_json::json!(user_dir) ), )?; let codex_home_path = codex_home.path().canonicalize()?; let user_file = AbsolutePathBuf::try_from(codex_home_path.join("config.toml"))?; let managed_path = codex_home.path().join("managed_config.toml"); let managed_file = AbsolutePathBuf::try_from(managed_path.clone())?; std::fs::write( &managed_path, format!( r#" model = "gpt-system" approval_policy = "never" [sandbox_workspace_write] writable_roots = [{}] "#, serde_json::json!(system_dir.clone()) ), )?; let managed_path_str = managed_path.display().to_string(); let mut mcp = McpProcess::new_with_env( codex_home.path(), &[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))], ) .await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_config_read_request(ConfigReadParams { include_layers: true, }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let ConfigReadResponse { config, origins, layers, } = 
to_response(resp)?; assert_eq!(config.model.as_deref(), Some("gpt-system")); assert_eq!( origins.get("model").expect("origin").name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone(), } ); assert_eq!(config.approval_policy, Some(AskForApproval::Never)); assert_eq!( origins.get("approval_policy").expect("origin").name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone(), } ); assert_eq!(config.sandbox_mode, Some(SandboxMode::WorkspaceWrite)); assert_eq!( origins.get("sandbox_mode").expect("origin").name, ConfigLayerSource::User { file: user_file.clone(), } ); let sandbox = config .sandbox_workspace_write .as_ref() .expect("sandbox workspace write"); assert_eq!(sandbox.writable_roots, vec![system_dir]); assert_eq!( origins .get("sandbox_workspace_write.writable_roots.0") .expect("origin") .name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file.clone(), } ); assert!(sandbox.network_access); assert_eq!( origins .get("sandbox_workspace_write.network_access") .expect("origin") .name, ConfigLayerSource::User { file: user_file.clone(), } ); let layers = layers.expect("layers present"); assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_value_write_replaces_value() -> Result<()> { let temp_dir = TempDir::new()?; let codex_home = temp_dir.path().canonicalize()?; write_config( &temp_dir, r#" model = "gpt-old" "#, )?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let read_id = mcp .send_config_read_request(ConfigReadParams { include_layers: false, }) .await?; let read_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(read_id)), ) .await??; let read: ConfigReadResponse = to_response(read_resp)?; let expected_version = read.origins.get("model").map(|m| 
m.version.clone()); let write_id = mcp .send_config_value_write_request(ConfigValueWriteParams { file_path: None, key_path: "model".to_string(), value: json!("gpt-new"), merge_strategy: MergeStrategy::Replace, expected_version, }) .await?; let write_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(write_id)), ) .await??; let write: ConfigWriteResponse = to_response(write_resp)?; let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?; assert_eq!(write.status, WriteStatus::Ok); assert_eq!(write.file_path, expected_file_path); assert!(write.overridden_metadata.is_none()); let verify_id = mcp .send_config_read_request(ConfigReadParams { include_layers: false, }) .await?; let verify_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(verify_id)), ) .await??; let verify: ConfigReadResponse = to_response(verify_resp)?; assert_eq!(verify.config.model.as_deref(), Some("gpt-new")); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_value_write_rejects_version_conflict() -> Result<()> { let codex_home = TempDir::new()?; write_config( &codex_home, r#" model = "gpt-old" "#, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let write_id = mcp .send_config_value_write_request(ConfigValueWriteParams { file_path: Some(codex_home.path().join("config.toml").display().to_string()), key_path: "model".to_string(), value: json!("gpt-new"), merge_strategy: MergeStrategy::Replace, expected_version: Some("sha256:stale".to_string()), }) .await?; let err: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(write_id)), ) .await??; let code = err .error .data .as_ref() .and_then(|d| d.get("config_write_error_code")) .and_then(|v| v.as_str()); assert_eq!(code, 
Some("configVersionConflict")); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_batch_write_applies_multiple_edits() -> Result<()> { let tmp_dir = TempDir::new()?; let codex_home = tmp_dir.path().canonicalize()?; write_config(&tmp_dir, "")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let writable_root = test_tmp_path_buf(); let batch_id = mcp .send_config_batch_write_request(ConfigBatchWriteParams { file_path: Some(codex_home.join("config.toml").display().to_string()), edits: vec![ ConfigEdit { key_path: "sandbox_mode".to_string(), value: json!("workspace-write"), merge_strategy: MergeStrategy::Replace, }, ConfigEdit { key_path: "sandbox_workspace_write".to_string(), value: json!({ "writable_roots": [writable_root.clone()], "network_access": false }), merge_strategy: MergeStrategy::Replace, }, ], expected_version: None, }) .await?; let batch_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(batch_id)), ) .await??; let batch_write: ConfigWriteResponse = to_response(batch_resp)?; assert_eq!(batch_write.status, WriteStatus::Ok); let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?; assert_eq!(batch_write.file_path, expected_file_path); let read_id = mcp .send_config_read_request(ConfigReadParams { include_layers: false, }) .await?; let read_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(read_id)), ) .await??; let read: ConfigReadResponse = to_response(read_resp)?; assert_eq!(read.config.sandbox_mode, Some(SandboxMode::WorkspaceWrite)); let sandbox = read .config .sandbox_workspace_write .as_ref() .expect("sandbox workspace write"); assert_eq!(sandbox.writable_roots, vec![writable_root]); assert!(!sandbox.network_access); Ok(()) } fn assert_layers_user_then_optional_system( layers: 
&[codex_app_server_protocol::ConfigLayer], user_file: AbsolutePathBuf, ) -> Result<()> { if cfg!(unix) { let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?; assert_eq!(layers.len(), 2); assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); assert_eq!( layers[1].name, ConfigLayerSource::System { file: system_file } ); } else { assert_eq!(layers.len(), 1); assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); } Ok(()) } fn assert_layers_managed_user_then_optional_system( layers: &[codex_app_server_protocol::ConfigLayer], managed_file: AbsolutePathBuf, user_file: AbsolutePathBuf, ) -> Result<()> { if cfg!(unix) { let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?; assert_eq!(layers.len(), 3); assert_eq!( layers[0].name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } ); assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file }); assert_eq!( layers[2].name, ConfigLayerSource::System { file: system_file } ); } else { assert_eq!(layers.len(), 2); assert_eq!( layers[0].name, ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } ); assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file }); } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/turn_start.rs
codex-rs/app-server/tests/suite/v2/turn_start.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_apply_patch_sse_response; use app_test_support::create_exec_command_sse_response; use app_test_support::create_final_assistant_message_sse_response; use app_test_support::create_mock_chat_completions_server; use app_test_support::create_mock_chat_completions_server_unchecked; use app_test_support::create_shell_command_sse_response; use app_test_support::format_with_current_shell_display; use app_test_support::to_response; use codex_app_server_protocol::ApprovalDecision; use codex_app_server_protocol::CommandExecutionRequestApprovalResponse; use codex_app_server_protocol::CommandExecutionStatus; use codex_app_server_protocol::FileChangeOutputDeltaNotification; use codex_app_server_protocol::FileChangeRequestApprovalResponse; use codex_app_server_protocol::ItemCompletedNotification; use codex_app_server_protocol::ItemStartedNotification; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::PatchApplyStatus; use codex_app_server_protocol::PatchChangeKind; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ServerRequest; use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::TurnCompletedNotification; use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStartedNotification; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput as V2UserInput; use codex_core::protocol_config_types::ReasoningSummary; use codex_protocol::openai_models::ReasoningEffort; use core_test_support::skip_if_no_network; use pretty_assertions::assert_eq; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = 
std::time::Duration::from_secs(10); #[tokio::test] async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> { // Provide a mock server and config so model wiring is valid. // Three Codex turns hit the mock model (session start + two turn/start calls). let responses = vec![ create_final_assistant_message_sse_response("Done")?, create_final_assistant_message_sse_response("Done")?, create_final_assistant_message_sse_response("Done")?, ]; let server = create_mock_chat_completions_server_unchecked(responses).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri(), "never")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // Start a thread (v2) and capture its id. let thread_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let thread_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?; // Start a turn with only input and thread_id set (no overrides). let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "Hello".to_string(), }], ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; assert!(!turn.id.is_empty()); // Expect a turn/started notification. 
let notif: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/started"), ) .await??; let started: TurnStartedNotification = serde_json::from_value(notif.params.expect("params must be present"))?; assert_eq!(started.thread_id, thread.id); assert_eq!( started.turn.status, codex_app_server_protocol::TurnStatus::InProgress ); // Send a second turn that exercises the overrides path: change the model. let turn_req2 = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "Second".to_string(), }], model: Some("mock-model-override".to_string()), ..Default::default() }) .await?; let turn_resp2: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req2)), ) .await??; let TurnStartResponse { turn: turn2 } = to_response::<TurnStartResponse>(turn_resp2)?; assert!(!turn2.id.is_empty()); // Ensure the second turn has a different id than the first. assert_ne!(turn.id, turn2.id); // Expect a second turn/started notification as well. let _notif2: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/started"), ) .await??; let completed_notif: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/completed"), ) .await??; let completed: TurnCompletedNotification = serde_json::from_value( completed_notif .params .expect("turn/completed params must be present"), )?; assert_eq!(completed.thread_id, thread.id); assert_eq!(completed.turn.status, TurnStatus::Completed); Ok(()) } #[tokio::test] async fn turn_start_accepts_local_image_input() -> Result<()> { // Two Codex turns hit the mock model (session start + turn/start). 
let responses = vec![ create_final_assistant_message_sse_response("Done")?, create_final_assistant_message_sse_response("Done")?, ]; // Use the unchecked variant because the request payload includes a LocalImage // which the strict matcher does not currently cover. let server = create_mock_chat_completions_server_unchecked(responses).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri(), "never")?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let thread_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?; let image_path = codex_home.path().join("image.png"); // No need to actually write the file; we just exercise the input path. let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::LocalImage { path: image_path }], ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; assert!(!turn.id.is_empty()); // This test only validates that turn/start responds and returns a turn. Ok(()) } #[tokio::test] async fn turn_start_exec_approval_toggle_v2() -> Result<()> { skip_if_no_network!(Ok(())); let tmp = TempDir::new()?; let codex_home = tmp.path().to_path_buf(); // Mock server: first turn requests a shell call (elicitation), then completes. // Second turn same, but we'll set approval_policy=never to avoid elicitation. 
let responses = vec![ create_shell_command_sse_response( vec![ "python3".to_string(), "-c".to_string(), "print(42)".to_string(), ], None, Some(5000), "call1", )?, create_final_assistant_message_sse_response("done 1")?, create_shell_command_sse_response( vec![ "python3".to_string(), "-c".to_string(), "print(42)".to_string(), ], None, Some(5000), "call2", )?, create_final_assistant_message_sse_response("done 2")?, ]; let server = create_mock_chat_completions_server(responses).await; // Default approval is untrusted to force elicitation on first turn. create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?; let mut mcp = McpProcess::new(codex_home.as_path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // thread/start let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. 
} = to_response::<ThreadStartResponse>(start_resp)?; // turn/start — expect CommandExecutionRequestApproval request from server let first_turn_id = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "run python".to_string(), }], ..Default::default() }) .await?; // Acknowledge RPC timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(first_turn_id)), ) .await??; // Receive elicitation let server_req = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_request_message(), ) .await??; let ServerRequest::CommandExecutionRequestApproval { request_id, params } = server_req else { panic!("expected CommandExecutionRequestApproval request"); }; assert_eq!(params.item_id, "call1"); // Approve and wait for task completion mcp.send_response( request_id, serde_json::json!({ "decision": codex_core::protocol::ReviewDecision::Approved }), ) .await?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/completed"), ) .await??; // Second turn with approval_policy=never should not elicit approval let second_turn_id = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "run python again".to_string(), }], approval_policy: Some(codex_app_server_protocol::AskForApproval::Never), sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess), model: Some("mock-model".to_string()), effort: Some(ReasoningEffort::Medium), summary: Some(ReasoningSummary::Auto), ..Default::default() }) .await?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(second_turn_id)), ) .await??; // Ensure we do NOT receive a CommandExecutionRequestApproval request before task completes timeout( DEFAULT_READ_TIMEOUT, 
mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("turn/completed"), ) .await??; Ok(()) } #[tokio::test] async fn turn_start_exec_approval_decline_v2() -> Result<()> { skip_if_no_network!(Ok(())); let tmp = TempDir::new()?; let codex_home = tmp.path().to_path_buf(); let workspace = tmp.path().join("workspace"); std::fs::create_dir(&workspace)?; let responses = vec![ create_shell_command_sse_response( vec![ "python3".to_string(), "-c".to_string(), "print(42)".to_string(), ], None, Some(5000), "call-decline", )?, create_final_assistant_message_sse_response("done")?, ]; let server = create_mock_chat_completions_server(responses).await; create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?; let mut mcp = McpProcess::new(codex_home.as_path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. 
} = to_response::<ThreadStartResponse>(start_resp)?; let turn_id = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "run python".to_string(), }], cwd: Some(workspace.clone()), ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_id)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; let started_command_execution = timeout(DEFAULT_READ_TIMEOUT, async { loop { let started_notif = mcp .read_stream_until_notification_message("item/started") .await?; let started: ItemStartedNotification = serde_json::from_value(started_notif.params.clone().expect("item/started params"))?; if let ThreadItem::CommandExecution { .. } = started.item { return Ok::<ThreadItem, anyhow::Error>(started.item); } } }) .await??; let ThreadItem::CommandExecution { id, status, .. } = started_command_execution else { unreachable!("loop ensures we break on command execution items"); }; assert_eq!(id, "call-decline"); assert_eq!(status, CommandExecutionStatus::InProgress); let server_req = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_request_message(), ) .await??; let ServerRequest::CommandExecutionRequestApproval { request_id, params } = server_req else { panic!("expected CommandExecutionRequestApproval request") }; assert_eq!(params.item_id, "call-decline"); assert_eq!(params.thread_id, thread.id); assert_eq!(params.turn_id, turn.id); mcp.send_response( request_id, serde_json::to_value(CommandExecutionRequestApprovalResponse { decision: ApprovalDecision::Decline, })?, ) .await?; let completed_command_execution = timeout(DEFAULT_READ_TIMEOUT, async { loop { let completed_notif = mcp .read_stream_until_notification_message("item/completed") .await?; let completed: ItemCompletedNotification = serde_json::from_value( completed_notif .params .clone() .expect("item/completed params"), )?; 
if let ThreadItem::CommandExecution { .. } = completed.item { return Ok::<ThreadItem, anyhow::Error>(completed.item); } } }) .await??; let ThreadItem::CommandExecution { id, status, exit_code, aggregated_output, .. } = completed_command_execution else { unreachable!("loop ensures we break on command execution items"); }; assert_eq!(id, "call-decline"); assert_eq!(status, CommandExecutionStatus::Declined); assert!(exit_code.is_none()); assert!(aggregated_output.is_none()); timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; Ok(()) } #[tokio::test] async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> { skip_if_no_network!(Ok(())); let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); std::fs::create_dir(&codex_home)?; let workspace_root = tmp.path().join("workspace"); std::fs::create_dir(&workspace_root)?; let first_cwd = workspace_root.join("turn1"); let second_cwd = workspace_root.join("turn2"); std::fs::create_dir(&first_cwd)?; std::fs::create_dir(&second_cwd)?; let responses = vec![ create_shell_command_sse_response( vec!["echo".to_string(), "first".to_string(), "turn".to_string()], None, Some(5000), "call-first", )?, create_final_assistant_message_sse_response("done first")?, create_shell_command_sse_response( vec!["echo".to_string(), "second".to_string(), "turn".to_string()], None, Some(5000), "call-second", )?, create_final_assistant_message_sse_response("done second")?, ]; let server = create_mock_chat_completions_server(responses).await; create_config_toml(&codex_home, &server.uri(), "untrusted")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; // thread/start let start_id = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, 
mcp.read_stream_until_response_message(RequestId::Integer(start_id)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?; // first turn with workspace-write sandbox and first_cwd let first_turn = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "first turn".to_string(), }], cwd: Some(first_cwd.clone()), approval_policy: Some(codex_app_server_protocol::AskForApproval::Never), sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::WorkspaceWrite { writable_roots: vec![first_cwd.try_into()?], network_access: false, exclude_tmpdir_env_var: false, exclude_slash_tmp: false, }), model: Some("mock-model".to_string()), effort: Some(ReasoningEffort::Medium), summary: Some(ReasoningSummary::Auto), }) .await?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(first_turn)), ) .await??; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; // second turn with workspace-write and second_cwd, ensure exec begins in second_cwd let second_turn = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "second turn".to_string(), }], cwd: Some(second_cwd.clone()), approval_policy: Some(codex_app_server_protocol::AskForApproval::Never), sandbox_policy: Some(codex_app_server_protocol::SandboxPolicy::DangerFullAccess), model: Some("mock-model".to_string()), effort: Some(ReasoningEffort::Medium), summary: Some(ReasoningSummary::Auto), }) .await?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(second_turn)), ) .await??; let command_exec_item = timeout(DEFAULT_READ_TIMEOUT, async { loop { let item_started_notification = mcp .read_stream_until_notification_message("item/started") .await?; let params = item_started_notification .params .clone() .expect("item/started 
params"); let item_started: ItemStartedNotification = serde_json::from_value(params).expect("deserialize item/started notification"); if matches!(item_started.item, ThreadItem::CommandExecution { .. }) { return Ok::<ThreadItem, anyhow::Error>(item_started.item); } } }) .await??; let ThreadItem::CommandExecution { cwd, command, status, .. } = command_exec_item else { unreachable!("loop ensures we break on command execution items"); }; assert_eq!(cwd, second_cwd); let expected_command = format_with_current_shell_display("echo second turn"); assert_eq!(command, expected_command); assert_eq!(status, CommandExecutionStatus::InProgress); timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; Ok(()) } #[tokio::test] async fn turn_start_file_change_approval_v2() -> Result<()> { skip_if_no_network!(Ok(())); let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); std::fs::create_dir(&codex_home)?; let workspace = tmp.path().join("workspace"); std::fs::create_dir(&workspace)?; let patch = r#"*** Begin Patch *** Add File: README.md +new line *** End Patch "#; let responses = vec![ create_apply_patch_sse_response(patch, "patch-call")?, create_final_assistant_message_sse_response("patch applied")?, ]; let server = create_mock_chat_completions_server(responses).await; create_config_toml(&codex_home, &server.uri(), "untrusted")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let start_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), cwd: Some(workspace.to_string_lossy().into_owned()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_req)), ) .await??; let ThreadStartResponse { thread, .. 
} = to_response::<ThreadStartResponse>(start_resp)?; let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "apply patch".into(), }], cwd: Some(workspace.clone()), ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; let started_file_change = timeout(DEFAULT_READ_TIMEOUT, async { loop { let started_notif = mcp .read_stream_until_notification_message("item/started") .await?; let started: ItemStartedNotification = serde_json::from_value(started_notif.params.clone().expect("item/started params"))?; if let ThreadItem::FileChange { .. } = started.item { return Ok::<ThreadItem, anyhow::Error>(started.item); } } }) .await??; let ThreadItem::FileChange { ref id, status, ref changes, } = started_file_change else { unreachable!("loop ensures we break on file change items"); }; assert_eq!(id, "patch-call"); assert_eq!(status, PatchApplyStatus::InProgress); let started_changes = changes.clone(); let server_req = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_request_message(), ) .await??; let ServerRequest::FileChangeRequestApproval { request_id, params } = server_req else { panic!("expected FileChangeRequestApproval request") }; assert_eq!(params.item_id, "patch-call"); assert_eq!(params.thread_id, thread.id); assert_eq!(params.turn_id, turn.id); let expected_readme_path = workspace.join("README.md"); let expected_readme_path = expected_readme_path.to_string_lossy().into_owned(); pretty_assertions::assert_eq!( started_changes, vec![codex_app_server_protocol::FileUpdateChange { path: expected_readme_path.clone(), kind: PatchChangeKind::Add, diff: "new line\n".to_string(), }] ); mcp.send_response( request_id, serde_json::to_value(FileChangeRequestApprovalResponse { decision: 
ApprovalDecision::Accept, })?, ) .await?; let output_delta_notif = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("item/fileChange/outputDelta"), ) .await??; let output_delta: FileChangeOutputDeltaNotification = serde_json::from_value( output_delta_notif .params .clone() .expect("item/fileChange/outputDelta params"), )?; assert_eq!(output_delta.thread_id, thread.id); assert_eq!(output_delta.turn_id, turn.id); assert_eq!(output_delta.item_id, "patch-call"); assert!( !output_delta.delta.is_empty(), "expected delta to be non-empty, got: {}", output_delta.delta ); let completed_file_change = timeout(DEFAULT_READ_TIMEOUT, async { loop { let completed_notif = mcp .read_stream_until_notification_message("item/completed") .await?; let completed: ItemCompletedNotification = serde_json::from_value( completed_notif .params .clone() .expect("item/completed params"), )?; if let ThreadItem::FileChange { .. } = completed.item { return Ok::<ThreadItem, anyhow::Error>(completed.item); } } }) .await??; let ThreadItem::FileChange { ref id, status, .. 
} = completed_file_change else { unreachable!("loop ensures we break on file change items"); }; assert_eq!(id, "patch-call"); assert_eq!(status, PatchApplyStatus::Completed); timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("codex/event/task_complete"), ) .await??; let readme_contents = std::fs::read_to_string(expected_readme_path)?; assert_eq!(readme_contents, "new line\n"); Ok(()) } #[tokio::test] async fn turn_start_file_change_approval_decline_v2() -> Result<()> { skip_if_no_network!(Ok(())); let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); std::fs::create_dir(&codex_home)?; let workspace = tmp.path().join("workspace"); std::fs::create_dir(&workspace)?; let patch = r#"*** Begin Patch *** Add File: README.md +new line *** End Patch "#; let responses = vec![ create_apply_patch_sse_response(patch, "patch-call")?, create_final_assistant_message_sse_response("patch declined")?, ]; let server = create_mock_chat_completions_server(responses).await; create_config_toml(&codex_home, &server.uri(), "untrusted")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let start_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), cwd: Some(workspace.to_string_lossy().into_owned()), ..Default::default() }) .await?; let start_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(start_req)), ) .await??; let ThreadStartResponse { thread, .. 
} = to_response::<ThreadStartResponse>(start_resp)?; let turn_req = mcp .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { text: "apply patch".into(), }], cwd: Some(workspace.clone()), ..Default::default() }) .await?; let turn_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), ) .await??; let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?; let started_file_change = timeout(DEFAULT_READ_TIMEOUT, async { loop { let started_notif = mcp .read_stream_until_notification_message("item/started") .await?; let started: ItemStartedNotification = serde_json::from_value(started_notif.params.clone().expect("item/started params"))?; if let ThreadItem::FileChange { .. } = started.item { return Ok::<ThreadItem, anyhow::Error>(started.item); } } }) .await??; let ThreadItem::FileChange { ref id, status, ref changes, } = started_file_change else { unreachable!("loop ensures we break on file change items"); }; assert_eq!(id, "patch-call"); assert_eq!(status, PatchApplyStatus::InProgress); let started_changes = changes.clone(); let server_req = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_request_message(), ) .await??; let ServerRequest::FileChangeRequestApproval { request_id, params } = server_req else { panic!("expected FileChangeRequestApproval request") }; assert_eq!(params.item_id, "patch-call"); assert_eq!(params.thread_id, thread.id); assert_eq!(params.turn_id, turn.id); let expected_readme_path = workspace.join("README.md"); let expected_readme_path_str = expected_readme_path.to_string_lossy().into_owned(); pretty_assertions::assert_eq!( started_changes, vec![codex_app_server_protocol::FileUpdateChange { path: expected_readme_path_str.clone(), kind: PatchChangeKind::Add, diff: "new line\n".to_string(), }] ); mcp.send_response( request_id, serde_json::to_value(FileChangeRequestApprovalResponse { decision: 
ApprovalDecision::Decline, })?, ) .await?; let completed_file_change = timeout(DEFAULT_READ_TIMEOUT, async { loop { let completed_notif = mcp .read_stream_until_notification_message("item/completed") .await?; let completed: ItemCompletedNotification = serde_json::from_value( completed_notif .params .clone() .expect("item/completed params"), )?; if let ThreadItem::FileChange { .. } = completed.item { return Ok::<ThreadItem, anyhow::Error>(completed.item); } } }) .await??; let ThreadItem::FileChange { ref id, status, .. } = completed_file_change else { unreachable!("loop ensures we break on file change items"); }; assert_eq!(id, "patch-call");
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/thread_list.rs
codex-rs/app-server/tests/suite/v2/thread_list.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_fake_rollout; use app_test_support::to_response; use codex_app_server_protocol::GitInfo as ApiGitInfo; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SessionSource; use codex_app_server_protocol::ThreadListResponse; use codex_protocol::protocol::GitInfo as CoreGitInfo; use std::path::Path; use std::path::PathBuf; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); async fn init_mcp(codex_home: &Path) -> Result<McpProcess> { let mut mcp = McpProcess::new(codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; Ok(mcp) } async fn list_threads( mcp: &mut McpProcess, cursor: Option<String>, limit: Option<u32>, providers: Option<Vec<String>>, ) -> Result<ThreadListResponse> { let request_id = mcp .send_thread_list_request(codex_app_server_protocol::ThreadListParams { cursor, limit, model_providers: providers, }) .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; to_response::<ThreadListResponse>(resp) } fn create_fake_rollouts<F, G>( codex_home: &Path, count: usize, provider_for_index: F, timestamp_for_index: G, preview: &str, ) -> Result<Vec<String>> where F: Fn(usize) -> &'static str, G: Fn(usize) -> (String, String), { let mut ids = Vec::with_capacity(count); for i in 0..count { let (ts_file, ts_rfc) = timestamp_for_index(i); ids.push(create_fake_rollout( codex_home, &ts_file, &ts_rfc, preview, Some(provider_for_index(i)), None, )?); } Ok(ids) } fn timestamp_at( year: i32, month: u32, day: u32, hour: u32, minute: u32, second: u32, ) -> (String, String) { ( format!("{year:04}-{month:02}-{day:02}T{hour:02}-{minute:02}-{second:02}"), format!("{year:04}-{month:02}-{day:02}T{hour:02}:{minute:02}:{second:02}Z"), ) } 
#[tokio::test] async fn thread_list_basic_empty() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; let mut mcp = init_mcp(codex_home.path()).await?; let ThreadListResponse { data, next_cursor } = list_threads( &mut mcp, None, Some(10), Some(vec!["mock_provider".to_string()]), ) .await?; assert!(data.is_empty()); assert_eq!(next_cursor, None); Ok(()) } // Minimal config.toml for listing. fn create_minimal_config(codex_home: &std::path::Path) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, r#" model = "mock-model" approval_policy = "never" "#, ) } #[tokio::test] async fn thread_list_pagination_next_cursor_none_on_last_page() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; // Create three rollouts so we can paginate with limit=2. let _a = create_fake_rollout( codex_home.path(), "2025-01-02T12-00-00", "2025-01-02T12:00:00Z", "Hello", Some("mock_provider"), None, )?; let _b = create_fake_rollout( codex_home.path(), "2025-01-01T13-00-00", "2025-01-01T13:00:00Z", "Hello", Some("mock_provider"), None, )?; let _c = create_fake_rollout( codex_home.path(), "2025-01-01T12-00-00", "2025-01-01T12:00:00Z", "Hello", Some("mock_provider"), None, )?; let mut mcp = init_mcp(codex_home.path()).await?; // Page 1: limit 2 → expect next_cursor Some. 
let ThreadListResponse { data: data1, next_cursor: cursor1, } = list_threads( &mut mcp, None, Some(2), Some(vec!["mock_provider".to_string()]), ) .await?; assert_eq!(data1.len(), 2); for thread in &data1 { assert_eq!(thread.preview, "Hello"); assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.created_at > 0); assert_eq!(thread.cwd, PathBuf::from("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); } let cursor1 = cursor1.expect("expected nextCursor on first page"); // Page 2: with cursor → expect next_cursor None when no more results. let ThreadListResponse { data: data2, next_cursor: cursor2, } = list_threads( &mut mcp, Some(cursor1), Some(2), Some(vec!["mock_provider".to_string()]), ) .await?; assert!(data2.len() <= 2); for thread in &data2 { assert_eq!(thread.preview, "Hello"); assert_eq!(thread.model_provider, "mock_provider"); assert!(thread.created_at > 0); assert_eq!(thread.cwd, PathBuf::from("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); } assert_eq!(cursor2, None, "expected nextCursor to be null on last page"); Ok(()) } #[tokio::test] async fn thread_list_respects_provider_filter() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; // Create rollouts under two providers. let _a = create_fake_rollout( codex_home.path(), "2025-01-02T10-00-00", "2025-01-02T10:00:00Z", "X", Some("mock_provider"), None, )?; // mock_provider let _b = create_fake_rollout( codex_home.path(), "2025-01-02T11-00-00", "2025-01-02T11:00:00Z", "X", Some("other_provider"), None, )?; let mut mcp = init_mcp(codex_home.path()).await?; // Filter to only other_provider; expect 1 item, nextCursor None. 
let ThreadListResponse { data, next_cursor } = list_threads( &mut mcp, None, Some(10), Some(vec!["other_provider".to_string()]), ) .await?; assert_eq!(data.len(), 1); assert_eq!(next_cursor, None); let thread = &data[0]; assert_eq!(thread.preview, "X"); assert_eq!(thread.model_provider, "other_provider"); let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-02T11:00:00Z")?.timestamp(); assert_eq!(thread.created_at, expected_ts); assert_eq!(thread.cwd, PathBuf::from("/")); assert_eq!(thread.cli_version, "0.0.0"); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.git_info, None); Ok(()) } #[tokio::test] async fn thread_list_fetches_until_limit_or_exhausted() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; // Newest 16 conversations belong to a different provider; the older 8 are the // only ones that match the filter. We request 8 so the server must keep // paging past the first two pages to reach the desired count. create_fake_rollouts( codex_home.path(), 24, |i| { if i < 16 { "skip_provider" } else { "target_provider" } }, |i| timestamp_at(2025, 3, 30 - i as u32, 12, 0, 0), "Hello", )?; let mut mcp = init_mcp(codex_home.path()).await?; // Request 8 threads for the target provider; the matches only start on the // third page so we rely on pagination to reach the limit. 
let ThreadListResponse { data, next_cursor } = list_threads( &mut mcp, None, Some(8), Some(vec!["target_provider".to_string()]), ) .await?; assert_eq!( data.len(), 8, "should keep paging until the requested count is filled" ); assert!( data.iter() .all(|thread| thread.model_provider == "target_provider"), "all returned threads must match the requested provider" ); assert_eq!( next_cursor, None, "once the requested count is satisfied on the final page, nextCursor should be None" ); Ok(()) } #[tokio::test] async fn thread_list_enforces_max_limit() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; create_fake_rollouts( codex_home.path(), 105, |_| "mock_provider", |i| { let month = 5 + (i / 28); let day = (i % 28) + 1; timestamp_at(2025, month as u32, day as u32, 0, 0, 0) }, "Hello", )?; let mut mcp = init_mcp(codex_home.path()).await?; let ThreadListResponse { data, next_cursor } = list_threads( &mut mcp, None, Some(200), Some(vec!["mock_provider".to_string()]), ) .await?; assert_eq!( data.len(), 100, "limit should be clamped to the maximum page size" ); assert!( next_cursor.is_some(), "when more than the maximum exist, nextCursor should continue pagination" ); Ok(()) } #[tokio::test] async fn thread_list_stops_when_not_enough_filtered_results_exist() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; // Only the last 7 conversations match the provider filter; we ask for 10 to // ensure the server exhausts pagination without looping forever. create_fake_rollouts( codex_home.path(), 22, |i| { if i < 15 { "skip_provider" } else { "target_provider" } }, |i| timestamp_at(2025, 4, 28 - i as u32, 8, 0, 0), "Hello", )?; let mut mcp = init_mcp(codex_home.path()).await?; // Request more threads than exist after filtering; expect all matches to be // returned with nextCursor None. 
let ThreadListResponse { data, next_cursor } = list_threads( &mut mcp, None, Some(10), Some(vec!["target_provider".to_string()]), ) .await?; assert_eq!( data.len(), 7, "all available filtered threads should be returned" ); assert!( data.iter() .all(|thread| thread.model_provider == "target_provider"), "results should still respect the provider filter" ); assert_eq!( next_cursor, None, "when results are exhausted before reaching the limit, nextCursor should be None" ); Ok(()) } #[tokio::test] async fn thread_list_includes_git_info() -> Result<()> { let codex_home = TempDir::new()?; create_minimal_config(codex_home.path())?; let git_info = CoreGitInfo { commit_hash: Some("abc123".to_string()), branch: Some("main".to_string()), repository_url: Some("https://example.com/repo.git".to_string()), }; let conversation_id = create_fake_rollout( codex_home.path(), "2025-02-01T09-00-00", "2025-02-01T09:00:00Z", "Git info preview", Some("mock_provider"), Some(git_info), )?; let mut mcp = init_mcp(codex_home.path()).await?; let ThreadListResponse { data, .. } = list_threads( &mut mcp, None, Some(10), Some(vec!["mock_provider".to_string()]), ) .await?; let thread = data .iter() .find(|t| t.id == conversation_id) .expect("expected thread for created rollout"); let expected_git = ApiGitInfo { sha: Some("abc123".to_string()), branch: Some("main".to_string()), origin_url: Some("https://example.com/repo.git".to_string()), }; assert_eq!(thread.git_info, Some(expected_git)); assert_eq!(thread.source, SessionSource::Cli); assert_eq!(thread.cwd, PathBuf::from("/")); assert_eq!(thread.cli_version, "0.0.0"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/rate_limits.rs
codex-rs/app-server/tests/suite/v2/rate_limits.rs
use anyhow::Result; use app_test_support::ChatGptAuthFixture; use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::write_chatgpt_auth; use codex_app_server_protocol::GetAccountRateLimitsResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::RateLimitSnapshot; use codex_app_server_protocol::RateLimitWindow; use codex_app_server_protocol::RequestId; use codex_core::auth::AuthCredentialsStoreMode; use codex_protocol::account::PlanType as AccountPlanType; use pretty_assertions::assert_eq; use serde_json::json; use std::path::Path; use tempfile::TempDir; use tokio::time::timeout; use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::header; use wiremock::matchers::method; use wiremock::matchers::path; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const INVALID_REQUEST_ERROR_CODE: i64 = -32600; #[tokio::test] async fn get_account_rate_limits_requires_auth() -> Result<()> { let codex_home = TempDir::new()?; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_get_account_rate_limits_request().await?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.id, RequestId::Integer(request_id)); assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); assert_eq!( error.error.message, "codex account authentication required to read rate limits" ); Ok(()) } #[tokio::test] async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> { let codex_home = TempDir::new()?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; 
login_with_api_key(&mut mcp, "sk-test-key").await?; let request_id = mcp.send_get_account_rate_limits_request().await?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.id, RequestId::Integer(request_id)); assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); assert_eq!( error.error.message, "chatgpt authentication required to read rate limits" ); Ok(()) } #[tokio::test] async fn get_account_rate_limits_returns_snapshot() -> Result<()> { let codex_home = TempDir::new()?; write_chatgpt_auth( codex_home.path(), ChatGptAuthFixture::new("chatgpt-token") .account_id("account-123") .plan_type("pro"), AuthCredentialsStoreMode::File, )?; let server = MockServer::start().await; let server_url = server.uri(); write_chatgpt_base_url(codex_home.path(), &server_url)?; let primary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T00:02:00Z") .expect("parse primary reset timestamp") .timestamp(); let secondary_reset_timestamp = chrono::DateTime::parse_from_rfc3339("2025-01-01T01:00:00Z") .expect("parse secondary reset timestamp") .timestamp(); let response_body = json!({ "plan_type": "pro", "rate_limit": { "allowed": true, "limit_reached": false, "primary_window": { "used_percent": 42, "limit_window_seconds": 3600, "reset_after_seconds": 120, "reset_at": primary_reset_timestamp, }, "secondary_window": { "used_percent": 5, "limit_window_seconds": 86400, "reset_after_seconds": 43200, "reset_at": secondary_reset_timestamp, } } }); Mock::given(method("GET")) .and(path("/api/codex/usage")) .and(header("authorization", "Bearer chatgpt-token")) .and(header("chatgpt-account-id", "account-123")) .respond_with(ResponseTemplate::new(200).set_body_json(response_body)) .mount(&server) .await; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = 
mcp.send_get_account_rate_limits_request().await?; let response: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let received: GetAccountRateLimitsResponse = to_response(response)?; let expected = GetAccountRateLimitsResponse { rate_limits: RateLimitSnapshot { primary: Some(RateLimitWindow { used_percent: 42, window_duration_mins: Some(60), resets_at: Some(primary_reset_timestamp), }), secondary: Some(RateLimitWindow { used_percent: 5, window_duration_mins: Some(1440), resets_at: Some(secondary_reset_timestamp), }), credits: None, plan_type: Some(AccountPlanType::Pro), }, }; assert_eq!(received, expected); Ok(()) } async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> { let request_id = mcp .send_login_api_key_request(LoginApiKeyParams { api_key: api_key.to_string(), }) .await?; timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; Ok(()) } fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n")) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/review.rs
codex-rs/app-server/tests/suite/v2/review.rs
use anyhow::Result; use app_test_support::McpProcess; use app_test_support::create_final_assistant_message_sse_response; use app_test_support::create_mock_chat_completions_server_unchecked; use app_test_support::to_response; use codex_app_server_protocol::ItemCompletedNotification; use codex_app_server_protocol::ItemStartedNotification; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ReviewDelivery; use codex_app_server_protocol::ReviewStartParams; use codex_app_server_protocol::ReviewStartResponse; use codex_app_server_protocol::ReviewTarget; use codex_app_server_protocol::ThreadItem; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::TurnStatus; use serde_json::json; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); const INVALID_REQUEST_ERROR_CODE: i64 = -32600; #[tokio::test] async fn review_start_runs_review_turn_and_emits_code_review_item() -> Result<()> { let review_payload = json!({ "findings": [ { "title": "Prefer Stylize helpers", "body": "Use .dim()/.bold() chaining instead of manual Style.", "confidence_score": 0.9, "priority": 1, "code_location": { "absolute_file_path": "/tmp/file.rs", "line_range": {"start": 10, "end": 20} } } ], "overall_correctness": "good", "overall_explanation": "Looks solid overall with minor polish suggested.", "overall_confidence_score": 0.75 }) .to_string(); let responses = vec![create_final_assistant_message_sse_response( &review_payload, )?]; let server = create_mock_chat_completions_server_unchecked(responses).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, 
mcp.initialize()).await??; let thread_id = start_default_thread(&mut mcp).await?; let review_req = mcp .send_review_start_request(ReviewStartParams { thread_id: thread_id.clone(), delivery: Some(ReviewDelivery::Inline), target: ReviewTarget::Commit { sha: "1234567deadbeef".to_string(), title: Some("Tidy UI colors".to_string()), }, }) .await?; let review_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(review_req)), ) .await??; let ReviewStartResponse { turn, review_thread_id, } = to_response::<ReviewStartResponse>(review_resp)?; assert_eq!(review_thread_id, thread_id.clone()); let turn_id = turn.id.clone(); assert_eq!(turn.status, TurnStatus::InProgress); // Confirm we see the EnteredReviewMode marker on the main thread. let mut saw_entered_review_mode = false; for _ in 0..10 { let item_started: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("item/started"), ) .await??; let started: ItemStartedNotification = serde_json::from_value(item_started.params.expect("params must be present"))?; match started.item { ThreadItem::EnteredReviewMode { id, review } => { assert_eq!(id, turn_id); assert_eq!(review, "commit 1234567: Tidy UI colors"); saw_entered_review_mode = true; break; } _ => continue, } } assert!( saw_entered_review_mode, "did not observe enteredReviewMode item" ); // Confirm we see the ExitedReviewMode marker (with review text) // on the same turn. Ignore any other items the stream surfaces. 
let mut review_body: Option<String> = None; for _ in 0..10 { let review_notif: JSONRPCNotification = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("item/completed"), ) .await??; let completed: ItemCompletedNotification = serde_json::from_value(review_notif.params.expect("params must be present"))?; match completed.item { ThreadItem::ExitedReviewMode { id, review } => { assert_eq!(id, turn_id); review_body = Some(review); break; } _ => continue, } } let review = review_body.expect("did not observe a code review item"); assert!(review.contains("Prefer Stylize helpers")); assert!(review.contains("/tmp/file.rs:10-20")); Ok(()) } #[tokio::test] async fn review_start_rejects_empty_base_branch() -> Result<()> { let server = create_mock_chat_completions_server_unchecked(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_id = start_default_thread(&mut mcp).await?; let request_id = mcp .send_review_start_request(ReviewStartParams { thread_id, delivery: Some(ReviewDelivery::Inline), target: ReviewTarget::BaseBranch { branch: " ".to_string(), }, }) .await?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); assert!( error.error.message.contains("branch must not be empty"), "unexpected message: {}", error.error.message ); Ok(()) } #[tokio::test] async fn review_start_with_detached_delivery_returns_new_thread_id() -> Result<()> { let review_payload = json!({ "findings": [], "overall_correctness": "ok", "overall_explanation": "detached review", "overall_confidence_score": 0.5 }) .to_string(); let responses = vec![create_final_assistant_message_sse_response( &review_payload, )?]; let server = 
create_mock_chat_completions_server_unchecked(responses).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_id = start_default_thread(&mut mcp).await?; let review_req = mcp .send_review_start_request(ReviewStartParams { thread_id: thread_id.clone(), delivery: Some(ReviewDelivery::Detached), target: ReviewTarget::Custom { instructions: "detached review".to_string(), }, }) .await?; let review_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(review_req)), ) .await??; let ReviewStartResponse { turn, review_thread_id, } = to_response::<ReviewStartResponse>(review_resp)?; assert_eq!(turn.status, TurnStatus::InProgress); assert_ne!( review_thread_id, thread_id, "detached review should run on a different thread" ); Ok(()) } #[tokio::test] async fn review_start_rejects_empty_commit_sha() -> Result<()> { let server = create_mock_chat_completions_server_unchecked(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_id = start_default_thread(&mut mcp).await?; let request_id = mcp .send_review_start_request(ReviewStartParams { thread_id, delivery: Some(ReviewDelivery::Inline), target: ReviewTarget::Commit { sha: "\t".to_string(), title: None, }, }) .await?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); assert!( error.error.message.contains("sha must not be empty"), "unexpected message: {}", error.error.message ); Ok(()) } #[tokio::test] async fn review_start_rejects_empty_custom_instructions() -> Result<()> { let server = 
create_mock_chat_completions_server_unchecked(vec![]).await; let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), &server.uri())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let thread_id = start_default_thread(&mut mcp).await?; let request_id = mcp .send_review_start_request(ReviewStartParams { thread_id, delivery: Some(ReviewDelivery::Inline), target: ReviewTarget::Custom { instructions: "\n\n".to_string(), }, }) .await?; let error: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!(error.error.code, INVALID_REQUEST_ERROR_CODE); assert!( error .error .message .contains("instructions must not be empty"), "unexpected message: {}", error.error.message ); Ok(()) } async fn start_default_thread(mcp: &mut McpProcess) -> Result<String> { let thread_req = mcp .send_thread_start_request(ThreadStartParams { model: Some("mock-model".to_string()), ..Default::default() }) .await?; let thread_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), ) .await??; let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?; Ok(thread.id) } fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( config_toml, format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "read-only" model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider" base_url = "{server_uri}/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 "# ), ) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/suite/v2/account.rs
codex-rs/app-server/tests/suite/v2/account.rs
use anyhow::Result; use anyhow::bail; use app_test_support::McpProcess; use app_test_support::to_response; use app_test_support::ChatGptAuthFixture; use app_test_support::write_chatgpt_auth; use codex_app_server_protocol::Account; use codex_app_server_protocol::AuthMode; use codex_app_server_protocol::CancelLoginAccountParams; use codex_app_server_protocol::CancelLoginAccountResponse; use codex_app_server_protocol::GetAccountParams; use codex_app_server_protocol::GetAccountResponse; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginAccountResponse; use codex_app_server_protocol::LogoutAccountResponse; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ServerNotification; use codex_core::auth::AuthCredentialsStoreMode; use codex_login::login_with_api_key; use codex_protocol::account::PlanType as AccountPlanType; use pretty_assertions::assert_eq; use serial_test::serial; use std::path::Path; use std::time::Duration; use tempfile::TempDir; use tokio::time::timeout; const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); // Helper to create a minimal config.toml for the app server #[derive(Default)] struct CreateConfigTomlParams { forced_method: Option<String>, forced_workspace_id: Option<String>, requires_openai_auth: Option<bool>, } fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); let forced_line = if let Some(method) = params.forced_method { format!("forced_login_method = \"{method}\"\n") } else { String::new() }; let forced_workspace_line = if let Some(ws) = params.forced_workspace_id { format!("forced_chatgpt_workspace_id = \"{ws}\"\n") } else { String::new() }; let requires_line = match params.requires_openai_auth { Some(true) => "requires_openai_auth = true\n".to_string(), Some(false) => String::new(), None => String::new(), }; let 
contents = format!( r#" model = "mock-model" approval_policy = "never" sandbox_mode = "danger-full-access" {forced_line} {forced_workspace_line} model_provider = "mock_provider" [model_providers.mock_provider] name = "Mock provider for test" base_url = "http://127.0.0.1:0/v1" wire_api = "chat" request_max_retries = 0 stream_max_retries = 0 {requires_line} "# ); std::fs::write(config_toml, contents) } #[tokio::test] async fn logout_account_removes_auth_and_notifies() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?; login_with_api_key( codex_home.path(), "sk-test-key", AuthCredentialsStoreMode::File, )?; assert!(codex_home.path().join("auth.json").exists()); let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let id = mcp.send_logout_account_request().await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(id)), ) .await??; let _ok: LogoutAccountResponse = to_response(resp)?; let note = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("account/updated"), ) .await??; let parsed: ServerNotification = note.try_into()?; let ServerNotification::AccountUpdated(payload) = parsed else { bail!("unexpected notification: {parsed:?}"); }; assert!( payload.auth_mode.is_none(), "auth_method should be None after logout" ); assert!( !codex_home.path().join("auth.json").exists(), "auth.json should be deleted" ); let get_id = mcp .send_get_account_request(GetAccountParams { refresh_token: false, }) .await?; let get_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(get_id)), ) .await??; let account: GetAccountResponse = to_response(get_resp)?; assert_eq!(account.account, None); Ok(()) } #[tokio::test] async fn login_account_api_key_succeeds_and_notifies() -> 
Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let req_id = mcp .send_login_account_api_key_request("sk-test-key") .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id)), ) .await??; let login: LoginAccountResponse = to_response(resp)?; assert_eq!(login, LoginAccountResponse::ApiKey {}); let note = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("account/login/completed"), ) .await??; let parsed: ServerNotification = note.try_into()?; let ServerNotification::AccountLoginCompleted(payload) = parsed else { bail!("unexpected notification: {parsed:?}"); }; pretty_assertions::assert_eq!(payload.login_id, None); pretty_assertions::assert_eq!(payload.success, true); pretty_assertions::assert_eq!(payload.error, None); let note = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("account/updated"), ) .await??; let parsed: ServerNotification = note.try_into()?; let ServerNotification::AccountUpdated(payload) = parsed else { bail!("unexpected notification: {parsed:?}"); }; pretty_assertions::assert_eq!(payload.auth_mode, Some(AuthMode::ApiKey)); assert!(codex_home.path().join("auth.json").exists()); Ok(()) } #[tokio::test] async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { forced_method: Some("chatgpt".to_string()), ..Default::default() }, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp .send_login_account_api_key_request("sk-test-key") .await?; let err: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, 
mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!( err.error.message, "API key login is disabled. Use ChatGPT login instead." ); Ok(()) } #[tokio::test] async fn login_account_chatgpt_rejected_when_forced_api() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { forced_method: Some("api".to_string()), ..Default::default() }, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_login_account_chatgpt_request().await?; let err: JSONRPCError = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_error_message(RequestId::Integer(request_id)), ) .await??; assert_eq!( err.error.message, "ChatGPT login is disabled. Use API key login instead." ); Ok(()) } #[tokio::test] // Serialize tests that launch the login server since it binds to a fixed port. #[serial(login_port)] async fn login_account_chatgpt_start_can_be_cancelled() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_login_account_chatgpt_request().await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let login: LoginAccountResponse = to_response(resp)?; let LoginAccountResponse::Chatgpt { login_id, auth_url } = login else { bail!("unexpected login response: {login:?}"); }; assert!( auth_url.contains("redirect_uri=http%3A%2F%2Flocalhost"), "auth_url should contain a redirect_uri to localhost" ); let cancel_id = mcp .send_cancel_login_account_request(CancelLoginAccountParams { login_id: login_id.clone(), }) .await?; let cancel_resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, 
mcp.read_stream_until_response_message(RequestId::Integer(cancel_id)), ) .await??; let _ok: CancelLoginAccountResponse = to_response(cancel_resp)?; let note = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_notification_message("account/login/completed"), ) .await??; let parsed: ServerNotification = note.try_into()?; let ServerNotification::AccountLoginCompleted(payload) = parsed else { bail!("unexpected notification: {parsed:?}"); }; pretty_assertions::assert_eq!(payload.login_id, Some(login_id)); pretty_assertions::assert_eq!(payload.success, false); assert!( payload.error.is_some(), "expected a non-empty error on cancel" ); let maybe_updated = timeout( Duration::from_millis(500), mcp.read_stream_until_notification_message("account/updated"), ) .await; assert!( maybe_updated.is_err(), "account/updated should not be emitted when login is cancelled" ); Ok(()) } #[tokio::test] // Serialize tests that launch the login server since it binds to a fixed port. #[serial(login_port)] async fn login_account_chatgpt_includes_forced_workspace_query_param() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { forced_workspace_id: Some("ws-forced".to_string()), ..Default::default() }, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let request_id = mcp.send_login_account_chatgpt_request().await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let login: LoginAccountResponse = to_response(resp)?; let LoginAccountResponse::Chatgpt { auth_url, .. 
} = login else { bail!("unexpected login response: {login:?}"); }; assert!( auth_url.contains("allowed_workspace_id=ws-forced"), "auth URL should include forced workspace" ); Ok(()) } #[tokio::test] async fn get_account_no_auth() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { requires_openai_auth: Some(true), ..Default::default() }, )?; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let params = GetAccountParams { refresh_token: false, }; let request_id = mcp.send_get_account_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let account: GetAccountResponse = to_response(resp)?; assert_eq!(account.account, None, "expected no account"); assert_eq!(account.requires_openai_auth, true); Ok(()) } #[tokio::test] async fn get_account_with_api_key() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { requires_openai_auth: Some(true), ..Default::default() }, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let req_id = mcp .send_login_account_api_key_request("sk-test-key") .await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(req_id)), ) .await??; let _login_ok = to_response::<LoginAccountResponse>(resp)?; let params = GetAccountParams { refresh_token: false, }; let request_id = mcp.send_get_account_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let received: GetAccountResponse = to_response(resp)?; let expected = GetAccountResponse { account: Some(Account::ApiKey {}), 
requires_openai_auth: true, }; assert_eq!(received, expected); Ok(()) } #[tokio::test] async fn get_account_when_auth_not_required() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { requires_openai_auth: Some(false), ..Default::default() }, )?; let mut mcp = McpProcess::new(codex_home.path()).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let params = GetAccountParams { refresh_token: false, }; let request_id = mcp.send_get_account_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let received: GetAccountResponse = to_response(resp)?; let expected = GetAccountResponse { account: None, requires_openai_auth: false, }; assert_eq!(received, expected); Ok(()) } #[tokio::test] async fn get_account_with_chatgpt() -> Result<()> { let codex_home = TempDir::new()?; create_config_toml( codex_home.path(), CreateConfigTomlParams { requires_openai_auth: Some(true), ..Default::default() }, )?; write_chatgpt_auth( codex_home.path(), ChatGptAuthFixture::new("access-chatgpt") .email("user@example.com") .plan_type("pro"), AuthCredentialsStoreMode::File, )?; let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; let params = GetAccountParams { refresh_token: false, }; let request_id = mcp.send_get_account_request(params).await?; let resp: JSONRPCResponse = timeout( DEFAULT_READ_TIMEOUT, mcp.read_stream_until_response_message(RequestId::Integer(request_id)), ) .await??; let received: GetAccountResponse = to_response(resp)?; let expected = GetAccountResponse { account: Some(Account::Chatgpt { email: "user@example.com".to_string(), plan_type: AccountPlanType::Pro, }), requires_openai_auth: true, }; assert_eq!(received, expected); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/responses.rs
codex-rs/app-server/tests/common/responses.rs
use serde_json::json; use std::path::Path; pub fn create_shell_command_sse_response( command: Vec<String>, workdir: Option<&Path>, timeout_ms: Option<u64>, call_id: &str, ) -> anyhow::Result<String> { // The `arguments` for the `shell_command` tool is a serialized JSON object. let command_str = shlex::try_join(command.iter().map(String::as_str))?; let tool_call_arguments = serde_json::to_string(&json!({ "command": command_str, "workdir": workdir.map(|w| w.to_string_lossy()), "timeout_ms": timeout_ms }))?; let tool_call = json!({ "choices": [ { "delta": { "tool_calls": [ { "id": call_id, "function": { "name": "shell_command", "arguments": tool_call_arguments } } ] }, "finish_reason": "tool_calls" } ] }); let sse = format!( "data: {}\n\ndata: DONE\n\n", serde_json::to_string(&tool_call)? ); Ok(sse) } pub fn create_final_assistant_message_sse_response(message: &str) -> anyhow::Result<String> { let assistant_message = json!({ "choices": [ { "delta": { "content": message }, "finish_reason": "stop" } ] }); let sse = format!( "data: {}\n\ndata: DONE\n\n", serde_json::to_string(&assistant_message)? ); Ok(sse) } pub fn create_apply_patch_sse_response( patch_content: &str, call_id: &str, ) -> anyhow::Result<String> { // Use shell_command to call apply_patch with heredoc format let command = format!("apply_patch <<'EOF'\n{patch_content}\nEOF"); let tool_call_arguments = serde_json::to_string(&json!({ "command": command }))?; let tool_call = json!({ "choices": [ { "delta": { "tool_calls": [ { "id": call_id, "function": { "name": "shell_command", "arguments": tool_call_arguments } } ] }, "finish_reason": "tool_calls" } ] }); let sse = format!( "data: {}\n\ndata: DONE\n\n", serde_json::to_string(&tool_call)? 
); Ok(sse) } pub fn create_exec_command_sse_response(call_id: &str) -> anyhow::Result<String> { let (cmd, args) = if cfg!(windows) { ("cmd.exe", vec!["/d", "/c", "echo hi"]) } else { ("/bin/sh", vec!["-c", "echo hi"]) }; let command = std::iter::once(cmd.to_string()) .chain(args.into_iter().map(str::to_string)) .collect::<Vec<_>>(); let tool_call_arguments = serde_json::to_string(&json!({ "cmd": command.join(" "), "yield_time_ms": 500 }))?; let tool_call = json!({ "choices": [ { "delta": { "tool_calls": [ { "id": call_id, "function": { "name": "exec_command", "arguments": tool_call_arguments } } ] }, "finish_reason": "tool_calls" } ] }); let sse = format!( "data: {}\n\ndata: DONE\n\n", serde_json::to_string(&tool_call)? ); Ok(sse) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/lib.rs
codex-rs/app-server/tests/common/lib.rs
mod auth_fixtures; mod mcp_process; mod mock_model_server; mod models_cache; mod responses; mod rollout; pub use auth_fixtures::ChatGptAuthFixture; pub use auth_fixtures::ChatGptIdTokenClaims; pub use auth_fixtures::encode_id_token; pub use auth_fixtures::write_chatgpt_auth; use codex_app_server_protocol::JSONRPCResponse; pub use core_test_support::format_with_current_shell; pub use core_test_support::format_with_current_shell_display; pub use core_test_support::format_with_current_shell_display_non_login; pub use core_test_support::format_with_current_shell_non_login; pub use core_test_support::test_path_buf_with_windows; pub use core_test_support::test_tmp_path; pub use core_test_support::test_tmp_path_buf; pub use mcp_process::McpProcess; pub use mock_model_server::create_mock_chat_completions_server; pub use mock_model_server::create_mock_chat_completions_server_unchecked; pub use models_cache::write_models_cache; pub use models_cache::write_models_cache_with_models; pub use responses::create_apply_patch_sse_response; pub use responses::create_exec_command_sse_response; pub use responses::create_final_assistant_message_sse_response; pub use responses::create_shell_command_sse_response; pub use rollout::create_fake_rollout; use serde::de::DeserializeOwned; pub fn to_response<T: DeserializeOwned>(response: JSONRPCResponse) -> anyhow::Result<T> { let value = serde_json::to_value(response.result)?; let codex_response = serde_json::from_value(value)?; Ok(codex_response) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/rollout.rs
codex-rs/app-server/tests/common/rollout.rs
use anyhow::Result; use codex_protocol::ConversationId; use codex_protocol::protocol::GitInfo; use codex_protocol::protocol::SessionMeta; use codex_protocol::protocol::SessionMetaLine; use codex_protocol::protocol::SessionSource; use serde_json::json; use std::fs; use std::path::Path; use std::path::PathBuf; use uuid::Uuid; /// Create a minimal rollout file under `CODEX_HOME/sessions/YYYY/MM/DD/`. /// /// - `filename_ts` is the filename timestamp component in `YYYY-MM-DDThh-mm-ss` format. /// - `meta_rfc3339` is the envelope timestamp used in JSON lines. /// - `preview` is the user message preview text. /// - `model_provider` optionally sets the provider in the session meta payload. /// /// Returns the generated conversation/session UUID as a string. pub fn create_fake_rollout( codex_home: &Path, filename_ts: &str, meta_rfc3339: &str, preview: &str, model_provider: Option<&str>, git_info: Option<GitInfo>, ) -> Result<String> { let uuid = Uuid::new_v4(); let uuid_str = uuid.to_string(); let conversation_id = ConversationId::from_string(&uuid_str)?; // sessions/YYYY/MM/DD derived from filename_ts (YYYY-MM-DDThh-mm-ss) let year = &filename_ts[0..4]; let month = &filename_ts[5..7]; let day = &filename_ts[8..10]; let dir = codex_home.join("sessions").join(year).join(month).join(day); fs::create_dir_all(&dir)?; let file_path = dir.join(format!("rollout-{filename_ts}-{uuid}.jsonl")); // Build JSONL lines let meta = SessionMeta { id: conversation_id, timestamp: meta_rfc3339.to_string(), cwd: PathBuf::from("/"), originator: "codex".to_string(), cli_version: "0.0.0".to_string(), instructions: None, source: SessionSource::Cli, model_provider: model_provider.map(str::to_string), }; let payload = serde_json::to_value(SessionMetaLine { meta, git: git_info, })?; let lines = [ json!({ "timestamp": meta_rfc3339, "type": "session_meta", "payload": payload }) .to_string(), json!({ "timestamp": meta_rfc3339, "type":"response_item", "payload": { "type":"message", "role":"user", 
"content":[{"type":"input_text","text": preview}] } }) .to_string(), json!({ "timestamp": meta_rfc3339, "type":"event_msg", "payload": { "type":"user_message", "message": preview, "kind": "plain" } }) .to_string(), ]; fs::write(file_path, lines.join("\n") + "\n")?; Ok(uuid_str) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/auth_fixtures.rs
codex-rs/app-server/tests/common/auth_fixtures.rs
use std::path::Path; use anyhow::Context; use anyhow::Result; use base64::Engine; use base64::engine::general_purpose::URL_SAFE_NO_PAD; use chrono::DateTime; use chrono::Utc; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::auth::AuthDotJson; use codex_core::auth::save_auth; use codex_core::token_data::TokenData; use codex_core::token_data::parse_id_token; use serde_json::json; /// Builder for writing a fake ChatGPT auth.json in tests. #[derive(Debug, Clone)] pub struct ChatGptAuthFixture { access_token: String, refresh_token: String, account_id: Option<String>, claims: ChatGptIdTokenClaims, last_refresh: Option<Option<DateTime<Utc>>>, } impl ChatGptAuthFixture { pub fn new(access_token: impl Into<String>) -> Self { Self { access_token: access_token.into(), refresh_token: "refresh-token".to_string(), account_id: None, claims: ChatGptIdTokenClaims::default(), last_refresh: None, } } pub fn refresh_token(mut self, refresh_token: impl Into<String>) -> Self { self.refresh_token = refresh_token.into(); self } pub fn account_id(mut self, account_id: impl Into<String>) -> Self { self.account_id = Some(account_id.into()); self } pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self { self.claims.plan_type = Some(plan_type.into()); self } pub fn email(mut self, email: impl Into<String>) -> Self { self.claims.email = Some(email.into()); self } pub fn last_refresh(mut self, last_refresh: Option<DateTime<Utc>>) -> Self { self.last_refresh = Some(last_refresh); self } pub fn claims(mut self, claims: ChatGptIdTokenClaims) -> Self { self.claims = claims; self } } #[derive(Debug, Clone, Default)] pub struct ChatGptIdTokenClaims { pub email: Option<String>, pub plan_type: Option<String>, } impl ChatGptIdTokenClaims { pub fn new() -> Self { Self::default() } pub fn email(mut self, email: impl Into<String>) -> Self { self.email = Some(email.into()); self } pub fn plan_type(mut self, plan_type: impl Into<String>) -> Self { self.plan_type = 
Some(plan_type.into()); self } } pub fn encode_id_token(claims: &ChatGptIdTokenClaims) -> Result<String> { let header = json!({ "alg": "none", "typ": "JWT" }); let mut payload = serde_json::Map::new(); if let Some(email) = &claims.email { payload.insert("email".to_string(), json!(email)); } if let Some(plan_type) = &claims.plan_type { payload.insert( "https://api.openai.com/auth".to_string(), json!({ "chatgpt_plan_type": plan_type }), ); } let payload = serde_json::Value::Object(payload); let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&header).context("serialize jwt header")?); let payload_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_vec(&payload).context("serialize jwt payload")?); let signature_b64 = URL_SAFE_NO_PAD.encode(b"signature"); Ok(format!("{header_b64}.{payload_b64}.{signature_b64}")) } pub fn write_chatgpt_auth( codex_home: &Path, fixture: ChatGptAuthFixture, cli_auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> Result<()> { let id_token_raw = encode_id_token(&fixture.claims)?; let id_token = parse_id_token(&id_token_raw).context("parse id token")?; let tokens = TokenData { id_token, access_token: fixture.access_token, refresh_token: fixture.refresh_token, account_id: fixture.account_id, }; let last_refresh = fixture.last_refresh.unwrap_or_else(|| Some(Utc::now())); let auth = AuthDotJson { openai_api_key: None, tokens: Some(tokens), last_refresh, }; save_auth(codex_home, &auth, cli_auth_credentials_store_mode).context("write auth.json") }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/mcp_process.rs
codex-rs/app-server/tests/common/mcp_process.rs
use std::collections::VecDeque; use std::path::Path; use std::process::Stdio; use std::sync::atomic::AtomicI64; use std::sync::atomic::Ordering; use tokio::io::AsyncBufReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufReader; use tokio::process::Child; use tokio::process::ChildStdin; use tokio::process::ChildStdout; use anyhow::Context; use codex_app_server_protocol::AddConversationListenerParams; use codex_app_server_protocol::ArchiveConversationParams; use codex_app_server_protocol::CancelLoginAccountParams; use codex_app_server_protocol::CancelLoginChatGptParams; use codex_app_server_protocol::ClientInfo; use codex_app_server_protocol::ClientNotification; use codex_app_server_protocol::ConfigBatchWriteParams; use codex_app_server_protocol::ConfigReadParams; use codex_app_server_protocol::ConfigValueWriteParams; use codex_app_server_protocol::FeedbackUploadParams; use codex_app_server_protocol::GetAccountParams; use codex_app_server_protocol::GetAuthStatusParams; use codex_app_server_protocol::InitializeParams; use codex_app_server_protocol::InterruptConversationParams; use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCMessage; use codex_app_server_protocol::JSONRPCNotification; use codex_app_server_protocol::JSONRPCRequest; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::ListConversationsParams; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::ModelListParams; use codex_app_server_protocol::NewConversationParams; use codex_app_server_protocol::RemoveConversationListenerParams; use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ResumeConversationParams; use codex_app_server_protocol::ReviewStartParams; use codex_app_server_protocol::SendUserMessageParams; use codex_app_server_protocol::SendUserTurnParams; use codex_app_server_protocol::ServerRequest; use codex_app_server_protocol::SetDefaultModelParams; use 
codex_app_server_protocol::ThreadArchiveParams; use codex_app_server_protocol::ThreadListParams; use codex_app_server_protocol::ThreadResumeParams; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::TurnInterruptParams; use codex_app_server_protocol::TurnStartParams; use tokio::process::Command; pub struct McpProcess { next_request_id: AtomicI64, /// Retain this child process until the client is dropped. The Tokio runtime /// will make a "best effort" to reap the process after it exits, but it is /// not a guarantee. See the `kill_on_drop` documentation for details. #[allow(dead_code)] process: Child, stdin: ChildStdin, stdout: BufReader<ChildStdout>, pending_user_messages: VecDeque<JSONRPCNotification>, } impl McpProcess { pub async fn new(codex_home: &Path) -> anyhow::Result<Self> { Self::new_with_env(codex_home, &[]).await } /// Creates a new MCP process, allowing tests to override or remove /// specific environment variables for the child process only. /// /// Pass a tuple of (key, Some(value)) to set/override, or (key, None) to /// remove a variable from the child's environment. 
pub async fn new_with_env( codex_home: &Path, env_overrides: &[(&str, Option<&str>)], ) -> anyhow::Result<Self> { let program = codex_utils_cargo_bin::cargo_bin("codex-app-server") .context("should find binary for codex-app-server")?; let mut cmd = Command::new(program); cmd.stdin(Stdio::piped()); cmd.stdout(Stdio::piped()); cmd.stderr(Stdio::piped()); cmd.env("CODEX_HOME", codex_home); cmd.env("RUST_LOG", "debug"); for (k, v) in env_overrides { match v { Some(val) => { cmd.env(k, val); } None => { cmd.env_remove(k); } } } let mut process = cmd .kill_on_drop(true) .spawn() .context("codex-mcp-server proc should start")?; let stdin = process .stdin .take() .ok_or_else(|| anyhow::format_err!("mcp should have stdin fd"))?; let stdout = process .stdout .take() .ok_or_else(|| anyhow::format_err!("mcp should have stdout fd"))?; let stdout = BufReader::new(stdout); // Forward child's stderr to our stderr so failures are visible even // when stdout/stderr are captured by the test harness. if let Some(stderr) = process.stderr.take() { let mut stderr_reader = BufReader::new(stderr).lines(); tokio::spawn(async move { while let Ok(Some(line)) = stderr_reader.next_line().await { eprintln!("[mcp stderr] {line}"); } }); } Ok(Self { next_request_id: AtomicI64::new(0), process, stdin, stdout, pending_user_messages: VecDeque::new(), }) } /// Performs the initialization handshake with the MCP server. 
pub async fn initialize(&mut self) -> anyhow::Result<()> { let params = Some(serde_json::to_value(InitializeParams { client_info: ClientInfo { name: "codex-app-server-tests".to_string(), title: None, version: "0.1.0".to_string(), }, })?); let req_id = self.send_request("initialize", params).await?; let initialized = self.read_jsonrpc_message().await?; let JSONRPCMessage::Response(response) = initialized else { unreachable!("expected JSONRPCMessage::Response for initialize, got {initialized:?}"); }; if response.id != RequestId::Integer(req_id) { anyhow::bail!( "initialize response id mismatch: expected {}, got {:?}", req_id, response.id ); } // Send notifications/initialized to ack the response. self.send_notification(ClientNotification::Initialized) .await?; Ok(()) } /// Send a `newConversation` JSON-RPC request. pub async fn send_new_conversation_request( &mut self, params: NewConversationParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("newConversation", params).await } /// Send an `archiveConversation` JSON-RPC request. pub async fn send_archive_conversation_request( &mut self, params: ArchiveConversationParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("archiveConversation", params).await } /// Send an `addConversationListener` JSON-RPC request. pub async fn send_add_conversation_listener_request( &mut self, params: AddConversationListenerParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("addConversationListener", params).await } /// Send a `sendUserMessage` JSON-RPC request with a single text item. pub async fn send_send_user_message_request( &mut self, params: SendUserMessageParams, ) -> anyhow::Result<i64> { // Wire format expects variants in camelCase; text item uses external tagging. 
let params = Some(serde_json::to_value(params)?); self.send_request("sendUserMessage", params).await } /// Send a `removeConversationListener` JSON-RPC request. pub async fn send_remove_conversation_listener_request( &mut self, params: RemoveConversationListenerParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("removeConversationListener", params) .await } /// Send a `sendUserTurn` JSON-RPC request. pub async fn send_send_user_turn_request( &mut self, params: SendUserTurnParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("sendUserTurn", params).await } /// Send a `interruptConversation` JSON-RPC request. pub async fn send_interrupt_conversation_request( &mut self, params: InterruptConversationParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("interruptConversation", params).await } /// Send a `getAuthStatus` JSON-RPC request. pub async fn send_get_auth_status_request( &mut self, params: GetAuthStatusParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("getAuthStatus", params).await } /// Send a `getUserSavedConfig` JSON-RPC request. pub async fn send_get_user_saved_config_request(&mut self) -> anyhow::Result<i64> { self.send_request("getUserSavedConfig", None).await } /// Send a `getUserAgent` JSON-RPC request. pub async fn send_get_user_agent_request(&mut self) -> anyhow::Result<i64> { self.send_request("getUserAgent", None).await } /// Send an `account/rateLimits/read` JSON-RPC request. pub async fn send_get_account_rate_limits_request(&mut self) -> anyhow::Result<i64> { self.send_request("account/rateLimits/read", None).await } /// Send an `account/read` JSON-RPC request. 
pub async fn send_get_account_request( &mut self, params: GetAccountParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("account/read", params).await } /// Send a `feedback/upload` JSON-RPC request. pub async fn send_feedback_upload_request( &mut self, params: FeedbackUploadParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("feedback/upload", params).await } /// Send a `userInfo` JSON-RPC request. pub async fn send_user_info_request(&mut self) -> anyhow::Result<i64> { self.send_request("userInfo", None).await } /// Send a `setDefaultModel` JSON-RPC request. pub async fn send_set_default_model_request( &mut self, params: SetDefaultModelParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("setDefaultModel", params).await } /// Send a `listConversations` JSON-RPC request. pub async fn send_list_conversations_request( &mut self, params: ListConversationsParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("listConversations", params).await } /// Send a `thread/start` JSON-RPC request. pub async fn send_thread_start_request( &mut self, params: ThreadStartParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("thread/start", params).await } /// Send a `thread/resume` JSON-RPC request. pub async fn send_thread_resume_request( &mut self, params: ThreadResumeParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("thread/resume", params).await } /// Send a `thread/archive` JSON-RPC request. pub async fn send_thread_archive_request( &mut self, params: ThreadArchiveParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("thread/archive", params).await } /// Send a `thread/list` JSON-RPC request. 
pub async fn send_thread_list_request( &mut self, params: ThreadListParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("thread/list", params).await } /// Send a `model/list` JSON-RPC request. pub async fn send_list_models_request( &mut self, params: ModelListParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("model/list", params).await } /// Send a `resumeConversation` JSON-RPC request. pub async fn send_resume_conversation_request( &mut self, params: ResumeConversationParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("resumeConversation", params).await } /// Send a `loginApiKey` JSON-RPC request. pub async fn send_login_api_key_request( &mut self, params: LoginApiKeyParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("loginApiKey", params).await } /// Send a `loginChatGpt` JSON-RPC request. pub async fn send_login_chat_gpt_request(&mut self) -> anyhow::Result<i64> { self.send_request("loginChatGpt", None).await } /// Send a `turn/start` JSON-RPC request (v2). pub async fn send_turn_start_request( &mut self, params: TurnStartParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("turn/start", params).await } /// Send a `turn/interrupt` JSON-RPC request (v2). pub async fn send_turn_interrupt_request( &mut self, params: TurnInterruptParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("turn/interrupt", params).await } /// Send a `review/start` JSON-RPC request (v2). pub async fn send_review_start_request( &mut self, params: ReviewStartParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("review/start", params).await } /// Send a `cancelLoginChatGpt` JSON-RPC request. 
pub async fn send_cancel_login_chat_gpt_request( &mut self, params: CancelLoginChatGptParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("cancelLoginChatGpt", params).await } /// Send a `logoutChatGpt` JSON-RPC request. pub async fn send_logout_chat_gpt_request(&mut self) -> anyhow::Result<i64> { self.send_request("logoutChatGpt", None).await } pub async fn send_config_read_request( &mut self, params: ConfigReadParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("config/read", params).await } pub async fn send_config_value_write_request( &mut self, params: ConfigValueWriteParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("config/value/write", params).await } pub async fn send_config_batch_write_request( &mut self, params: ConfigBatchWriteParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("config/batchWrite", params).await } /// Send an `account/logout` JSON-RPC request. pub async fn send_logout_account_request(&mut self) -> anyhow::Result<i64> { self.send_request("account/logout", None).await } /// Send an `account/login/start` JSON-RPC request for API key login. pub async fn send_login_account_api_key_request( &mut self, api_key: &str, ) -> anyhow::Result<i64> { let params = serde_json::json!({ "type": "apiKey", "apiKey": api_key, }); self.send_request("account/login/start", Some(params)).await } /// Send an `account/login/start` JSON-RPC request for ChatGPT login. pub async fn send_login_account_chatgpt_request(&mut self) -> anyhow::Result<i64> { let params = serde_json::json!({ "type": "chatgpt" }); self.send_request("account/login/start", Some(params)).await } /// Send an `account/login/cancel` JSON-RPC request. 
pub async fn send_cancel_login_account_request( &mut self, params: CancelLoginAccountParams, ) -> anyhow::Result<i64> { let params = Some(serde_json::to_value(params)?); self.send_request("account/login/cancel", params).await } /// Send a `fuzzyFileSearch` JSON-RPC request. pub async fn send_fuzzy_file_search_request( &mut self, query: &str, roots: Vec<String>, cancellation_token: Option<String>, ) -> anyhow::Result<i64> { let mut params = serde_json::json!({ "query": query, "roots": roots, }); if let Some(token) = cancellation_token { params["cancellationToken"] = serde_json::json!(token); } self.send_request("fuzzyFileSearch", Some(params)).await } async fn send_request( &mut self, method: &str, params: Option<serde_json::Value>, ) -> anyhow::Result<i64> { let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed); let message = JSONRPCMessage::Request(JSONRPCRequest { id: RequestId::Integer(request_id), method: method.to_string(), params, }); self.send_jsonrpc_message(message).await?; Ok(request_id) } pub async fn send_response( &mut self, id: RequestId, result: serde_json::Value, ) -> anyhow::Result<()> { self.send_jsonrpc_message(JSONRPCMessage::Response(JSONRPCResponse { id, result })) .await } pub async fn send_notification( &mut self, notification: ClientNotification, ) -> anyhow::Result<()> { let value = serde_json::to_value(notification)?; self.send_jsonrpc_message(JSONRPCMessage::Notification(JSONRPCNotification { method: value .get("method") .and_then(|m| m.as_str()) .ok_or_else(|| anyhow::format_err!("notification missing method field"))? 
.to_string(), params: value.get("params").cloned(), })) .await } async fn send_jsonrpc_message(&mut self, message: JSONRPCMessage) -> anyhow::Result<()> { eprintln!("writing message to stdin: {message:?}"); let payload = serde_json::to_string(&message)?; self.stdin.write_all(payload.as_bytes()).await?; self.stdin.write_all(b"\n").await?; self.stdin.flush().await?; Ok(()) } async fn read_jsonrpc_message(&mut self) -> anyhow::Result<JSONRPCMessage> { let mut line = String::new(); self.stdout.read_line(&mut line).await?; let message = serde_json::from_str::<JSONRPCMessage>(&line)?; eprintln!("read message from stdout: {message:?}"); Ok(message) } pub async fn read_stream_until_request_message(&mut self) -> anyhow::Result<ServerRequest> { eprintln!("in read_stream_until_request_message()"); loop { let message = self.read_jsonrpc_message().await?; match message { JSONRPCMessage::Notification(notification) => { eprintln!("notification: {notification:?}"); self.enqueue_user_message(notification); } JSONRPCMessage::Request(jsonrpc_request) => { return jsonrpc_request.try_into().with_context( || "failed to deserialize ServerRequest from JSONRPCRequest", ); } JSONRPCMessage::Error(_) => { anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}"); } JSONRPCMessage::Response(_) => { anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}"); } } } } pub async fn read_stream_until_response_message( &mut self, request_id: RequestId, ) -> anyhow::Result<JSONRPCResponse> { eprintln!("in read_stream_until_response_message({request_id:?})"); loop { let message = self.read_jsonrpc_message().await?; match message { JSONRPCMessage::Notification(notification) => { eprintln!("notification: {notification:?}"); self.enqueue_user_message(notification); } JSONRPCMessage::Request(_) => { anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}"); } JSONRPCMessage::Error(_) => { anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}"); } 
JSONRPCMessage::Response(jsonrpc_response) => { if jsonrpc_response.id == request_id { return Ok(jsonrpc_response); } } } } } pub async fn read_stream_until_error_message( &mut self, request_id: RequestId, ) -> anyhow::Result<JSONRPCError> { loop { let message = self.read_jsonrpc_message().await?; match message { JSONRPCMessage::Notification(notification) => { eprintln!("notification: {notification:?}"); self.enqueue_user_message(notification); } JSONRPCMessage::Request(_) => { anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}"); } JSONRPCMessage::Response(_) => { // Keep scanning; we're waiting for an error with matching id. } JSONRPCMessage::Error(err) => { if err.id == request_id { return Ok(err); } } } } } pub async fn read_stream_until_notification_message( &mut self, method: &str, ) -> anyhow::Result<JSONRPCNotification> { eprintln!("in read_stream_until_notification_message({method})"); if let Some(notification) = self.take_pending_notification_by_method(method) { return Ok(notification); } loop { let message = self.read_jsonrpc_message().await?; match message { JSONRPCMessage::Notification(notification) => { if notification.method == method { return Ok(notification); } self.enqueue_user_message(notification); } JSONRPCMessage::Request(_) => { anyhow::bail!("unexpected JSONRPCMessage::Request: {message:?}"); } JSONRPCMessage::Error(_) => { anyhow::bail!("unexpected JSONRPCMessage::Error: {message:?}"); } JSONRPCMessage::Response(_) => { anyhow::bail!("unexpected JSONRPCMessage::Response: {message:?}"); } } } } fn take_pending_notification_by_method(&mut self, method: &str) -> Option<JSONRPCNotification> { if let Some(pos) = self .pending_user_messages .iter() .position(|notification| notification.method == method) { return self.pending_user_messages.remove(pos); } None } fn enqueue_user_message(&mut self, notification: JSONRPCNotification) { if notification.method == "codex/event/user_message" { 
self.pending_user_messages.push_back(notification); } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/mock_model_server.rs
codex-rs/app-server/tests/common/mock_model_server.rs
use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use wiremock::Mock; use wiremock::MockServer; use wiremock::Respond; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; /// Create a mock server that will provide the responses, in order, for /// requests to the `/v1/chat/completions` endpoint. pub async fn create_mock_chat_completions_server(responses: Vec<String>) -> MockServer { let server = MockServer::start().await; let num_calls = responses.len(); let seq_responder = SeqResponder { num_calls: AtomicUsize::new(0), responses, }; Mock::given(method("POST")) .and(path("/v1/chat/completions")) .respond_with(seq_responder) .expect(num_calls as u64) .mount(&server) .await; server } /// Same as `create_mock_chat_completions_server` but does not enforce an /// expectation on the number of calls. pub async fn create_mock_chat_completions_server_unchecked(responses: Vec<String>) -> MockServer { let server = MockServer::start().await; let seq_responder = SeqResponder { num_calls: AtomicUsize::new(0), responses, }; Mock::given(method("POST")) .and(path("/v1/chat/completions")) .respond_with(seq_responder) .mount(&server) .await; server } struct SeqResponder { num_calls: AtomicUsize, responses: Vec<String>, } impl Respond for SeqResponder { fn respond(&self, _: &wiremock::Request) -> ResponseTemplate { let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst); match self.responses.get(call_num) { Some(response) => ResponseTemplate::new(200) .insert_header("content-type", "text/event-stream") .set_body_raw(response.clone(), "text/event-stream"), None => panic!("no response for {call_num}"), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server/tests/common/models_cache.rs
codex-rs/app-server/tests/common/models_cache.rs
use chrono::DateTime; use chrono::Utc; use codex_core::models_manager::model_presets::all_model_presets; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::TruncationPolicyConfig; use serde_json::json; use std::path::Path; /// Convert a ModelPreset to ModelInfo for cache storage. fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo { ModelInfo { slug: preset.id.clone(), display_name: preset.display_name.clone(), description: Some(preset.description.clone()), default_reasoning_level: preset.default_reasoning_effort, supported_reasoning_levels: preset.supported_reasoning_efforts.clone(), shell_type: ConfigShellToolType::ShellCommand, visibility: if preset.show_in_picker { ModelVisibility::List } else { ModelVisibility::Hide }, supported_in_api: true, priority, upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()), base_instructions: None, supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: None, truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, experimental_supported_tools: Vec::new(), } } // todo(aibrahim): fix the priorities to be the opposite here. /// Write a models_cache.json file to the codex home directory. /// This prevents ModelsManager from making network requests to refresh models. /// The cache will be treated as fresh (within TTL) and used instead of fetching from the network. /// Uses the built-in model presets from ModelsManager, converted to ModelInfo format. 
pub fn write_models_cache(codex_home: &Path) -> std::io::Result<()> { // Get all presets and filter for show_in_picker (same as builtin_model_presets does) let presets: Vec<&ModelPreset> = all_model_presets() .iter() .filter(|preset| preset.show_in_picker) .collect(); // Convert presets to ModelInfo, assigning priorities (higher = earlier in list) // Priority is used for sorting, so first model gets highest priority let models: Vec<ModelInfo> = presets .iter() .enumerate() .map(|(idx, preset)| { // Higher priority = earlier in list, so reverse the index let priority = (presets.len() - idx) as i32; preset_to_info(preset, priority) }) .collect(); write_models_cache_with_models(codex_home, models) } /// Write a models_cache.json file with specific models. /// Useful when tests need specific models to be available. pub fn write_models_cache_with_models( codex_home: &Path, models: Vec<ModelInfo>, ) -> std::io::Result<()> { let cache_path = codex_home.join("models_cache.json"); // DateTime<Utc> serializes to RFC3339 format by default with serde let fetched_at: DateTime<Utc> = Utc::now(); let cache = json!({ "fetched_at": fetched_at, "etag": null, "models": models }); std::fs::write(cache_path, serde_json::to_string_pretty(&cache)?) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/arg0/src/lib.rs
codex-rs/arg0/src/lib.rs
use std::future::Future;
use std::path::Path;
use std::path::PathBuf;

use codex_core::CODEX_APPLY_PATCH_ARG1;

#[cfg(unix)]
use std::os::unix::fs::symlink;
use tempfile::TempDir;

// Alias names checked against argv[0]; see the module docs on the arg0 trick.
const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox";
const APPLY_PATCH_ARG0: &str = "apply_patch";
const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch";

/// Inspect argv[0]/argv[1] and either divert into one of the embedded CLIs
/// (never returning) or set up the environment for a normal Codex run,
/// returning the TempDir that backs the PATH aliases (if it could be created).
pub fn arg0_dispatch() -> Option<TempDir> {
    // Determine if we were invoked via the special alias.
    let mut args = std::env::args_os();
    let argv0 = args.next().unwrap_or_default();
    let exe_name = Path::new(&argv0)
        .file_name()
        .and_then(|s| s.to_str())
        .unwrap_or("");
    if exe_name == LINUX_SANDBOX_ARG0 {
        // Safety: [`run_main`] never returns.
        codex_linux_sandbox::run_main();
    } else if exe_name == APPLY_PATCH_ARG0 || exe_name == MISSPELLED_APPLY_PATCH_ARG0 {
        codex_apply_patch::main();
    }

    // Second dispatch channel: a hidden argv[1] flag used by the Windows
    // batch-script aliases (which cannot change argv[0]).
    let argv1 = args.next().unwrap_or_default();
    if argv1 == CODEX_APPLY_PATCH_ARG1 {
        let patch_arg = args.next().and_then(|s| s.to_str().map(str::to_owned));
        let exit_code = match patch_arg {
            Some(patch_arg) => {
                let mut stdout = std::io::stdout();
                let mut stderr = std::io::stderr();
                match codex_apply_patch::apply_patch(&patch_arg, &mut stdout, &mut stderr) {
                    Ok(()) => 0,
                    Err(_) => 1,
                }
            }
            None => {
                eprintln!("Error: {CODEX_APPLY_PATCH_ARG1} requires a UTF-8 PATCH argument.");
                1
            }
        };
        // This process exists solely to apply the patch; exit immediately.
        std::process::exit(exit_code);
    }

    // This modifies the environment, which is not thread-safe, so do this
    // before creating any threads/the Tokio runtime.
    load_dotenv();

    match prepend_path_entry_for_codex_aliases() {
        Ok(path_entry) => Some(path_entry),
        Err(err) => {
            // It is possible that Codex will proceed successfully even if
            // updating the PATH fails, so warn the user and move on.
            eprintln!("WARNING: proceeding, even though we could not update PATH: {err}");
            None
        }
    }
}

/// While we want to deploy the Codex CLI as a single executable for simplicity,
/// we also want to expose some of its functionality as distinct CLIs, so we use
/// the "arg0 trick" to determine which CLI to dispatch. This effectively allows
/// us to simulate deploying multiple executables as a single binary on Mac and
/// Linux (but not Windows).
///
/// When the current executable is invoked through the hard-link or alias named
/// `codex-linux-sandbox` we *directly* execute
/// [`codex_linux_sandbox::run_main`] (which never returns). Otherwise we:
///
/// 1. Load `.env` values from `~/.codex/.env` before creating any threads.
/// 2. Construct a Tokio multi-thread runtime.
/// 3. Derive the path to the current executable (so children can re-invoke the
///    sandbox) when running on Linux.
/// 4. Execute the provided async `main_fn` inside that runtime, forwarding any
///    error. Note that `main_fn` receives `codex_linux_sandbox_exe:
///    Option<PathBuf>`, as an argument, which is generally needed as part of
///    constructing [`codex_core::config::Config`].
///
/// This function should be used to wrap any `main()` function in binary crates
/// in this workspace that depends on these helper CLIs.
pub fn arg0_dispatch_or_else<F, Fut>(main_fn: F) -> anyhow::Result<()>
where
    F: FnOnce(Option<PathBuf>) -> Fut,
    Fut: Future<Output = anyhow::Result<()>>,
{
    // Retain the TempDir so it exists for the lifetime of the invocation of
    // this executable. Admittedly, we could invoke `keep()` on it, but it
    // would be nice to avoid leaving temporary directories behind, if possible.
    let _path_entry = arg0_dispatch();

    // Regular invocation – create a Tokio runtime and execute the provided
    // async entry-point.
    let runtime = tokio::runtime::Runtime::new()?;
    runtime.block_on(async move {
        // Children re-invoke this same binary as the sandbox helper, so only
        // Linux needs the executable path.
        let codex_linux_sandbox_exe: Option<PathBuf> = if cfg!(target_os = "linux") {
            std::env::current_exe().ok()
        } else {
            None
        };
        main_fn(codex_linux_sandbox_exe).await
    })
}

// `.env` files must never be able to (re)define Codex-internal variables.
const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_";

/// Load env vars from ~/.codex/.env.
///
/// Security: Do not allow `.env` files to create or modify any variables
/// with names starting with `CODEX_`.
fn load_dotenv() {
    if let Ok(codex_home) = codex_core::config::find_codex_home()
        && let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env"))
    {
        set_filtered(iter);
    }
}

/// Helper to set vars from a dotenvy iterator while filtering out `CODEX_` keys.
fn set_filtered<I>(iter: I)
where
    I: IntoIterator<Item = Result<(String, String), dotenvy::Error>>,
{
    // `flatten()` silently drops malformed entries; a bad line in .env is
    // best-effort ignored rather than fatal.
    for (key, value) in iter.into_iter().flatten() {
        if !key.to_ascii_uppercase().starts_with(ILLEGAL_ENV_VAR_PREFIX) {
            // It is safe to call set_var() because our process is
            // single-threaded at this point in its execution.
            unsafe { std::env::set_var(&key, &value) };
        }
    }
}

/// Creates a temporary directory with either:
///
/// - UNIX: `apply_patch` symlink to the current executable
/// - WINDOWS: `apply_patch.bat` batch script to invoke the current executable
///   with the "secret" --codex-run-as-apply-patch flag.
///
/// This temporary directory is prepended to the PATH environment variable so
/// that `apply_patch` can be on the PATH without requiring the user to
/// install a separate `apply_patch` executable, simplifying the deployment of
/// Codex CLI.
///
/// IMPORTANT: This function modifies the PATH environment variable, so it MUST
/// be called before multiple threads are spawned.
pub fn prepend_path_entry_for_codex_aliases() -> std::io::Result<TempDir> {
    let temp_dir = TempDir::new()?;
    let path = temp_dir.path();

    // The current executable path is loop-invariant; resolve it once instead
    // of once per alias.
    let exe = std::env::current_exe()?;

    for filename in &[
        APPLY_PATCH_ARG0,
        MISSPELLED_APPLY_PATCH_ARG0,
        #[cfg(target_os = "linux")]
        LINUX_SANDBOX_ARG0,
    ] {
        #[cfg(unix)]
        {
            let link = path.join(filename);
            symlink(&exe, &link)?;
        }

        #[cfg(windows)]
        {
            // Bug fix: the batch script must be named after the alias being
            // created (e.g. `apply_patch.bat`); the previous hard-coded
            // "(unknown).bat" name made every alias overwrite one bogus file,
            // so `apply_patch` was never resolvable on PATH on Windows.
            let batch_script = path.join(format!("{filename}.bat"));
            std::fs::write(
                &batch_script,
                format!(
                    r#"@echo off
"{}" {CODEX_APPLY_PATCH_ARG1} %*
"#,
                    exe.display()
                ),
            )?;
        }
    }

    #[cfg(unix)]
    const PATH_SEPARATOR: &str = ":";
    #[cfg(windows)]
    const PATH_SEPARATOR: &str = ";";

    // Prepend our alias directory so it wins over any pre-existing entries.
    let path_element = path.display();
    let updated_path_env_var = match std::env::var("PATH") {
        Ok(existing_path) => {
            format!("{path_element}{PATH_SEPARATOR}{existing_path}")
        }
        Err(_) => path_element.to_string(),
    };

    // SAFETY: per this function's doc comment, callers must invoke it before
    // any additional threads are spawned; mutating the environment is only
    // sound while the process is single-threaded.
    unsafe {
        std::env::set_var("PATH", updated_path_env_var);
    }

    Ok(temp_dir)
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/lib.rs
codex-rs/codex-api/src/lib.rs
pub mod auth; pub mod common; pub mod endpoint; pub mod error; pub mod provider; pub mod rate_limits; pub mod requests; pub mod sse; pub mod telemetry; pub use codex_client::RequestTelemetry; pub use codex_client::ReqwestTransport; pub use codex_client::TransportError; pub use crate::auth::AuthProvider; pub use crate::common::CompactionInput; pub use crate::common::Prompt; pub use crate::common::ResponseEvent; pub use crate::common::ResponseStream; pub use crate::common::ResponsesApiRequest; pub use crate::common::create_text_param_for_request; pub use crate::endpoint::chat::AggregateStreamExt; pub use crate::endpoint::chat::ChatClient; pub use crate::endpoint::compact::CompactClient; pub use crate::endpoint::models::ModelsClient; pub use crate::endpoint::responses::ResponsesClient; pub use crate::endpoint::responses::ResponsesOptions; pub use crate::error::ApiError; pub use crate::provider::Provider; pub use crate::provider::WireApi; pub use crate::requests::ChatRequest; pub use crate::requests::ChatRequestBuilder; pub use crate::requests::ResponsesRequest; pub use crate::requests::ResponsesRequestBuilder; pub use crate::sse::stream_from_fixture; pub use crate::telemetry::SseTelemetry;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/telemetry.rs
codex-rs/codex-api/src/telemetry.rs
use codex_client::Request;
use codex_client::RequestTelemetry;
use codex_client::Response;
use codex_client::RetryPolicy;
use codex_client::StreamResponse;
use codex_client::TransportError;
use codex_client::run_with_retry;
use http::StatusCode;
use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::Instant;

/// Generic telemetry.
///
/// Implementors observe each poll of an SSE stream: `result` is the raw poll
/// outcome (outer `Err` = idle timeout, inner layers = stream end / event /
/// parse error) and `duration` is how long the poll took.
pub trait SseTelemetry: Send + Sync {
    fn on_sse_poll(
        &self,
        result: &Result<
            Option<
                Result<
                    eventsource_stream::Event,
                    eventsource_stream::EventStreamError<TransportError>,
                >,
            >,
            tokio::time::error::Elapsed,
        >,
        duration: Duration,
    );
}

// Lets `run_with_request_telemetry` read an HTTP status off either a unary
// `Response` or a `StreamResponse` without caring which it got.
pub(crate) trait WithStatus {
    fn status(&self) -> StatusCode;
}

// Extract the HTTP status from a transport error, if it carries one.
fn http_status(err: &TransportError) -> Option<StatusCode> {
    match err {
        TransportError::Http { status, .. } => Some(*status),
        _ => None,
    }
}

impl WithStatus for Response {
    fn status(&self) -> StatusCode {
        self.status
    }
}

impl WithStatus for StreamResponse {
    fn status(&self) -> StatusCode {
        self.status
    }
}

/// Run `send` under the given retry `policy`, reporting each attempt's status,
/// error (if any), and latency to `telemetry` when one is configured.
pub(crate) async fn run_with_request_telemetry<T, F, Fut>(
    policy: RetryPolicy,
    telemetry: Option<Arc<dyn RequestTelemetry>>,
    make_request: impl FnMut() -> Request,
    send: F,
) -> Result<T, TransportError>
where
    T: WithStatus,
    F: Clone + Fn(Request) -> Fut,
    Fut: Future<Output = Result<T, TransportError>>,
{
    // Wraps `run_with_retry` to attach per-attempt request telemetry for both
    // unary and streaming HTTP calls.
    run_with_retry(policy, make_request, move |req, attempt| {
        // Clone per attempt: each retry gets its own handle on the telemetry
        // sink and the send closure.
        let telemetry = telemetry.clone();
        let send = send.clone();
        async move {
            let start = Instant::now();
            let result = send(req).await;
            if let Some(t) = telemetry.as_ref() {
                let (status, err) = match &result {
                    Ok(resp) => (Some(resp.status()), None),
                    Err(err) => (http_status(err), Some(err)),
                };
                t.on_request(attempt, status, err, start.elapsed());
            }
            result
        }
    })
    .await
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/error.rs
codex-rs/codex-api/src/error.rs
use crate::rate_limits::RateLimitError;
use codex_client::TransportError;
use http::StatusCode;
use std::time::Duration;
use thiserror::Error;

/// Errors produced by the codex-api endpoint clients.
#[derive(Debug, Error)]
pub enum ApiError {
    /// Low-level transport failure, forwarded unchanged from `codex-client`.
    #[error(transparent)]
    Transport(#[from] TransportError),
    /// Server replied with a non-success HTTP status and a message body.
    #[error("api error {status}: {message}")]
    Api { status: StatusCode, message: String },
    /// The SSE/stream machinery failed (malformed events, encoding, etc.).
    #[error("stream error: {0}")]
    Stream(String),
    /// The prompt no longer fits in the model's context window.
    #[error("context window exceeded")]
    ContextWindowExceeded,
    /// The account's usage quota is exhausted.
    #[error("quota exceeded")]
    QuotaExceeded,
    /// The response lacked usage accounting data.
    #[error("usage not included")]
    UsageNotIncluded,
    /// Transient failure the caller may retry, optionally after `delay`.
    #[error("retryable error: {message}")]
    Retryable {
        message: String,
        delay: Option<Duration>,
    },
    /// Rate-limit rejection; payload is the human-readable server message.
    #[error("rate limit: {0}")]
    RateLimit(String),
}

// Rate-limit parsing errors collapse into the string-carrying variant.
impl From<RateLimitError> for ApiError {
    fn from(err: RateLimitError) -> Self {
        Self::RateLimit(err.to_string())
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/auth.rs
codex-rs/codex-api/src/auth.rs
use codex_client::Request; /// Provides bearer and account identity information for API requests. /// /// Implementations should be cheap and non-blocking; any asynchronous /// refresh or I/O should be handled by higher layers before requests /// reach this interface. pub trait AuthProvider: Send + Sync { fn bearer_token(&self) -> Option<String>; fn account_id(&self) -> Option<String> { None } } pub(crate) fn add_auth_headers<A: AuthProvider>(auth: &A, mut req: Request) -> Request { if let Some(token) = auth.bearer_token() && let Ok(header) = format!("Bearer {token}").parse() { let _ = req.headers.insert(http::header::AUTHORIZATION, header); } if let Some(account_id) = auth.account_id() && let Ok(header) = account_id.parse() { let _ = req.headers.insert("ChatGPT-Account-ID", header); } req }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/common.rs
codex-rs/codex-api/src/common.rs
use crate::error::ApiError;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::TokenUsage;
use futures::Stream;
use serde::Serialize;
use serde_json::Value;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;

/// Canonical prompt input for Chat and Responses endpoints.
///
/// One value of this type describes everything a single turn sends to the
/// model, independent of which wire API ultimately carries it.
#[derive(Debug, Clone)]
pub struct Prompt {
    /// Fully-resolved system instructions for this turn.
    pub instructions: String,
    /// Conversation history and user/tool messages.
    pub input: Vec<ResponseItem>,
    /// JSON-encoded tool definitions compatible with the target API.
    // TODO(jif) have a proper type here
    pub tools: Vec<Value>,
    /// Whether parallel tool calls are permitted.
    pub parallel_tool_calls: bool,
    /// Optional output schema used to build the `text.format` controls.
    pub output_schema: Option<Value>,
}

/// Canonical input payload for the compaction endpoint.
#[derive(Debug, Clone, Serialize)]
pub struct CompactionInput<'a> {
    pub model: &'a str,
    pub input: &'a [ResponseItem],
    pub instructions: &'a str,
}

/// Events surfaced while consuming a model response stream.
#[derive(Debug)]
pub enum ResponseEvent {
    /// The server acknowledged creation of the response.
    Created,
    /// An output item finished streaming.
    OutputItemDone(ResponseItem),
    /// A new output item started streaming.
    OutputItemAdded(ResponseItem),
    /// Terminal event carrying the response id and (optional) token usage.
    Completed {
        response_id: String,
        token_usage: Option<TokenUsage>,
    },
    /// Incremental assistant text.
    OutputTextDelta(String),
    /// Incremental reasoning-summary text for the given summary section.
    ReasoningSummaryDelta {
        delta: String,
        summary_index: i64,
    },
    /// Incremental raw reasoning content for the given content section.
    ReasoningContentDelta {
        delta: String,
        content_index: i64,
    },
    /// A new reasoning-summary section opened.
    ReasoningSummaryPartAdded {
        summary_index: i64,
    },
    /// Rate-limit information parsed from response headers.
    RateLimits(RateLimitSnapshot),
    /// ETag for the models listing, used for cache validation.
    ModelsEtag(String),
}

/// Serialized `reasoning` controls for a Responses request; absent fields are
/// omitted from the JSON entirely.
#[derive(Debug, Serialize, Clone)]
pub struct Reasoning {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffortConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummaryConfig>,
}

// Serializes as "json_schema" (snake_case rename).
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "snake_case")]
pub enum TextFormatType {
    #[default]
    JsonSchema,
}

#[derive(Debug, Serialize, Default, Clone)]
pub struct TextFormat {
    /// Format type used by the OpenAI text controls.
    pub r#type: TextFormatType,
    /// When true, the server is expected to strictly validate responses.
    pub strict: bool,
    /// JSON schema for the desired output.
    pub schema: Value,
    /// Friendly name for the format, used in telemetry/debugging.
    pub name: String,
}

/// Controls the `text` field for the Responses API, combining verbosity and
/// optional JSON schema output formatting.
#[derive(Debug, Serialize, Default, Clone)]
pub struct TextControls {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<OpenAiVerbosity>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<TextFormat>,
}

/// Wire form of the verbosity knob; serializes as "low"/"medium"/"high".
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiVerbosity {
    Low,
    #[default]
    Medium,
    High,
}

// One-to-one mapping from the config-level verbosity enum.
impl From<VerbosityConfig> for OpenAiVerbosity {
    fn from(v: VerbosityConfig) -> Self {
        match v {
            VerbosityConfig::Low => OpenAiVerbosity::Low,
            VerbosityConfig::Medium => OpenAiVerbosity::Medium,
            VerbosityConfig::High => OpenAiVerbosity::High,
        }
    }
}

/// Serialized body of a Responses API request; field names match the wire
/// protocol, so renames here are wire-visible.
#[derive(Debug, Serialize)]
pub struct ResponsesApiRequest<'a> {
    pub model: &'a str,
    pub instructions: &'a str,
    pub input: &'a [ResponseItem],
    pub tools: &'a [serde_json::Value],
    pub tool_choice: &'static str,
    pub parallel_tool_calls: bool,
    pub reasoning: Option<Reasoning>,
    pub store: bool,
    pub stream: bool,
    pub include: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextControls>,
}

/// Build the `text` controls for a request, or `None` when neither a verbosity
/// nor an output schema was requested (so the field is omitted entirely).
pub fn create_text_param_for_request(
    verbosity: Option<VerbosityConfig>,
    output_schema: &Option<Value>,
) -> Option<TextControls> {
    if verbosity.is_none() && output_schema.is_none() {
        return None;
    }
    Some(TextControls {
        verbosity: verbosity.map(std::convert::Into::into),
        format: output_schema.as_ref().map(|schema| TextFormat {
            r#type: TextFormatType::JsonSchema,
            strict: true,
            schema: schema.clone(),
            name: "codex_output_schema".to_string(),
        }),
    })
}

/// Channel-backed stream of response events; closes when the sender drops.
pub struct ResponseStream {
    pub rx_event: mpsc::Receiver<Result<ResponseEvent, ApiError>>,
}

impl Stream for ResponseStream {
    type Item = Result<ResponseEvent, ApiError>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Delegates directly to the mpsc receiver; no buffering here.
        self.rx_event.poll_recv(cx)
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/provider.rs
codex-rs/codex-api/src/provider.rs
use codex_client::Request;
use codex_client::RetryOn;
use codex_client::RetryPolicy;
use http::Method;
use http::header::HeaderMap;
use std::collections::HashMap;
use std::time::Duration;

/// Wire-level APIs supported by a `Provider`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum WireApi {
    Responses,
    Chat,
    Compact,
}

/// High-level retry configuration for a provider.
///
/// This is converted into a `RetryPolicy` used by `codex-client` to drive
/// transport-level retries for both unary and streaming calls.
#[derive(Debug, Clone)]
pub struct RetryConfig {
    /// Maximum number of attempts (including the first).
    pub max_attempts: u64,
    /// Base delay for the backoff schedule.
    pub base_delay: Duration,
    /// Whether HTTP 429 responses are retried.
    pub retry_429: bool,
    /// Whether HTTP 5xx responses are retried.
    pub retry_5xx: bool,
    /// Whether transport-level failures (e.g. connection errors) are retried.
    pub retry_transport: bool,
}

impl RetryConfig {
    /// Lower this config into the `codex-client` retry policy.
    pub fn to_policy(&self) -> RetryPolicy {
        RetryPolicy {
            max_attempts: self.max_attempts,
            base_delay: self.base_delay,
            retry_on: RetryOn {
                retry_429: self.retry_429,
                retry_5xx: self.retry_5xx,
                retry_transport: self.retry_transport,
            },
        }
    }
}

/// HTTP endpoint configuration used to talk to a concrete API deployment.
///
/// Encapsulates base URL, default headers, query params, retry policy, and
/// stream idle timeout, plus helper methods for building requests.
#[derive(Debug, Clone)]
pub struct Provider {
    pub name: String,
    pub base_url: String,
    pub query_params: Option<HashMap<String, String>>,
    pub wire: WireApi,
    pub headers: HeaderMap,
    pub retry: RetryConfig,
    pub stream_idle_timeout: Duration,
}

impl Provider {
    /// Join `path` onto the base URL (normalizing slashes) and append any
    /// configured query parameters.
    pub fn url_for_path(&self, path: &str) -> String {
        let base = self.base_url.trim_end_matches('/');
        let path = path.trim_start_matches('/');
        let mut url = match path.is_empty() {
            true => base.to_string(),
            false => format!("{base}/{path}"),
        };
        if let Some(params) = &self.query_params {
            if !params.is_empty() {
                let pairs: Vec<String> =
                    params.iter().map(|(k, v)| format!("{k}={v}")).collect();
                url.push('?');
                url.push_str(&pairs.join("&"));
            }
        }
        url
    }

    /// Construct a bare `Request` for this provider: resolved URL, default
    /// headers, no body, no per-request timeout.
    pub fn build_request(&self, method: Method, path: &str) -> Request {
        let url = self.url_for_path(path);
        Request {
            method,
            url,
            headers: self.headers.clone(),
            body: None,
            timeout: None,
        }
    }

    /// Heuristically detect an Azure-hosted Responses endpoint, by provider
    /// name or by well-known Azure host patterns in the base URL.
    pub fn is_azure_responses_endpoint(&self) -> bool {
        if self.wire != WireApi::Responses {
            return false;
        }
        let lowered = self.base_url.to_ascii_lowercase();
        self.name.eq_ignore_ascii_case("azure")
            || lowered.contains("openai.azure.")
            || matches_azure_responses_base_url(&self.base_url)
    }
}

/// True when the base URL contains any of the known Azure hosting markers
/// (matched case-insensitively).
fn matches_azure_responses_base_url(base_url: &str) -> bool {
    const AZURE_MARKERS: [&str; 5] = [
        "cognitiveservices.azure.",
        "aoai.azure.",
        "azure-api.",
        "azurefd.",
        "windows.net/openai",
    ];
    let lowered = base_url.to_ascii_lowercase();
    AZURE_MARKERS.iter().any(|marker| lowered.contains(marker))
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/rate_limits.rs
codex-rs/codex-api/src/rate_limits.rs
use codex_protocol::protocol::CreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use http::HeaderMap;
use std::fmt::Display;

/// Error surfaced when rate-limit information cannot be interpreted.
#[derive(Debug)]
pub struct RateLimitError {
    pub message: String,
}

impl Display for RateLimitError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.message)
    }
}

/// Parses the bespoke Codex rate-limit headers into a `RateLimitSnapshot`.
pub fn parse_rate_limit(headers: &HeaderMap) -> Option<RateLimitSnapshot> {
    let primary = parse_rate_limit_window(
        headers,
        "x-codex-primary-used-percent",
        "x-codex-primary-window-minutes",
        "x-codex-primary-reset-at",
    );
    let secondary = parse_rate_limit_window(
        headers,
        "x-codex-secondary-used-percent",
        "x-codex-secondary-window-minutes",
        "x-codex-secondary-reset-at",
    );
    let credits = parse_credits_snapshot(headers);
    Some(RateLimitSnapshot {
        primary,
        secondary,
        credits,
        plan_type: None,
    })
}

// A window is only reported when the used-percent header parses AND at least
// one of the three values carries real (non-zero / present) data.
fn parse_rate_limit_window(
    headers: &HeaderMap,
    used_percent_header: &str,
    window_minutes_header: &str,
    resets_at_header: &str,
) -> Option<RateLimitWindow> {
    let used_percent: Option<f64> = parse_header_f64(headers, used_percent_header);
    used_percent.and_then(|used_percent| {
        let window_minutes = parse_header_i64(headers, window_minutes_header);
        let resets_at = parse_header_i64(headers, resets_at_header);
        let has_data = used_percent != 0.0
            || window_minutes.is_some_and(|minutes| minutes != 0)
            || resets_at.is_some();
        has_data.then_some(RateLimitWindow {
            used_percent,
            window_minutes,
            resets_at,
        })
    })
}

// Both boolean headers are required; the balance is optional and blank values
// are treated as absent.
fn parse_credits_snapshot(headers: &HeaderMap) -> Option<CreditsSnapshot> {
    let has_credits = parse_header_bool(headers, "x-codex-credits-has-credits")?;
    let unlimited = parse_header_bool(headers, "x-codex-credits-unlimited")?;
    let balance = parse_header_str(headers, "x-codex-credits-balance")
        .map(str::trim)
        .filter(|value| !value.is_empty())
        .map(std::string::ToString::to_string);
    Some(CreditsSnapshot {
        has_credits,
        unlimited,
        balance,
    })
}

// NaN/inf are rejected so downstream arithmetic stays well-defined.
fn parse_header_f64(headers: &HeaderMap, name: &str) -> Option<f64> {
    parse_header_str(headers, name)?
        .parse::<f64>()
        .ok()
        .filter(|v| v.is_finite())
}

fn parse_header_i64(headers: &HeaderMap, name: &str) -> Option<i64> {
    parse_header_str(headers, name)?.parse::<i64>().ok()
}

// Accepts "true"/"false" (case-insensitive) and "1"/"0"; anything else is None.
fn parse_header_bool(headers: &HeaderMap, name: &str) -> Option<bool> {
    let raw = parse_header_str(headers, name)?;
    if raw.eq_ignore_ascii_case("true") || raw == "1" {
        Some(true)
    } else if raw.eq_ignore_ascii_case("false") || raw == "0" {
        Some(false)
    } else {
        None
    }
}

fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
    headers.get(name)?.to_str().ok()
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/requests/responses.rs
codex-rs/codex-api/src/requests/responses.rs
use crate::common::Reasoning;
use crate::common::ResponsesApiRequest;
use crate::common::TextControls;
use crate::error::ApiError;
use crate::provider::Provider;
use crate::requests::headers::build_conversation_headers;
use crate::requests::headers::insert_header;
use crate::requests::headers::subagent_header;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::SessionSource;
use http::HeaderMap;
use serde_json::Value;

/// Assembled request body plus headers for a Responses stream request.
pub struct ResponsesRequest {
    pub body: Value,
    pub headers: HeaderMap,
}

/// Builder that collects the pieces of a Responses request; `build` validates
/// the required fields and produces the final body + headers.
#[derive(Default)]
pub struct ResponsesRequestBuilder<'a> {
    model: Option<&'a str>,
    instructions: Option<&'a str>,
    input: Option<&'a [ResponseItem]>,
    tools: Option<&'a [Value]>,
    parallel_tool_calls: bool,
    reasoning: Option<Reasoning>,
    include: Vec<String>,
    prompt_cache_key: Option<String>,
    text: Option<TextControls>,
    conversation_id: Option<String>,
    session_source: Option<SessionSource>,
    store_override: Option<bool>,
    headers: HeaderMap,
}

impl<'a> ResponsesRequestBuilder<'a> {
    /// Start a builder with the three required fields.
    pub fn new(model: &'a str, instructions: &'a str, input: &'a [ResponseItem]) -> Self {
        Self {
            model: Some(model),
            instructions: Some(instructions),
            input: Some(input),
            ..Default::default()
        }
    }

    pub fn tools(mut self, tools: &'a [Value]) -> Self {
        self.tools = Some(tools);
        self
    }

    pub fn parallel_tool_calls(mut self, enabled: bool) -> Self {
        self.parallel_tool_calls = enabled;
        self
    }

    pub fn reasoning(mut self, reasoning: Option<Reasoning>) -> Self {
        self.reasoning = reasoning;
        self
    }

    pub fn include(mut self, include: Vec<String>) -> Self {
        self.include = include;
        self
    }

    pub fn prompt_cache_key(mut self, key: Option<String>) -> Self {
        self.prompt_cache_key = key;
        self
    }

    pub fn text(mut self, text: Option<TextControls>) -> Self {
        self.text = text;
        self
    }

    pub fn conversation(mut self, conversation_id: Option<String>) -> Self {
        self.conversation_id = conversation_id;
        self
    }

    pub fn session_source(mut self, source: Option<SessionSource>) -> Self {
        self.session_source = source;
        self
    }

    /// Force `store` on/off; when unset, `store` defaults to true only for
    /// Azure Responses endpoints (see `build`).
    pub fn store_override(mut self, store: Option<bool>) -> Self {
        self.store_override = store;
        self
    }

    /// Replace (not merge) the extra headers attached to the request.
    pub fn extra_headers(mut self, headers: HeaderMap) -> Self {
        self.headers = headers;
        self
    }

    /// Validate required fields, serialize the body, and assemble headers
    /// (conversation/session ids plus the optional subagent marker).
    pub fn build(self, provider: &Provider) -> Result<ResponsesRequest, ApiError> {
        let model = self
            .model
            .ok_or_else(|| ApiError::Stream("missing model for responses request".into()))?;
        let instructions = self
            .instructions
            .ok_or_else(|| ApiError::Stream("missing instructions for responses request".into()))?;
        let input = self
            .input
            .ok_or_else(|| ApiError::Stream("missing input for responses request".into()))?;
        let tools = self.tools.unwrap_or_default();
        // Azure requires `store: true` by default; explicit override wins.
        let store = self
            .store_override
            .unwrap_or_else(|| provider.is_azure_responses_endpoint());
        let req = ResponsesApiRequest {
            model,
            instructions,
            input,
            tools,
            tool_choice: "auto",
            parallel_tool_calls: self.parallel_tool_calls,
            reasoning: self.reasoning,
            store,
            stream: true,
            include: self.include,
            prompt_cache_key: self.prompt_cache_key,
            text: self.text,
        };
        let mut body = serde_json::to_value(&req)
            .map_err(|e| ApiError::Stream(format!("failed to encode responses request: {e}")))?;
        // Azure + store needs stable item ids in the payload to deduplicate.
        if store && provider.is_azure_responses_endpoint() {
            attach_item_ids(&mut body, input);
        }
        let mut headers = self.headers;
        headers.extend(build_conversation_headers(self.conversation_id));
        if let Some(subagent) = subagent_header(&self.session_source) {
            insert_header(&mut headers, "x-openai-subagent", &subagent);
        }
        Ok(ResponsesRequest { body, headers })
    }
}

/// Copy non-empty ids from `original_items` onto the corresponding entries of
/// the serialized `input` array (positional zip; silently stops at the shorter
/// of the two sequences).
fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) {
    let Some(input_value) = payload_json.get_mut("input") else {
        return;
    };
    let Value::Array(items) = input_value else {
        return;
    };
    for (value, item) in items.iter_mut().zip(original_items.iter()) {
        if let ResponseItem::Reasoning { id, .. }
        | ResponseItem::Message { id: Some(id), .. }
        | ResponseItem::WebSearchCall { id: Some(id), .. }
        | ResponseItem::FunctionCall { id: Some(id), .. }
        | ResponseItem::LocalShellCall { id: Some(id), .. }
        | ResponseItem::CustomToolCall { id: Some(id), .. } = item
        {
            if id.is_empty() {
                continue;
            }
            if let Some(obj) = value.as_object_mut() {
                obj.insert("id".to_string(), Value::String(id.clone()));
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::provider::RetryConfig;
    use crate::provider::WireApi;
    use codex_protocol::protocol::SubAgentSource;
    use http::HeaderValue;
    use pretty_assertions::assert_eq;
    use std::time::Duration;

    // Minimal provider fixture; retry values are irrelevant to these tests.
    fn provider(name: &str, base_url: &str) -> Provider {
        Provider {
            name: name.to_string(),
            base_url: base_url.to_string(),
            query_params: None,
            wire: WireApi::Responses,
            headers: HeaderMap::new(),
            retry: RetryConfig {
                max_attempts: 1,
                base_delay: Duration::from_millis(50),
                retry_429: false,
                retry_5xx: true,
                retry_transport: true,
            },
            stream_idle_timeout: Duration::from_secs(5),
        }
    }

    #[test]
    fn azure_default_store_attaches_ids_and_headers() {
        let provider = provider("azure", "https://example.openai.azure.com/v1");
        let input = vec![
            ResponseItem::Message {
                id: Some("m1".into()),
                role: "assistant".into(),
                content: Vec::new(),
            },
            ResponseItem::Message {
                id: None,
                role: "assistant".into(),
                content: Vec::new(),
            },
        ];
        let request = ResponsesRequestBuilder::new("gpt-test", "inst", &input)
            .conversation(Some("conv-1".into()))
            .session_source(Some(SessionSource::SubAgent(SubAgentSource::Review)))
            .build(&provider)
            .expect("request");
        assert_eq!(request.body.get("store"), Some(&Value::Bool(true)));
        let ids: Vec<Option<String>> = request
            .body
            .get("input")
            .and_then(|v| v.as_array())
            .into_iter()
            .flatten()
            .map(|item| item.get("id").and_then(|v| v.as_str().map(str::to_string)))
            .collect();
        assert_eq!(ids, vec![Some("m1".to_string()), None]);
        assert_eq!(
            request.headers.get("conversation_id"),
            Some(&HeaderValue::from_static("conv-1"))
        );
        assert_eq!(
            request.headers.get("session_id"),
            Some(&HeaderValue::from_static("conv-1"))
        );
        assert_eq!(
            request.headers.get("x-openai-subagent"),
            Some(&HeaderValue::from_static("review"))
        );
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/requests/headers.rs
codex-rs/codex-api/src/requests/headers.rs
use codex_protocol::protocol::SessionSource;
use http::HeaderMap;
use http::HeaderValue;

/// Build the conversation/session identity headers: both `conversation_id`
/// and `session_id` carry the same id when one is provided.
pub(crate) fn build_conversation_headers(conversation_id: Option<String>) -> HeaderMap {
    let mut headers = HeaderMap::new();
    if let Some(id) = conversation_id {
        for name in ["conversation_id", "session_id"] {
            insert_header(&mut headers, name, &id);
        }
    }
    headers
}

/// Derive the `x-openai-subagent` header value for a sub-agent session:
/// `Other` labels pass through verbatim, every other variant uses its serde
/// string form (falling back to "other").
pub(crate) fn subagent_header(source: &Option<SessionSource>) -> Option<String> {
    let sub = match source.as_ref()? {
        SessionSource::SubAgent(sub) => sub,
        _ => return None,
    };
    if let codex_protocol::protocol::SubAgentSource::Other(label) = sub {
        return Some(label.clone());
    }
    let serialized = serde_json::to_value(sub)
        .ok()
        .and_then(|value| value.as_str().map(str::to_owned));
    Some(serialized.unwrap_or_else(|| "other".to_string()))
}

/// Insert `name: value`, silently dropping pairs that are not valid HTTP
/// header names/values.
pub(crate) fn insert_header(headers: &mut HeaderMap, name: &str, value: &str) {
    let Ok(header_name) = name.parse::<http::HeaderName>() else {
        return;
    };
    let Ok(header_value) = HeaderValue::from_str(value) else {
        return;
    };
    headers.insert(header_name, header_value);
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/requests/mod.rs
codex-rs/codex-api/src/requests/mod.rs
pub mod chat; pub(crate) mod headers; pub mod responses; pub use chat::ChatRequest; pub use chat::ChatRequestBuilder; pub use responses::ResponsesRequest; pub use responses::ResponsesRequestBuilder;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/requests/chat.rs
codex-rs/codex-api/src/requests/chat.rs
use crate::error::ApiError; use crate::provider::Provider; use crate::requests::headers::build_conversation_headers; use crate::requests::headers::insert_header; use crate::requests::headers::subagent_header; use codex_protocol::models::ContentItem; use codex_protocol::models::FunctionCallOutputContentItem; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::SessionSource; use http::HeaderMap; use serde_json::Value; use serde_json::json; use std::collections::HashMap; /// Assembled request body plus headers for Chat Completions streaming calls. pub struct ChatRequest { pub body: Value, pub headers: HeaderMap, } pub struct ChatRequestBuilder<'a> { model: &'a str, instructions: &'a str, input: &'a [ResponseItem], tools: &'a [Value], conversation_id: Option<String>, session_source: Option<SessionSource>, } impl<'a> ChatRequestBuilder<'a> { pub fn new( model: &'a str, instructions: &'a str, input: &'a [ResponseItem], tools: &'a [Value], ) -> Self { Self { model, instructions, input, tools, conversation_id: None, session_source: None, } } pub fn conversation_id(mut self, id: Option<String>) -> Self { self.conversation_id = id; self } pub fn session_source(mut self, source: Option<SessionSource>) -> Self { self.session_source = source; self } pub fn build(self, _provider: &Provider) -> Result<ChatRequest, ApiError> { let mut messages = Vec::<Value>::new(); messages.push(json!({"role": "system", "content": self.instructions})); let input = self.input; let mut reasoning_by_anchor_index: HashMap<usize, String> = HashMap::new(); let mut last_emitted_role: Option<&str> = None; for item in input { match item { ResponseItem::Message { role, .. } => last_emitted_role = Some(role.as_str()), ResponseItem::FunctionCall { .. } | ResponseItem::LocalShellCall { .. } => { last_emitted_role = Some("assistant") } ResponseItem::FunctionCallOutput { .. } => last_emitted_role = Some("tool"), ResponseItem::Reasoning { .. 
} | ResponseItem::Other => {} ResponseItem::CustomToolCall { .. } => {} ResponseItem::CustomToolCallOutput { .. } => {} ResponseItem::WebSearchCall { .. } => {} ResponseItem::GhostSnapshot { .. } => {} ResponseItem::Compaction { .. } => {} } } let mut last_user_index: Option<usize> = None; for (idx, item) in input.iter().enumerate() { if let ResponseItem::Message { role, .. } = item && role == "user" { last_user_index = Some(idx); } } if !matches!(last_emitted_role, Some("user")) { for (idx, item) in input.iter().enumerate() { if let Some(u_idx) = last_user_index && idx <= u_idx { continue; } if let ResponseItem::Reasoning { content: Some(items), .. } = item { let mut text = String::new(); for entry in items { match entry { ReasoningItemContent::ReasoningText { text: segment } | ReasoningItemContent::Text { text: segment } => { text.push_str(segment) } } } if text.trim().is_empty() { continue; } let mut attached = false; if idx > 0 && let ResponseItem::Message { role, .. } = &input[idx - 1] && role == "assistant" { reasoning_by_anchor_index .entry(idx - 1) .and_modify(|v| v.push_str(&text)) .or_insert(text.clone()); attached = true; } if !attached && idx + 1 < input.len() { match &input[idx + 1] { ResponseItem::FunctionCall { .. } | ResponseItem::LocalShellCall { .. } => { reasoning_by_anchor_index .entry(idx + 1) .and_modify(|v| v.push_str(&text)) .or_insert(text.clone()); } ResponseItem::Message { role, .. } if role == "assistant" => { reasoning_by_anchor_index .entry(idx + 1) .and_modify(|v| v.push_str(&text)) .or_insert(text.clone()); } _ => {} } } } } } let mut last_assistant_text: Option<String> = None; for (idx, item) in input.iter().enumerate() { match item { ResponseItem::Message { role, content, .. 
} => { let mut text = String::new(); let mut items: Vec<Value> = Vec::new(); let mut saw_image = false; for c in content { match c { ContentItem::InputText { text: t } | ContentItem::OutputText { text: t } => { text.push_str(t); items.push(json!({"type":"text","text": t})); } ContentItem::InputImage { image_url } => { saw_image = true; items.push( json!({"type":"image_url","image_url": {"url": image_url}}), ); } } } if role == "assistant" { if let Some(prev) = &last_assistant_text && prev == &text { continue; } last_assistant_text = Some(text.clone()); } let content_value = if role == "assistant" { json!(text) } else if saw_image { json!(items) } else { json!(text) }; let mut msg = json!({"role": role, "content": content_value}); if role == "assistant" && let Some(reasoning) = reasoning_by_anchor_index.get(&idx) && let Some(obj) = msg.as_object_mut() { obj.insert("reasoning".to_string(), json!(reasoning)); } messages.push(msg); } ResponseItem::FunctionCall { name, arguments, call_id, .. 
} => { let mut msg = json!({ "role": "assistant", "content": null, "tool_calls": [{ "id": call_id, "type": "function", "function": { "name": name, "arguments": arguments, } }] }); if let Some(reasoning) = reasoning_by_anchor_index.get(&idx) && let Some(obj) = msg.as_object_mut() { obj.insert("reasoning".to_string(), json!(reasoning)); } messages.push(msg); } ResponseItem::LocalShellCall { id, call_id: _, status, action, } => { let mut msg = json!({ "role": "assistant", "content": null, "tool_calls": [{ "id": id.clone().unwrap_or_default(), "type": "local_shell_call", "status": status, "action": action, }] }); if let Some(reasoning) = reasoning_by_anchor_index.get(&idx) && let Some(obj) = msg.as_object_mut() { obj.insert("reasoning".to_string(), json!(reasoning)); } messages.push(msg); } ResponseItem::FunctionCallOutput { call_id, output } => { let content_value = if let Some(items) = &output.content_items { let mapped: Vec<Value> = items .iter() .map(|it| match it { FunctionCallOutputContentItem::InputText { text } => { json!({"type":"text","text": text}) } FunctionCallOutputContentItem::InputImage { image_url } => { json!({"type":"image_url","image_url": {"url": image_url}}) } }) .collect(); json!(mapped) } else { json!(output.content) }; messages.push(json!({ "role": "tool", "tool_call_id": call_id, "content": content_value, })); } ResponseItem::CustomToolCall { id, call_id: _, name, input, status: _, } => { messages.push(json!({ "role": "assistant", "content": null, "tool_calls": [{ "id": id, "type": "custom", "custom": { "name": name, "input": input, } }] })); } ResponseItem::CustomToolCallOutput { call_id, output } => { messages.push(json!({ "role": "tool", "tool_call_id": call_id, "content": output, })); } ResponseItem::GhostSnapshot { .. } => { continue; } ResponseItem::Reasoning { .. } | ResponseItem::WebSearchCall { .. } | ResponseItem::Other | ResponseItem::Compaction { .. 
} => { continue; } } } let payload = json!({ "model": self.model, "messages": messages, "stream": true, "tools": self.tools, }); let mut headers = build_conversation_headers(self.conversation_id); if let Some(subagent) = subagent_header(&self.session_source) { insert_header(&mut headers, "x-openai-subagent", &subagent); } Ok(ChatRequest { body: payload, headers, }) } } #[cfg(test)] mod tests { use super::*; use crate::provider::RetryConfig; use crate::provider::WireApi; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use http::HeaderValue; use pretty_assertions::assert_eq; use std::time::Duration; fn provider() -> Provider { Provider { name: "openai".to_string(), base_url: "https://api.openai.com/v1".to_string(), query_params: None, wire: WireApi::Chat, headers: HeaderMap::new(), retry: RetryConfig { max_attempts: 1, base_delay: Duration::from_millis(10), retry_429: false, retry_5xx: true, retry_transport: true, }, stream_idle_timeout: Duration::from_secs(1), } } #[test] fn attaches_conversation_and_subagent_headers() { let prompt_input = vec![ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "hi".to_string(), }], }]; let req = ChatRequestBuilder::new("gpt-test", "inst", &prompt_input, &[]) .conversation_id(Some("conv-1".into())) .session_source(Some(SessionSource::SubAgent(SubAgentSource::Review))) .build(&provider()) .expect("request"); assert_eq!( req.headers.get("conversation_id"), Some(&HeaderValue::from_static("conv-1")) ); assert_eq!( req.headers.get("session_id"), Some(&HeaderValue::from_static("conv-1")) ); assert_eq!( req.headers.get("x-openai-subagent"), Some(&HeaderValue::from_static("review")) ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/sse/responses.rs
codex-rs/codex-api/src/sse/responses.rs
use crate::common::ResponseEvent; use crate::common::ResponseStream; use crate::error::ApiError; use crate::rate_limits::parse_rate_limit; use crate::telemetry::SseTelemetry; use codex_client::ByteStream; use codex_client::StreamResponse; use codex_client::TransportError; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::TokenUsage; use eventsource_stream::Eventsource; use futures::StreamExt; use futures::TryStreamExt; use serde::Deserialize; use serde_json::Value; use std::io::BufRead; use std::path::Path; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::Instant; use tokio::time::timeout; use tokio_util::io::ReaderStream; use tracing::debug; use tracing::trace; /// Streams SSE events from an on-disk fixture for tests. pub fn stream_from_fixture( path: impl AsRef<Path>, idle_timeout: Duration, ) -> Result<ResponseStream, ApiError> { let file = std::fs::File::open(path.as_ref()).map_err(|err| ApiError::Stream(err.to_string()))?; let mut content = String::new(); for line in std::io::BufReader::new(file).lines() { let line = line.map_err(|err| ApiError::Stream(err.to_string()))?; content.push_str(&line); content.push_str("\n\n"); } let reader = std::io::Cursor::new(content); let stream = ReaderStream::new(reader).map_err(|err| TransportError::Network(err.to_string())); let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600); tokio::spawn(process_sse(Box::pin(stream), tx_event, idle_timeout, None)); Ok(ResponseStream { rx_event }) } pub fn spawn_response_stream( stream_response: StreamResponse, idle_timeout: Duration, telemetry: Option<Arc<dyn SseTelemetry>>, ) -> ResponseStream { let rate_limits = parse_rate_limit(&stream_response.headers); let models_etag = stream_response .headers .get("X-Models-Etag") .and_then(|v| v.to_str().ok()) .map(ToString::to_string); let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600); tokio::spawn(async move { if let 
Some(snapshot) = rate_limits { let _ = tx_event.send(Ok(ResponseEvent::RateLimits(snapshot))).await; } if let Some(etag) = models_etag { let _ = tx_event.send(Ok(ResponseEvent::ModelsEtag(etag))).await; } process_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await; }); ResponseStream { rx_event } } #[derive(Debug, Deserialize)] #[allow(dead_code)] struct Error { r#type: Option<String>, code: Option<String>, message: Option<String>, plan_type: Option<String>, resets_at: Option<i64>, } #[derive(Debug, Deserialize)] #[allow(dead_code)] struct ResponseCompleted { id: String, #[serde(default)] usage: Option<ResponseCompletedUsage>, } #[derive(Debug, Deserialize)] struct ResponseCompletedUsage { input_tokens: i64, input_tokens_details: Option<ResponseCompletedInputTokensDetails>, output_tokens: i64, output_tokens_details: Option<ResponseCompletedOutputTokensDetails>, total_tokens: i64, } impl From<ResponseCompletedUsage> for TokenUsage { fn from(val: ResponseCompletedUsage) -> Self { TokenUsage { input_tokens: val.input_tokens, cached_input_tokens: val .input_tokens_details .map(|d| d.cached_tokens) .unwrap_or(0), output_tokens: val.output_tokens, reasoning_output_tokens: val .output_tokens_details .map(|d| d.reasoning_tokens) .unwrap_or(0), total_tokens: val.total_tokens, } } } #[derive(Debug, Deserialize)] struct ResponseCompletedInputTokensDetails { cached_tokens: i64, } #[derive(Debug, Deserialize)] struct ResponseCompletedOutputTokensDetails { reasoning_tokens: i64, } #[derive(Deserialize, Debug)] struct SseEvent { #[serde(rename = "type")] kind: String, response: Option<Value>, item: Option<Value>, delta: Option<String>, summary_index: Option<i64>, content_index: Option<i64>, } pub async fn process_sse( stream: ByteStream, tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>, idle_timeout: Duration, telemetry: Option<Arc<dyn SseTelemetry>>, ) { let mut stream = stream.eventsource(); let mut response_completed: Option<ResponseCompleted> = None; 
let mut response_error: Option<ApiError> = None; loop { let start = Instant::now(); let response = timeout(idle_timeout, stream.next()).await; if let Some(t) = telemetry.as_ref() { t.on_sse_poll(&response, start.elapsed()); } let sse = match response { Ok(Some(Ok(sse))) => sse, Ok(Some(Err(e))) => { debug!("SSE Error: {e:#}"); let _ = tx_event.send(Err(ApiError::Stream(e.to_string()))).await; return; } Ok(None) => { match response_completed.take() { Some(ResponseCompleted { id, usage }) => { let event = ResponseEvent::Completed { response_id: id, token_usage: usage.map(Into::into), }; let _ = tx_event.send(Ok(event)).await; } None => { let error = response_error.unwrap_or(ApiError::Stream( "stream closed before response.completed".into(), )); let _ = tx_event.send(Err(error)).await; } } return; } Err(_) => { let _ = tx_event .send(Err(ApiError::Stream("idle timeout waiting for SSE".into()))) .await; return; } }; let raw = sse.data.clone(); trace!("SSE event: {raw}"); let event: SseEvent = match serde_json::from_str(&sse.data) { Ok(event) => event, Err(e) => { debug!("Failed to parse SSE event: {e}, data: {}", &sse.data); continue; } }; match event.kind.as_str() { "response.output_item.done" => { let Some(item_val) = event.item else { continue }; let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else { debug!("failed to parse ResponseItem from output_item.done"); continue; }; let event = ResponseEvent::OutputItemDone(item); if tx_event.send(Ok(event)).await.is_err() { return; } } "response.output_text.delta" => { if let Some(delta) = event.delta { let event = ResponseEvent::OutputTextDelta(delta); if tx_event.send(Ok(event)).await.is_err() { return; } } } "response.reasoning_summary_text.delta" => { if let (Some(delta), Some(summary_index)) = (event.delta, event.summary_index) { let event = ResponseEvent::ReasoningSummaryDelta { delta, summary_index, }; if tx_event.send(Ok(event)).await.is_err() { return; } } } "response.reasoning_text.delta" => { if 
let (Some(delta), Some(content_index)) = (event.delta, event.content_index) { let event = ResponseEvent::ReasoningContentDelta { delta, content_index, }; if tx_event.send(Ok(event)).await.is_err() { return; } } } "response.created" => { if event.response.is_some() { let _ = tx_event.send(Ok(ResponseEvent::Created {})).await; } } "response.failed" => { if let Some(resp_val) = event.response { response_error = Some(ApiError::Stream("response.failed event received".into())); if let Some(error) = resp_val.get("error") && let Ok(error) = serde_json::from_value::<Error>(error.clone()) { if is_context_window_error(&error) { response_error = Some(ApiError::ContextWindowExceeded); } else if is_quota_exceeded_error(&error) { response_error = Some(ApiError::QuotaExceeded); } else if is_usage_not_included(&error) { response_error = Some(ApiError::UsageNotIncluded); } else { let delay = try_parse_retry_after(&error); let message = error.message.clone().unwrap_or_default(); response_error = Some(ApiError::Retryable { message, delay }); } } } } "response.completed" => { if let Some(resp_val) = event.response { match serde_json::from_value::<ResponseCompleted>(resp_val) { Ok(r) => { response_completed = Some(r); } Err(e) => { let error = format!("failed to parse ResponseCompleted: {e}"); debug!(error); response_error = Some(ApiError::Stream(error)); continue; } }; }; } "response.output_item.added" => { let Some(item_val) = event.item else { continue }; let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else { debug!("failed to parse ResponseItem from output_item.done"); continue; }; let event = ResponseEvent::OutputItemAdded(item); if tx_event.send(Ok(event)).await.is_err() { return; } } "response.reasoning_summary_part.added" => { if let Some(summary_index) = event.summary_index { let event = ResponseEvent::ReasoningSummaryPartAdded { summary_index }; if tx_event.send(Ok(event)).await.is_err() { return; } } } _ => {} } } } fn try_parse_retry_after(err: &Error) -> 
Option<Duration> { if err.code.as_deref() != Some("rate_limit_exceeded") { return None; } let re = rate_limit_regex(); if let Some(message) = &err.message && let Some(captures) = re.captures(message) { let seconds = captures.get(1); let unit = captures.get(2); if let (Some(value), Some(unit)) = (seconds, unit) { let value = value.as_str().parse::<f64>().ok()?; let unit = unit.as_str().to_ascii_lowercase(); if unit == "s" || unit.starts_with("second") { return Some(Duration::from_secs_f64(value)); } else if unit == "ms" { return Some(Duration::from_millis(value as u64)); } } } None } fn is_context_window_error(error: &Error) -> bool { error.code.as_deref() == Some("context_length_exceeded") } fn is_quota_exceeded_error(error: &Error) -> bool { error.code.as_deref() == Some("insufficient_quota") } fn is_usage_not_included(error: &Error) -> bool { error.code.as_deref() == Some("usage_not_included") } fn rate_limit_regex() -> &'static regex_lite::Regex { static RE: std::sync::OnceLock<regex_lite::Regex> = std::sync::OnceLock::new(); #[expect(clippy::unwrap_used)] RE.get_or_init(|| { regex_lite::Regex::new(r"(?i)try again in\s*(\d+(?:\.\d+)?)\s*(s|ms|seconds?)").unwrap() }) } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use codex_protocol::models::ResponseItem; use pretty_assertions::assert_eq; use serde_json::json; use tokio::sync::mpsc; use tokio_test::io::Builder as IoBuilder; async fn collect_events(chunks: &[&[u8]]) -> Vec<Result<ResponseEvent, ApiError>> { let mut builder = IoBuilder::new(); for chunk in chunks { builder.read(chunk); } let reader = builder.build(); let stream = ReaderStream::new(reader).map_err(|err| TransportError::Network(err.to_string())); let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent, ApiError>>(16); tokio::spawn(process_sse(Box::pin(stream), tx, idle_timeout(), None)); let mut events = Vec::new(); while let Some(ev) = rx.recv().await { events.push(ev); } events } async fn run_sse(events: 
Vec<serde_json::Value>) -> Vec<ResponseEvent> { let mut body = String::new(); for e in events { let kind = e .get("type") .and_then(|v| v.as_str()) .expect("fixture event missing type"); if e.as_object().map(|o| o.len() == 1).unwrap_or(false) { body.push_str(&format!("event: {kind}\n\n")); } else { body.push_str(&format!("event: {kind}\ndata: {e}\n\n")); } } let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent, ApiError>>(8); let stream = ReaderStream::new(std::io::Cursor::new(body)) .map_err(|err| TransportError::Network(err.to_string())); tokio::spawn(process_sse(Box::pin(stream), tx, idle_timeout(), None)); let mut out = Vec::new(); while let Some(ev) = rx.recv().await { out.push(ev.expect("channel closed")); } out } fn idle_timeout() -> Duration { Duration::from_millis(1000) } #[tokio::test] async fn parses_items_and_completed() { let item1 = json!({ "type": "response.output_item.done", "item": { "type": "message", "role": "assistant", "content": [{"type": "output_text", "text": "Hello"}] } }) .to_string(); let item2 = json!({ "type": "response.output_item.done", "item": { "type": "message", "role": "assistant", "content": [{"type": "output_text", "text": "World"}] } }) .to_string(); let completed = json!({ "type": "response.completed", "response": { "id": "resp1" } }) .to_string(); let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n"); let sse2 = format!("event: response.output_item.done\ndata: {item2}\n\n"); let sse3 = format!("event: response.completed\ndata: {completed}\n\n"); let events = collect_events(&[sse1.as_bytes(), sse2.as_bytes(), sse3.as_bytes()]).await; assert_eq!(events.len(), 3); assert_matches!( &events[0], Ok(ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. })) if role == "assistant" ); assert_matches!( &events[1], Ok(ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. 
})) if role == "assistant" ); match &events[2] { Ok(ResponseEvent::Completed { response_id, token_usage, }) => { assert_eq!(response_id, "resp1"); assert!(token_usage.is_none()); } other => panic!("unexpected third event: {other:?}"), } } #[tokio::test] async fn error_when_missing_completed() { let item1 = json!({ "type": "response.output_item.done", "item": { "type": "message", "role": "assistant", "content": [{"type": "output_text", "text": "Hello"}] } }) .to_string(); let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n"); let events = collect_events(&[sse1.as_bytes()]).await; assert_eq!(events.len(), 2); assert_matches!(events[0], Ok(ResponseEvent::OutputItemDone(_))); match &events[1] { Err(ApiError::Stream(msg)) => { assert_eq!(msg, "stream closed before response.completed") } other => panic!("unexpected second event: {other:?}"), } } #[tokio::test] async fn error_when_error_event() { let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#; let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n"); let events = collect_events(&[sse1.as_bytes()]).await; assert_eq!(events.len(), 1); match &events[0] { Err(ApiError::Retryable { message, delay }) => { assert_eq!( message, "Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more." 
); assert_eq!(*delay, Some(Duration::from_secs_f64(11.054))); } other => panic!("unexpected second event: {other:?}"), } } #[tokio::test] async fn context_window_error_is_fatal() { let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_5c66275b97b9baef1ed95550adb3b7ec13b17aafd1d2f11b","object":"response","created_at":1759510079,"status":"failed","background":false,"error":{"code":"context_length_exceeded","message":"Your input exceeds the context window of this model. Please adjust your input and try again."},"usage":null,"user":null,"metadata":{}}}"#; let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n"); let events = collect_events(&[sse1.as_bytes()]).await; assert_eq!(events.len(), 1); assert_matches!(events[0], Err(ApiError::ContextWindowExceeded)); } #[tokio::test] async fn context_window_error_with_newline_is_fatal() { let raw_error = r#"{"type":"response.failed","sequence_number":4,"response":{"id":"resp_fatal_newline","object":"response","created_at":1759510080,"status":"failed","background":false,"error":{"code":"context_length_exceeded","message":"Your input exceeds the context window of this model. Please adjust your input and try\nagain."},"usage":null,"user":null,"metadata":{}}}"#; let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n"); let events = collect_events(&[sse1.as_bytes()]).await; assert_eq!(events.len(), 1); assert_matches!(events[0], Err(ApiError::ContextWindowExceeded)); } #[tokio::test] async fn quota_exceeded_error_is_fatal() { let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_fatal_quota","object":"response","created_at":1759771626,"status":"failed","background":false,"error":{"code":"insufficient_quota","message":"You exceeded your current quota, please check your plan and billing details. 
For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors."},"incomplete_details":null}}"#; let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n"); let events = collect_events(&[sse1.as_bytes()]).await; assert_eq!(events.len(), 1); assert_matches!(events[0], Err(ApiError::QuotaExceeded)); } #[tokio::test] async fn table_driven_event_kinds() { struct TestCase { name: &'static str, event: serde_json::Value, expect_first: fn(&ResponseEvent) -> bool, expected_len: usize, } fn is_created(ev: &ResponseEvent) -> bool { matches!(ev, ResponseEvent::Created) } fn is_output(ev: &ResponseEvent) -> bool { matches!(ev, ResponseEvent::OutputItemDone(_)) } fn is_completed(ev: &ResponseEvent) -> bool { matches!(ev, ResponseEvent::Completed { .. }) } let completed = json!({ "type": "response.completed", "response": { "id": "c", "usage": { "input_tokens": 0, "input_tokens_details": null, "output_tokens": 0, "output_tokens_details": null, "total_tokens": 0 }, "output": [] } }); let cases = vec![ TestCase { name: "created", event: json!({"type": "response.created", "response": {}}), expect_first: is_created, expected_len: 2, }, TestCase { name: "output_item.done", event: json!({ "type": "response.output_item.done", "item": { "type": "message", "role": "assistant", "content": [ {"type": "output_text", "text": "hi"} ] } }), expect_first: is_output, expected_len: 2, }, TestCase { name: "unknown", event: json!({"type": "response.new_tool_event"}), expect_first: is_completed, expected_len: 1, }, ]; for case in cases { let mut evs = vec![case.event]; evs.push(completed.clone()); let out = run_sse(evs).await; assert_eq!(out.len(), case.expected_len, "case {}", case.name); assert!( (case.expect_first)(&out[0]), "first event mismatch in case {}", case.name ); } } #[test] fn test_try_parse_retry_after() { let err = Error { r#type: None, message: Some("Rate limit reached for gpt-5.1 in organization org- on tokens per min (TPM): 
Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()), code: Some("rate_limit_exceeded".to_string()), plan_type: None, resets_at: None, }; let delay = try_parse_retry_after(&err); assert_eq!(delay, Some(Duration::from_millis(28))); } #[test] fn test_try_parse_retry_after_no_delay() { let err = Error { r#type: None, message: Some("Rate limit reached for gpt-5.1 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()), code: Some("rate_limit_exceeded".to_string()), plan_type: None, resets_at: None, }; let delay = try_parse_retry_after(&err); assert_eq!(delay, Some(Duration::from_secs_f64(1.898))); } #[test] fn test_try_parse_retry_after_azure() { let err = Error { r#type: None, message: Some("Rate limit exceeded. Try again in 35 seconds.".to_string()), code: Some("rate_limit_exceeded".to_string()), plan_type: None, resets_at: None, }; let delay = try_parse_retry_after(&err); assert_eq!(delay, Some(Duration::from_secs(35))); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/sse/mod.rs
codex-rs/codex-api/src/sse/mod.rs
pub mod chat; pub mod responses; pub use responses::process_sse; pub use responses::spawn_response_stream; pub use responses::stream_from_fixture;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/sse/chat.rs
codex-rs/codex-api/src/sse/chat.rs
use crate::common::ResponseEvent; use crate::common::ResponseStream; use crate::error::ApiError; use crate::telemetry::SseTelemetry; use codex_client::StreamResponse; use codex_protocol::models::ContentItem; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ResponseItem; use eventsource_stream::Eventsource; use futures::Stream; use futures::StreamExt; use std::collections::HashMap; use std::collections::HashSet; use std::time::Duration; use tokio::sync::mpsc; use tokio::time::Instant; use tokio::time::timeout; use tracing::debug; use tracing::trace; pub(crate) fn spawn_chat_stream( stream_response: StreamResponse, idle_timeout: Duration, telemetry: Option<std::sync::Arc<dyn SseTelemetry>>, ) -> ResponseStream { let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent, ApiError>>(1600); tokio::spawn(async move { process_chat_sse(stream_response.bytes, tx_event, idle_timeout, telemetry).await; }); ResponseStream { rx_event } } pub async fn process_chat_sse<S>( stream: S, tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>, idle_timeout: Duration, telemetry: Option<std::sync::Arc<dyn SseTelemetry>>, ) where S: Stream<Item = Result<bytes::Bytes, codex_client::TransportError>> + Unpin, { let mut stream = stream.eventsource(); #[derive(Default, Debug)] struct ToolCallState { id: Option<String>, name: Option<String>, arguments: String, } let mut tool_calls: HashMap<usize, ToolCallState> = HashMap::new(); let mut tool_call_order: Vec<usize> = Vec::new(); let mut tool_call_order_seen: HashSet<usize> = HashSet::new(); let mut tool_call_index_by_id: HashMap<String, usize> = HashMap::new(); let mut next_tool_call_index = 0usize; let mut last_tool_call_index: Option<usize> = None; let mut assistant_item: Option<ResponseItem> = None; let mut reasoning_item: Option<ResponseItem> = None; let mut completed_sent = false; loop { let start = Instant::now(); let response = timeout(idle_timeout, stream.next()).await; if let Some(t) = 
telemetry.as_ref() { t.on_sse_poll(&response, start.elapsed()); } let sse = match response { Ok(Some(Ok(sse))) => sse, Ok(Some(Err(e))) => { let _ = tx_event.send(Err(ApiError::Stream(e.to_string()))).await; return; } Ok(None) => { if let Some(reasoning) = reasoning_item { let _ = tx_event .send(Ok(ResponseEvent::OutputItemDone(reasoning))) .await; } if let Some(assistant) = assistant_item { let _ = tx_event .send(Ok(ResponseEvent::OutputItemDone(assistant))) .await; } if !completed_sent { let _ = tx_event .send(Ok(ResponseEvent::Completed { response_id: String::new(), token_usage: None, })) .await; } return; } Err(_) => { let _ = tx_event .send(Err(ApiError::Stream("idle timeout waiting for SSE".into()))) .await; return; } }; trace!("SSE event: {}", sse.data); if sse.data.trim().is_empty() { continue; } let value: serde_json::Value = match serde_json::from_str(&sse.data) { Ok(val) => val, Err(err) => { debug!( "Failed to parse ChatCompletions SSE event: {err}, data: {}", &sse.data ); continue; } }; let Some(choices) = value.get("choices").and_then(|c| c.as_array()) else { continue; }; for choice in choices { if let Some(delta) = choice.get("delta") { if let Some(reasoning) = delta.get("reasoning") { if let Some(text) = reasoning.as_str() { append_reasoning_text(&tx_event, &mut reasoning_item, text.to_string()) .await; } else if let Some(text) = reasoning.get("text").and_then(|v| v.as_str()) { append_reasoning_text(&tx_event, &mut reasoning_item, text.to_string()) .await; } else if let Some(text) = reasoning.get("content").and_then(|v| v.as_str()) { append_reasoning_text(&tx_event, &mut reasoning_item, text.to_string()) .await; } } if let Some(content) = delta.get("content") { if content.is_array() { for item in content.as_array().unwrap_or(&vec![]) { if let Some(text) = item.get("text").and_then(|t| t.as_str()) { append_assistant_text( &tx_event, &mut assistant_item, text.to_string(), ) .await; } } } else if let Some(text) = content.as_str() { 
append_assistant_text(&tx_event, &mut assistant_item, text.to_string()) .await; } } if let Some(tool_call_values) = delta.get("tool_calls").and_then(|c| c.as_array()) { for tool_call in tool_call_values { let mut index = tool_call .get("index") .and_then(serde_json::Value::as_u64) .map(|i| i as usize); let mut call_id_for_lookup = None; if let Some(call_id) = tool_call.get("id").and_then(|i| i.as_str()) { call_id_for_lookup = Some(call_id.to_string()); if let Some(existing) = tool_call_index_by_id.get(call_id) { index = Some(*existing); } } if index.is_none() && call_id_for_lookup.is_none() { index = last_tool_call_index; } let index = index.unwrap_or_else(|| { while tool_calls.contains_key(&next_tool_call_index) { next_tool_call_index += 1; } let idx = next_tool_call_index; next_tool_call_index += 1; idx }); let call_state = tool_calls.entry(index).or_default(); if tool_call_order_seen.insert(index) { tool_call_order.push(index); } if let Some(id) = tool_call.get("id").and_then(|i| i.as_str()) { call_state.id.get_or_insert_with(|| id.to_string()); tool_call_index_by_id.entry(id.to_string()).or_insert(index); } if let Some(func) = tool_call.get("function") { if let Some(fname) = func.get("name").and_then(|n| n.as_str()) && !fname.is_empty() { call_state.name.get_or_insert_with(|| fname.to_string()); } if let Some(arguments) = func.get("arguments").and_then(|a| a.as_str()) { call_state.arguments.push_str(arguments); } } last_tool_call_index = Some(index); } } } if let Some(message) = choice.get("message") && let Some(reasoning) = message.get("reasoning") { if let Some(text) = reasoning.as_str() { append_reasoning_text(&tx_event, &mut reasoning_item, text.to_string()).await; } else if let Some(text) = reasoning.get("text").and_then(|v| v.as_str()) { append_reasoning_text(&tx_event, &mut reasoning_item, text.to_string()).await; } else if let Some(text) = reasoning.get("content").and_then(|v| v.as_str()) { append_reasoning_text(&tx_event, &mut reasoning_item, 
text.to_string()).await; } } let finish_reason = choice.get("finish_reason").and_then(|r| r.as_str()); if finish_reason == Some("stop") { if let Some(reasoning) = reasoning_item.take() { let _ = tx_event .send(Ok(ResponseEvent::OutputItemDone(reasoning))) .await; } if let Some(assistant) = assistant_item.take() { let _ = tx_event .send(Ok(ResponseEvent::OutputItemDone(assistant))) .await; } if !completed_sent { let _ = tx_event .send(Ok(ResponseEvent::Completed { response_id: String::new(), token_usage: None, })) .await; completed_sent = true; } continue; } if finish_reason == Some("length") { let _ = tx_event.send(Err(ApiError::ContextWindowExceeded)).await; return; } if finish_reason == Some("tool_calls") { if let Some(reasoning) = reasoning_item.take() { let _ = tx_event .send(Ok(ResponseEvent::OutputItemDone(reasoning))) .await; } for index in tool_call_order.drain(..) { let Some(state) = tool_calls.remove(&index) else { continue; }; tool_call_order_seen.remove(&index); let ToolCallState { id, name, arguments, } = state; let Some(name) = name else { debug!("Skipping tool call at index {index} because name is missing"); continue; }; let item = ResponseItem::FunctionCall { id: None, name, arguments, call_id: id.unwrap_or_else(|| format!("tool-call-{index}")), }; let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; } } } } } async fn append_assistant_text( tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>, assistant_item: &mut Option<ResponseItem>, text: String, ) { if assistant_item.is_none() { let item = ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![], }; *assistant_item = Some(item.clone()); let _ = tx_event .send(Ok(ResponseEvent::OutputItemAdded(item))) .await; } if let Some(ResponseItem::Message { content, .. 
}) = assistant_item { content.push(ContentItem::OutputText { text: text.clone() }); let _ = tx_event .send(Ok(ResponseEvent::OutputTextDelta(text.clone()))) .await; } } async fn append_reasoning_text( tx_event: &mpsc::Sender<Result<ResponseEvent, ApiError>>, reasoning_item: &mut Option<ResponseItem>, text: String, ) { if reasoning_item.is_none() { let item = ResponseItem::Reasoning { id: String::new(), summary: Vec::new(), content: Some(vec![]), encrypted_content: None, }; *reasoning_item = Some(item.clone()); let _ = tx_event .send(Ok(ResponseEvent::OutputItemAdded(item))) .await; } if let Some(ResponseItem::Reasoning { content: Some(content), .. }) = reasoning_item { let content_index = content.len() as i64; content.push(ReasoningItemContent::ReasoningText { text: text.clone() }); let _ = tx_event .send(Ok(ResponseEvent::ReasoningContentDelta { delta: text.clone(), content_index, })) .await; } } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use codex_protocol::models::ResponseItem; use futures::TryStreamExt; use serde_json::json; use tokio::sync::mpsc; use tokio_util::io::ReaderStream; fn build_body(events: &[serde_json::Value]) -> String { let mut body = String::new(); for e in events { body.push_str(&format!("event: message\ndata: {e}\n\n")); } body } async fn collect_events(body: &str) -> Vec<ResponseEvent> { let reader = ReaderStream::new(std::io::Cursor::new(body.to_string())) .map_err(|err| codex_client::TransportError::Network(err.to_string())); let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent, ApiError>>(16); tokio::spawn(process_chat_sse( reader, tx, Duration::from_millis(1000), None, )); let mut out = Vec::new(); while let Some(ev) = rx.recv().await { out.push(ev.expect("stream error")); } out } #[tokio::test] async fn concatenates_tool_call_arguments_across_deltas() { let delta_name = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_a", "index": 0, "function": { "name": "do_a" } }] } }] }); let 
delta_args_1 = json!({ "choices": [{ "delta": { "tool_calls": [{ "index": 0, "function": { "arguments": "{ \"foo\":" } }] } }] }); let delta_args_2 = json!({ "choices": [{ "delta": { "tool_calls": [{ "index": 0, "function": { "arguments": "1}" } }] } }] }); let finish = json!({ "choices": [{ "finish_reason": "tool_calls" }] }); let body = build_body(&[delta_name, delta_args_1, delta_args_2, finish]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id, name, arguments, .. }), ResponseEvent::Completed { .. } ] if call_id == "call_a" && name == "do_a" && arguments == "{ \"foo\":1}" ); } #[tokio::test] async fn emits_multiple_tool_calls() { let delta_a = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_a", "function": { "name": "do_a", "arguments": "{\"foo\":1}" } }] } }] }); let delta_b = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_b", "function": { "name": "do_b", "arguments": "{\"bar\":2}" } }] } }] }); let finish = json!({ "choices": [{ "finish_reason": "tool_calls" }] }); let body = build_body(&[delta_a, delta_b, finish]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id: call_a, name: name_a, arguments: args_a, .. }), ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id: call_b, name: name_b, arguments: args_b, .. }), ResponseEvent::Completed { .. 
} ] if call_a == "call_a" && name_a == "do_a" && args_a == "{\"foo\":1}" && call_b == "call_b" && name_b == "do_b" && args_b == "{\"bar\":2}" ); } #[tokio::test] async fn emits_tool_calls_for_multiple_choices() { let payload = json!({ "choices": [ { "delta": { "tool_calls": [{ "id": "call_a", "index": 0, "function": { "name": "do_a", "arguments": "{}" } }] }, "finish_reason": "tool_calls" }, { "delta": { "tool_calls": [{ "id": "call_b", "index": 0, "function": { "name": "do_b", "arguments": "{}" } }] }, "finish_reason": "tool_calls" } ] }); let body = build_body(&[payload]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id: call_a, name: name_a, arguments: args_a, .. }), ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id: call_b, name: name_b, arguments: args_b, .. }), ResponseEvent::Completed { .. } ] if call_a == "call_a" && name_a == "do_a" && args_a == "{}" && call_b == "call_b" && name_b == "do_b" && args_b == "{}" ); } #[tokio::test] async fn merges_tool_calls_by_index_when_id_missing_on_subsequent_deltas() { let delta_with_id = json!({ "choices": [{ "delta": { "tool_calls": [{ "index": 0, "id": "call_a", "function": { "name": "do_a", "arguments": "{ \"foo\":" } }] } }] }); let delta_without_id = json!({ "choices": [{ "delta": { "tool_calls": [{ "index": 0, "function": { "arguments": "1}" } }] } }] }); let finish = json!({ "choices": [{ "finish_reason": "tool_calls" }] }); let body = build_body(&[delta_with_id, delta_without_id, finish]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id, name, arguments, .. }), ResponseEvent::Completed { .. 
} ] if call_id == "call_a" && name == "do_a" && arguments == "{ \"foo\":1}" ); } #[tokio::test] async fn preserves_tool_call_name_when_empty_deltas_arrive() { let delta_with_name = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_a", "function": { "name": "do_a" } }] } }] }); let delta_with_empty_name = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_a", "function": { "name": "", "arguments": "{}" } }] } }] }); let finish = json!({ "choices": [{ "finish_reason": "tool_calls" }] }); let body = build_body(&[delta_with_name, delta_with_empty_name, finish]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { name, arguments, .. }), ResponseEvent::Completed { .. } ] if name == "do_a" && arguments == "{}" ); } #[tokio::test] async fn emits_tool_calls_even_when_content_and_reasoning_present() { let delta_content_and_tools = json!({ "choices": [{ "delta": { "content": [{"text": "hi"}], "reasoning": "because", "tool_calls": [{ "id": "call_a", "function": { "name": "do_a", "arguments": "{}" } }] } }] }); let finish = json!({ "choices": [{ "finish_reason": "tool_calls" }] }); let body = build_body(&[delta_content_and_tools, finish]); let events = collect_events(&body).await; assert_matches!( &events[..], [ ResponseEvent::OutputItemAdded(ResponseItem::Reasoning { .. }), ResponseEvent::ReasoningContentDelta { .. }, ResponseEvent::OutputItemAdded(ResponseItem::Message { .. }), ResponseEvent::OutputTextDelta(delta), ResponseEvent::OutputItemDone(ResponseItem::Reasoning { .. }), ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { call_id, name, .. }), ResponseEvent::OutputItemDone(ResponseItem::Message { .. }), ResponseEvent::Completed { .. 
} ] if delta == "hi" && call_id == "call_a" && name == "do_a" ); } #[tokio::test] async fn drops_partial_tool_calls_on_stop_finish_reason() { let delta_tool = json!({ "choices": [{ "delta": { "tool_calls": [{ "id": "call_a", "function": { "name": "do_a", "arguments": "{}" } }] } }] }); let finish_stop = json!({ "choices": [{ "finish_reason": "stop" }] }); let body = build_body(&[delta_tool, finish_stop]); let events = collect_events(&body).await; assert!(!events.iter().any(|ev| { matches!( ev, ResponseEvent::OutputItemDone(ResponseItem::FunctionCall { .. }) ) })); assert_matches!(events.last(), Some(ResponseEvent::Completed { .. })); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/responses.rs
codex-rs/codex-api/src/endpoint/responses.rs
use crate::auth::AuthProvider; use crate::common::Prompt as ApiPrompt; use crate::common::Reasoning; use crate::common::ResponseStream; use crate::common::TextControls; use crate::endpoint::streaming::StreamingClient; use crate::error::ApiError; use crate::provider::Provider; use crate::provider::WireApi; use crate::requests::ResponsesRequest; use crate::requests::ResponsesRequestBuilder; use crate::sse::spawn_response_stream; use crate::telemetry::SseTelemetry; use codex_client::HttpTransport; use codex_client::RequestTelemetry; use codex_protocol::protocol::SessionSource; use http::HeaderMap; use serde_json::Value; use std::sync::Arc; use tracing::instrument; pub struct ResponsesClient<T: HttpTransport, A: AuthProvider> { streaming: StreamingClient<T, A>, } #[derive(Default)] pub struct ResponsesOptions { pub reasoning: Option<Reasoning>, pub include: Vec<String>, pub prompt_cache_key: Option<String>, pub text: Option<TextControls>, pub store_override: Option<bool>, pub conversation_id: Option<String>, pub session_source: Option<SessionSource>, pub extra_headers: HeaderMap, } impl<T: HttpTransport, A: AuthProvider> ResponsesClient<T, A> { pub fn new(transport: T, provider: Provider, auth: A) -> Self { Self { streaming: StreamingClient::new(transport, provider, auth), } } pub fn with_telemetry( self, request: Option<Arc<dyn RequestTelemetry>>, sse: Option<Arc<dyn SseTelemetry>>, ) -> Self { Self { streaming: self.streaming.with_telemetry(request, sse), } } pub async fn stream_request( &self, request: ResponsesRequest, ) -> Result<ResponseStream, ApiError> { self.stream(request.body, request.headers).await } #[instrument(level = "trace", skip_all, err)] pub async fn stream_prompt( &self, model: &str, prompt: &ApiPrompt, options: ResponsesOptions, ) -> Result<ResponseStream, ApiError> { let ResponsesOptions { reasoning, include, prompt_cache_key, text, store_override, conversation_id, session_source, extra_headers, } = options; let request = 
ResponsesRequestBuilder::new(model, &prompt.instructions, &prompt.input) .tools(&prompt.tools) .parallel_tool_calls(prompt.parallel_tool_calls) .reasoning(reasoning) .include(include) .prompt_cache_key(prompt_cache_key) .text(text) .conversation(conversation_id) .session_source(session_source) .store_override(store_override) .extra_headers(extra_headers) .build(self.streaming.provider())?; self.stream_request(request).await } fn path(&self) -> &'static str { match self.streaming.provider().wire { WireApi::Responses | WireApi::Compact => "responses", WireApi::Chat => "chat/completions", } } pub async fn stream( &self, body: Value, extra_headers: HeaderMap, ) -> Result<ResponseStream, ApiError> { self.streaming .stream(self.path(), body, extra_headers, spawn_response_stream) .await } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/compact.rs
codex-rs/codex-api/src/endpoint/compact.rs
use crate::auth::AuthProvider; use crate::auth::add_auth_headers; use crate::common::CompactionInput; use crate::error::ApiError; use crate::provider::Provider; use crate::provider::WireApi; use crate::telemetry::run_with_request_telemetry; use codex_client::HttpTransport; use codex_client::RequestTelemetry; use codex_protocol::models::ResponseItem; use http::HeaderMap; use http::Method; use serde::Deserialize; use serde_json::to_value; use std::sync::Arc; pub struct CompactClient<T: HttpTransport, A: AuthProvider> { transport: T, provider: Provider, auth: A, request_telemetry: Option<Arc<dyn RequestTelemetry>>, } impl<T: HttpTransport, A: AuthProvider> CompactClient<T, A> { pub fn new(transport: T, provider: Provider, auth: A) -> Self { Self { transport, provider, auth, request_telemetry: None, } } pub fn with_telemetry(mut self, request: Option<Arc<dyn RequestTelemetry>>) -> Self { self.request_telemetry = request; self } fn path(&self) -> Result<&'static str, ApiError> { match self.provider.wire { WireApi::Compact | WireApi::Responses => Ok("responses/compact"), WireApi::Chat => Err(ApiError::Stream( "compact endpoint requires responses wire api".to_string(), )), } } pub async fn compact( &self, body: serde_json::Value, extra_headers: HeaderMap, ) -> Result<Vec<ResponseItem>, ApiError> { let path = self.path()?; let builder = || { let mut req = self.provider.build_request(Method::POST, path); req.headers.extend(extra_headers.clone()); req.body = Some(body.clone()); add_auth_headers(&self.auth, req) }; let resp = run_with_request_telemetry( self.provider.retry.to_policy(), self.request_telemetry.clone(), builder, |req| self.transport.execute(req), ) .await?; let parsed: CompactHistoryResponse = serde_json::from_slice(&resp.body).map_err(|e| ApiError::Stream(e.to_string()))?; Ok(parsed.output) } pub async fn compact_input( &self, input: &CompactionInput<'_>, extra_headers: HeaderMap, ) -> Result<Vec<ResponseItem>, ApiError> { let body = to_value(input) 
.map_err(|e| ApiError::Stream(format!("failed to encode compaction input: {e}")))?; self.compact(body, extra_headers).await } } #[derive(Debug, Deserialize)] struct CompactHistoryResponse { output: Vec<ResponseItem>, } #[cfg(test)] mod tests { use super::*; use crate::provider::RetryConfig; use async_trait::async_trait; use codex_client::Request; use codex_client::Response; use codex_client::StreamResponse; use codex_client::TransportError; use http::HeaderMap; use std::time::Duration; #[derive(Clone, Default)] struct DummyTransport; #[async_trait] impl HttpTransport for DummyTransport { async fn execute(&self, _req: Request) -> Result<Response, TransportError> { Err(TransportError::Build("execute should not run".to_string())) } async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> { Err(TransportError::Build("stream should not run".to_string())) } } #[derive(Clone, Default)] struct DummyAuth; impl AuthProvider for DummyAuth { fn bearer_token(&self) -> Option<String> { None } } fn provider(wire: WireApi) -> Provider { Provider { name: "test".to_string(), base_url: "https://example.com/v1".to_string(), query_params: None, wire, headers: HeaderMap::new(), retry: RetryConfig { max_attempts: 1, base_delay: Duration::from_millis(1), retry_429: false, retry_5xx: true, retry_transport: true, }, stream_idle_timeout: Duration::from_secs(1), } } #[tokio::test] async fn errors_when_wire_is_chat() { let client = CompactClient::new(DummyTransport, provider(WireApi::Chat), DummyAuth); let input = CompactionInput { model: "gpt-test", input: &[], instructions: "inst", }; let err = client .compact_input(&input, HeaderMap::new()) .await .expect_err("expected wire mismatch to fail"); match err { ApiError::Stream(msg) => { assert_eq!(msg, "compact endpoint requires responses wire api"); } other => panic!("unexpected error: {other:?}"), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/mod.rs
codex-rs/codex-api/src/endpoint/mod.rs
pub mod chat; pub mod compact; pub mod models; pub mod responses; mod streaming;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/chat.rs
codex-rs/codex-api/src/endpoint/chat.rs
use crate::ChatRequest; use crate::auth::AuthProvider; use crate::common::Prompt as ApiPrompt; use crate::common::ResponseEvent; use crate::common::ResponseStream; use crate::endpoint::streaming::StreamingClient; use crate::error::ApiError; use crate::provider::Provider; use crate::provider::WireApi; use crate::sse::chat::spawn_chat_stream; use crate::telemetry::SseTelemetry; use codex_client::HttpTransport; use codex_client::RequestTelemetry; use codex_protocol::models::ContentItem; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::SessionSource; use futures::Stream; use http::HeaderMap; use serde_json::Value; use std::collections::VecDeque; use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::task::Poll; pub struct ChatClient<T: HttpTransport, A: AuthProvider> { streaming: StreamingClient<T, A>, } impl<T: HttpTransport, A: AuthProvider> ChatClient<T, A> { pub fn new(transport: T, provider: Provider, auth: A) -> Self { Self { streaming: StreamingClient::new(transport, provider, auth), } } pub fn with_telemetry( self, request: Option<Arc<dyn RequestTelemetry>>, sse: Option<Arc<dyn SseTelemetry>>, ) -> Self { Self { streaming: self.streaming.with_telemetry(request, sse), } } pub async fn stream_request(&self, request: ChatRequest) -> Result<ResponseStream, ApiError> { self.stream(request.body, request.headers).await } pub async fn stream_prompt( &self, model: &str, prompt: &ApiPrompt, conversation_id: Option<String>, session_source: Option<SessionSource>, ) -> Result<ResponseStream, ApiError> { use crate::requests::ChatRequestBuilder; let request = ChatRequestBuilder::new(model, &prompt.instructions, &prompt.input, &prompt.tools) .conversation_id(conversation_id) .session_source(session_source) .build(self.streaming.provider())?; self.stream_request(request).await } fn path(&self) -> &'static str { match self.streaming.provider().wire { WireApi::Chat => "chat/completions", _ 
=> "responses", } } pub async fn stream( &self, body: Value, extra_headers: HeaderMap, ) -> Result<ResponseStream, ApiError> { self.streaming .stream(self.path(), body, extra_headers, spawn_chat_stream) .await } } #[derive(Copy, Clone, Eq, PartialEq)] pub enum AggregateMode { AggregatedOnly, Streaming, } /// Stream adapter that merges token deltas into a single assistant message per turn. pub struct AggregatedStream { inner: ResponseStream, cumulative: String, cumulative_reasoning: String, pending: VecDeque<ResponseEvent>, mode: AggregateMode, } impl Stream for AggregatedStream { type Item = Result<ResponseEvent, ApiError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let this = self.get_mut(); if let Some(ev) = this.pending.pop_front() { return Poll::Ready(Some(Ok(ev))); } loop { match Pin::new(&mut this.inner).poll_next(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))) => { let is_assistant_message = matches!( &item, ResponseItem::Message { role, .. } if role == "assistant" ); if is_assistant_message { match this.mode { AggregateMode::AggregatedOnly => { if this.cumulative.is_empty() && let ResponseItem::Message { content, .. 
} = &item && let Some(text) = content.iter().find_map(|c| match c { ContentItem::OutputText { text } => Some(text), _ => None, }) { this.cumulative.push_str(text); } continue; } AggregateMode::Streaming => { if this.cumulative.is_empty() { return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone( item, )))); } else { continue; } } } } return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))); } Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => { return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))); } Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))) => { return Poll::Ready(Some(Ok(ResponseEvent::ModelsEtag(etag)))); } Poll::Ready(Some(Ok(ResponseEvent::Completed { response_id, token_usage, }))) => { let mut emitted_any = false; if !this.cumulative_reasoning.is_empty() { let aggregated_reasoning = ResponseItem::Reasoning { id: String::new(), summary: Vec::new(), content: Some(vec![ReasoningItemContent::ReasoningText { text: std::mem::take(&mut this.cumulative_reasoning), }]), encrypted_content: None, }; this.pending .push_back(ResponseEvent::OutputItemDone(aggregated_reasoning)); emitted_any = true; } if !this.cumulative.is_empty() { let aggregated_message = ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: std::mem::take(&mut this.cumulative), }], }; this.pending .push_back(ResponseEvent::OutputItemDone(aggregated_message)); emitted_any = true; } if emitted_any { this.pending.push_back(ResponseEvent::Completed { response_id: response_id.clone(), token_usage: token_usage.clone(), }); if let Some(ev) = this.pending.pop_front() { return Poll::Ready(Some(Ok(ev))); } } return Poll::Ready(Some(Ok(ResponseEvent::Completed { response_id, token_usage, }))); } Poll::Ready(Some(Ok(ResponseEvent::Created))) => { continue; } Poll::Ready(Some(Ok(ResponseEvent::OutputTextDelta(delta)))) => { this.cumulative.push_str(&delta); if matches!(this.mode, AggregateMode::Streaming) { return 
Poll::Ready(Some(Ok(ResponseEvent::OutputTextDelta(delta)))); } else { continue; } } Poll::Ready(Some(Ok(ResponseEvent::ReasoningContentDelta { delta, content_index, }))) => { this.cumulative_reasoning.push_str(&delta); if matches!(this.mode, AggregateMode::Streaming) { return Poll::Ready(Some(Ok(ResponseEvent::ReasoningContentDelta { delta, content_index, }))); } else { continue; } } Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryDelta { .. }))) => continue, Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded { .. }))) => { continue; } Poll::Ready(Some(Ok(ResponseEvent::OutputItemAdded(item)))) => { return Poll::Ready(Some(Ok(ResponseEvent::OutputItemAdded(item)))); } } } } } pub trait AggregateStreamExt { fn aggregate(self) -> AggregatedStream; fn streaming_mode(self) -> ResponseStream; } impl AggregateStreamExt for ResponseStream { fn aggregate(self) -> AggregatedStream { AggregatedStream::new(self, AggregateMode::AggregatedOnly) } fn streaming_mode(self) -> ResponseStream { self } } impl AggregatedStream { fn new(inner: ResponseStream, mode: AggregateMode) -> Self { AggregatedStream { inner, cumulative: String::new(), cumulative_reasoning: String::new(), pending: VecDeque::new(), mode, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/streaming.rs
codex-rs/codex-api/src/endpoint/streaming.rs
use crate::auth::AuthProvider;
use crate::auth::add_auth_headers;
use crate::common::ResponseStream;
use crate::error::ApiError;
use crate::provider::Provider;
use crate::telemetry::SseTelemetry;
use crate::telemetry::run_with_request_telemetry;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_client::StreamResponse;
use http::HeaderMap;
use http::Method;
use serde_json::Value;
use std::sync::Arc;
use std::time::Duration;

/// Shared low-level client for SSE streaming endpoints: builds the POST
/// request, attaches auth headers, and hands the raw byte stream to a
/// caller-supplied spawner that produces a typed [`ResponseStream`].
pub(crate) struct StreamingClient<T: HttpTransport, A: AuthProvider> {
    transport: T,
    provider: Provider,
    auth: A,
    // Optional observability hooks; both are disabled (None) by default.
    request_telemetry: Option<Arc<dyn RequestTelemetry>>,
    sse_telemetry: Option<Arc<dyn SseTelemetry>>,
}

impl<T: HttpTransport, A: AuthProvider> StreamingClient<T, A> {
    /// Create a client with telemetry disabled.
    pub(crate) fn new(transport: T, provider: Provider, auth: A) -> Self {
        Self {
            transport,
            provider,
            auth,
            request_telemetry: None,
            sse_telemetry: None,
        }
    }

    /// Builder-style setter for the request/SSE telemetry hooks.
    pub(crate) fn with_telemetry(
        mut self,
        request: Option<Arc<dyn RequestTelemetry>>,
        sse: Option<Arc<dyn SseTelemetry>>,
    ) -> Self {
        self.request_telemetry = request;
        self.sse_telemetry = sse;
        self
    }

    /// Borrow the provider configuration this client was built with.
    pub(crate) fn provider(&self) -> &Provider {
        &self.provider
    }

    /// POST `body` to `path` as an SSE request and wrap the resulting byte
    /// stream via `spawner` (which also receives the provider's idle timeout
    /// and the optional SSE telemetry hook).
    ///
    /// # Errors
    /// Returns [`ApiError`] when the request fails under the provider's
    /// retry policy (`provider.retry.to_policy()`).
    pub(crate) async fn stream(
        &self,
        path: &str,
        body: Value,
        extra_headers: HeaderMap,
        spawner: fn(StreamResponse, Duration, Option<Arc<dyn SseTelemetry>>) -> ResponseStream,
    ) -> Result<ResponseStream, ApiError> {
        // NOTE(review): the builder closure clones `body`/`extra_headers` so
        // it can presumably be re-invoked per retry attempt by
        // `run_with_request_telemetry` — confirm in that helper.
        let builder = || {
            let mut req = self.provider.build_request(Method::POST, path);
            req.headers.extend(extra_headers.clone());
            req.headers.insert(
                http::header::ACCEPT,
                http::HeaderValue::from_static("text/event-stream"),
            );
            req.body = Some(body.clone());
            add_auth_headers(&self.auth, req)
        };
        let stream_response = run_with_request_telemetry(
            self.provider.retry.to_policy(),
            self.request_telemetry.clone(),
            builder,
            |req| self.transport.stream(req),
        )
        .await?;
        Ok(spawner(
            stream_response,
            self.provider.stream_idle_timeout,
            self.sse_telemetry.clone(),
        ))
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/src/endpoint/models.rs
codex-rs/codex-api/src/endpoint/models.rs
use crate::auth::AuthProvider;
use crate::auth::add_auth_headers;
use crate::error::ApiError;
use crate::provider::Provider;
use crate::telemetry::run_with_request_telemetry;
use codex_client::HttpTransport;
use codex_client::RequestTelemetry;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelsResponse;
use http::HeaderMap;
use http::Method;
use http::header::ETAG;
use std::sync::Arc;

/// Client for the `GET models` endpoint: fetches the list of available
/// models plus the response's `ETag` header for cache validation.
pub struct ModelsClient<T: HttpTransport, A: AuthProvider> {
    transport: T,
    provider: Provider,
    auth: A,
    // Optional request telemetry hook; disabled (None) by default.
    request_telemetry: Option<Arc<dyn RequestTelemetry>>,
}

impl<T: HttpTransport, A: AuthProvider> ModelsClient<T, A> {
    /// Create a client with telemetry disabled.
    pub fn new(transport: T, provider: Provider, auth: A) -> Self {
        Self {
            transport,
            provider,
            auth,
            request_telemetry: None,
        }
    }

    /// Builder-style setter for the request telemetry hook.
    pub fn with_telemetry(mut self, request: Option<Arc<dyn RequestTelemetry>>) -> Self {
        self.request_telemetry = request;
        self
    }

    /// Path of the models endpoint, relative to the provider base URL.
    fn path(&self) -> &'static str {
        "models"
    }

    /// Fetch the model list, appending `client_version` as a query parameter.
    ///
    /// Returns the decoded models together with the response `ETag` header
    /// (if present).
    ///
    /// # Errors
    /// Returns [`ApiError::Stream`] when the response body cannot be decoded
    /// as a [`ModelsResponse`]; transport/HTTP failures surface from the
    /// retrying request helper.
    pub async fn list_models(
        &self,
        client_version: &str,
        extra_headers: HeaderMap,
    ) -> Result<(Vec<ModelInfo>, Option<String>), ApiError> {
        let builder = || {
            let mut req = self.provider.build_request(Method::GET, self.path());
            req.headers.extend(extra_headers.clone());
            // Use '&' when the provider base URL already carries a query string.
            let separator = if req.url.contains('?') { '&' } else { '?' };
            req.url = format!("{}{}client_version={client_version}", req.url, separator);
            add_auth_headers(&self.auth, req)
        };
        let resp = run_with_request_telemetry(
            self.provider.retry.to_policy(),
            self.request_telemetry.clone(),
            builder,
            |req| self.transport.execute(req),
        )
        .await?;
        let header_etag = resp
            .headers
            .get(ETAG)
            .and_then(|value| value.to_str().ok())
            .map(ToString::to_string);
        let ModelsResponse { models } = serde_json::from_slice::<ModelsResponse>(&resp.body)
            .map_err(|e| {
                ApiError::Stream(format!(
                    "failed to decode models response: {e}; body: {}",
                    String::from_utf8_lossy(&resp.body)
                ))
            })?;
        Ok((models, header_etag))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::provider::RetryConfig;
    use crate::provider::WireApi;
    use async_trait::async_trait;
    use codex_client::Request;
    use codex_client::Response;
    use codex_client::StreamResponse;
    use codex_client::TransportError;
    use http::HeaderMap;
    use http::StatusCode;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::sync::Arc;
    use std::sync::Mutex;
    use std::time::Duration;

    /// Test transport that records the last request and replies with a
    /// canned [`ModelsResponse`] (plus an optional ETag header).
    #[derive(Clone)]
    struct CapturingTransport {
        last_request: Arc<Mutex<Option<Request>>>,
        body: Arc<ModelsResponse>,
        etag: Option<String>,
    }

    impl Default for CapturingTransport {
        fn default() -> Self {
            Self {
                last_request: Arc::new(Mutex::new(None)),
                body: Arc::new(ModelsResponse { models: Vec::new() }),
                etag: None,
            }
        }
    }

    #[async_trait]
    impl HttpTransport for CapturingTransport {
        async fn execute(&self, req: Request) -> Result<Response, TransportError> {
            *self.last_request.lock().unwrap() = Some(req);
            let body = serde_json::to_vec(&*self.body).unwrap();
            let mut headers = HeaderMap::new();
            if let Some(etag) = &self.etag {
                headers.insert(ETAG, etag.parse().unwrap());
            }
            Ok(Response {
                status: StatusCode::OK,
                headers,
                body: body.into(),
            })
        }

        async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
            Err(TransportError::Build("stream should not run".to_string()))
        }
    }

    /// Auth provider with no credentials.
    #[derive(Clone, Default)]
    struct DummyAuth;

    impl AuthProvider for DummyAuth {
        fn bearer_token(&self) -> Option<String> {
            None
        }
    }

    /// Minimal provider config pointing at `base_url` with a single attempt.
    fn provider(base_url: &str) -> Provider {
        Provider {
            name: "test".to_string(),
            base_url: base_url.to_string(),
            query_params: None,
            wire: WireApi::Responses,
            headers: HeaderMap::new(),
            retry: RetryConfig {
                max_attempts: 1,
                base_delay: Duration::from_millis(1),
                retry_429: false,
                retry_5xx: true,
                retry_transport: true,
            },
            stream_idle_timeout: Duration::from_secs(1),
        }
    }

    // The client_version query parameter must be appended to the URL.
    #[tokio::test]
    async fn appends_client_version_query() {
        let response = ModelsResponse { models: Vec::new() };
        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: None,
        };
        let client = ModelsClient::new(
            transport.clone(),
            provider("https://example.com/api/codex"),
            DummyAuth,
        );

        let (models, _) = client
            .list_models("0.99.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(models.len(), 0);
        let url = transport
            .last_request
            .lock()
            .unwrap()
            .as_ref()
            .unwrap()
            .url
            .clone();
        assert_eq!(
            url,
            "https://example.com/api/codex/models?client_version=0.99.0"
        );
    }

    // A full model record round-trips through the JSON decoding path.
    #[tokio::test]
    async fn parses_models_response() {
        let response = ModelsResponse {
            models: vec![
                serde_json::from_value(json!({
                    "slug": "gpt-test",
                    "display_name": "gpt-test",
                    "description": "desc",
                    "default_reasoning_level": "medium",
                    "supported_reasoning_levels": [{"effort": "low", "description": "low"}, {"effort": "medium", "description": "medium"}, {"effort": "high", "description": "high"}],
                    "shell_type": "shell_command",
                    "visibility": "list",
                    "minimal_client_version": [0, 99, 0],
                    "supported_in_api": true,
                    "priority": 1,
                    "upgrade": null,
                    "base_instructions": null,
                    "supports_reasoning_summaries": false,
                    "support_verbosity": false,
                    "default_verbosity": null,
                    "apply_patch_tool_type": null,
                    "truncation_policy": {"mode": "bytes", "limit": 10_000},
                    "supports_parallel_tool_calls": false,
                    "context_window": null,
                    "experimental_supported_tools": [],
                }))
                .unwrap(),
            ],
        };
        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: None,
        };
        let client = ModelsClient::new(
            transport,
            provider("https://example.com/api/codex"),
            DummyAuth,
        );

        let (models, _) = client
            .list_models("0.99.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(models.len(), 1);
        assert_eq!(models[0].slug, "gpt-test");
        assert_eq!(models[0].supported_in_api, true);
        assert_eq!(models[0].priority, 1);
    }

    // The ETag response header is surfaced alongside the model list.
    #[tokio::test]
    async fn list_models_includes_etag() {
        let response = ModelsResponse { models: Vec::new() };
        let transport = CapturingTransport {
            last_request: Arc::new(Mutex::new(None)),
            body: Arc::new(response),
            etag: Some("\"abc\"".to_string()),
        };
        let client = ModelsClient::new(
            transport,
            provider("https://example.com/api/codex"),
            DummyAuth,
        );

        let (models, etag) = client
            .list_models("0.1.0", HeaderMap::new())
            .await
            .expect("request should succeed");

        assert_eq!(models.len(), 0);
        assert_eq!(etag, Some("\"abc\"".to_string()));
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/tests/sse_end_to_end.rs
codex-rs/codex-api/tests/sse_end_to_end.rs
use std::time::Duration;

use anyhow::Result;
use async_trait::async_trait;
use bytes::Bytes;
use codex_api::AggregateStreamExt;
use codex_api::AuthProvider;
use codex_api::Provider;
use codex_api::ResponseEvent;
use codex_api::ResponsesClient;
use codex_api::WireApi;
use codex_client::HttpTransport;
use codex_client::Request;
use codex_client::Response;
use codex_client::StreamResponse;
use codex_client::TransportError;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use futures::StreamExt;
use http::HeaderMap;
use http::StatusCode;
use pretty_assertions::assert_eq;
use serde_json::Value;

/// Transport that answers every stream request with a fixed SSE body.
#[derive(Clone)]
struct FixtureSseTransport {
    body: String,
}

impl FixtureSseTransport {
    fn new(body: String) -> Self {
        Self { body }
    }
}

#[async_trait]
impl HttpTransport for FixtureSseTransport {
    // Only `stream` is expected to be exercised by these tests.
    async fn execute(&self, _req: Request) -> Result<Response, TransportError> {
        Err(TransportError::Build("execute should not run".to_string()))
    }

    async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
        // The whole fixture body is delivered as a single chunk.
        let stream = futures::stream::iter(vec![Ok::<Bytes, TransportError>(Bytes::from(
            self.body.clone(),
        ))]);
        Ok(StreamResponse {
            status: StatusCode::OK,
            headers: HeaderMap::new(),
            bytes: Box::pin(stream),
        })
    }
}

/// Auth provider with no credentials.
#[derive(Clone, Default)]
struct NoAuth;

impl AuthProvider for NoAuth {
    fn bearer_token(&self) -> Option<String> {
        None
    }
}

/// Minimal single-attempt provider config for the given wire protocol.
fn provider(name: &str, wire: WireApi) -> Provider {
    Provider {
        name: name.to_string(),
        base_url: "https://example.com/v1".to_string(),
        query_params: None,
        wire,
        headers: HeaderMap::new(),
        retry: codex_api::provider::RetryConfig {
            max_attempts: 1,
            base_delay: Duration::from_millis(1),
            retry_429: false,
            retry_5xx: false,
            retry_transport: true,
        },
        stream_idle_timeout: Duration::from_millis(50),
    }
}

/// Render JSON event objects as an SSE body (`event:`/`data:` records).
/// Events whose only field is `type` are emitted without a data line.
fn build_responses_body(events: Vec<Value>) -> String {
    let mut body = String::new();
    for e in events {
        let kind = e
            .get("type")
            .and_then(|v| v.as_str())
            .unwrap_or_else(|| panic!("fixture event missing type in SSE fixture: {e}"));
        if e.as_object().map(|o| o.len() == 1).unwrap_or(false) {
            body.push_str(&format!("event: {kind}\n\n"));
        } else {
            body.push_str(&format!("event: {kind}\ndata: {e}\n\n"));
        }
    }
    body
}

// Two output items followed by a completed event should surface as exactly
// those three typed events (rate-limit events are filtered out).
#[tokio::test]
async fn responses_stream_parses_items_and_completed_end_to_end() -> Result<()> {
    let item1 = serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "message",
            "role": "assistant",
            "content": [{"type": "output_text", "text": "Hello"}]
        }
    });
    let item2 = serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "message",
            "role": "assistant",
            "content": [{"type": "output_text", "text": "World"}]
        }
    });
    let completed = serde_json::json!({
        "type": "response.completed",
        "response": { "id": "resp1" }
    });
    let body = build_responses_body(vec![item1, item2, completed]);

    let transport = FixtureSseTransport::new(body);
    let client = ResponsesClient::new(transport, provider("openai", WireApi::Responses), NoAuth);
    let mut stream = client
        .stream(serde_json::json!({"echo": true}), HeaderMap::new())
        .await?;

    let mut events = Vec::new();
    while let Some(ev) = stream.next().await {
        events.push(ev?);
    }

    let events: Vec<ResponseEvent> = events
        .into_iter()
        .filter(|ev| !matches!(ev, ResponseEvent::RateLimits(_)))
        .collect();

    assert_eq!(events.len(), 3);
    match &events[0] {
        ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. }) => {
            assert_eq!(role, "assistant");
        }
        other => panic!("unexpected first event: {other:?}"),
    }
    match &events[1] {
        ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. }) => {
            assert_eq!(role, "assistant");
        }
        other => panic!("unexpected second event: {other:?}"),
    }
    match &events[2] {
        ResponseEvent::Completed {
            response_id,
            token_usage,
        } => {
            assert_eq!(response_id, "resp1");
            assert!(token_usage.is_none());
        }
        other => panic!("unexpected third event: {other:?}"),
    }
    Ok(())
}

// In aggregate mode, text deltas should be folded into a single message
// item followed by the completed event.
#[tokio::test]
async fn responses_stream_aggregates_output_text_deltas() -> Result<()> {
    let delta1 = serde_json::json!({
        "type": "response.output_text.delta",
        "delta": "Hello, "
    });
    let delta2 = serde_json::json!({
        "type": "response.output_text.delta",
        "delta": "world"
    });
    let completed = serde_json::json!({
        "type": "response.completed",
        "response": { "id": "resp-agg" }
    });
    let body = build_responses_body(vec![delta1, delta2, completed]);

    let transport = FixtureSseTransport::new(body);
    let client = ResponsesClient::new(transport, provider("openai", WireApi::Responses), NoAuth);
    let stream = client
        .stream(serde_json::json!({"echo": true}), HeaderMap::new())
        .await?;
    let mut stream = stream.aggregate();

    let mut events = Vec::new();
    while let Some(ev) = stream.next().await {
        events.push(ev?);
    }

    let events: Vec<ResponseEvent> = events
        .into_iter()
        .filter(|ev| !matches!(ev, ResponseEvent::RateLimits(_)))
        .collect();

    assert_eq!(events.len(), 2);
    match &events[0] {
        ResponseEvent::OutputItemDone(ResponseItem::Message { content, .. }) => {
            let mut aggregated = String::new();
            for item in content {
                if let ContentItem::OutputText { text } = item {
                    aggregated.push_str(text);
                }
            }
            assert_eq!(aggregated, "Hello, world");
        }
        other => panic!("unexpected first event: {other:?}"),
    }
    match &events[1] {
        ResponseEvent::Completed { response_id, .. } => {
            assert_eq!(response_id, "resp-agg");
        }
        other => panic!("unexpected second event: {other:?}"),
    }
    Ok(())
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/tests/clients.rs
codex-rs/codex-api/tests/clients.rs
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;

use anyhow::Result;
use async_trait::async_trait;
use bytes::Bytes;
use codex_api::AuthProvider;
use codex_api::ChatClient;
use codex_api::Provider;
use codex_api::ResponsesClient;
use codex_api::ResponsesOptions;
use codex_api::WireApi;
use codex_client::HttpTransport;
use codex_client::Request;
use codex_client::Response;
use codex_client::StreamResponse;
use codex_client::TransportError;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use http::HeaderMap;
use http::StatusCode;
use pretty_assertions::assert_eq;
use serde_json::Value;

/// Assert exactly one request was recorded and its URL ends with `suffix`.
fn assert_path_ends_with(requests: &[Request], suffix: &str) {
    assert_eq!(requests.len(), 1);
    let url = &requests[0].url;
    assert!(
        url.ends_with(suffix),
        "expected url to end with {suffix}, got {url}"
    );
}

/// Shared store of the stream requests a [`RecordingTransport`] has seen.
#[derive(Debug, Default, Clone)]
struct RecordingState {
    stream_requests: Arc<Mutex<Vec<Request>>>,
}

impl RecordingState {
    fn record(&self, req: Request) {
        let mut guard = self
            .stream_requests
            .lock()
            .unwrap_or_else(|err| panic!("mutex poisoned: {err}"));
        guard.push(req);
    }

    // Drains and returns all recorded requests.
    fn take_stream_requests(&self) -> Vec<Request> {
        let mut guard = self
            .stream_requests
            .lock()
            .unwrap_or_else(|err| panic!("mutex poisoned: {err}"));
        std::mem::take(&mut *guard)
    }
}

/// Transport that records every stream request and returns an empty stream.
#[derive(Clone)]
struct RecordingTransport {
    state: RecordingState,
}

impl RecordingTransport {
    fn new(state: RecordingState) -> Self {
        Self { state }
    }
}

#[async_trait]
impl HttpTransport for RecordingTransport {
    async fn execute(&self, _req: Request) -> Result<Response, TransportError> {
        Err(TransportError::Build("execute should not run".to_string()))
    }

    async fn stream(&self, req: Request) -> Result<StreamResponse, TransportError> {
        self.state.record(req);
        let stream = futures::stream::iter(Vec::<Result<Bytes, TransportError>>::new());
        Ok(StreamResponse {
            status: StatusCode::OK,
            headers: HeaderMap::new(),
            bytes: Box::pin(stream),
        })
    }
}

/// Auth provider with no credentials.
#[derive(Clone, Default)]
struct NoAuth;

impl AuthProvider for NoAuth {
    fn bearer_token(&self) -> Option<String> {
        None
    }
}

/// Auth provider with a fixed bearer token and account id.
#[derive(Clone)]
struct StaticAuth {
    token: String,
    account_id: String,
}

impl StaticAuth {
    fn new(token: &str, account_id: &str) -> Self {
        Self {
            token: token.to_string(),
            account_id: account_id.to_string(),
        }
    }
}

impl AuthProvider for StaticAuth {
    fn bearer_token(&self) -> Option<String> {
        Some(self.token.clone())
    }

    fn account_id(&self) -> Option<String> {
        Some(self.account_id.clone())
    }
}

/// Minimal single-attempt provider config for the given wire protocol.
fn provider(name: &str, wire: WireApi) -> Provider {
    Provider {
        name: name.to_string(),
        base_url: "https://example.com/v1".to_string(),
        query_params: None,
        wire,
        headers: HeaderMap::new(),
        retry: codex_api::provider::RetryConfig {
            max_attempts: 1,
            base_delay: Duration::from_millis(1),
            retry_429: false,
            retry_5xx: false,
            retry_transport: true,
        },
        stream_idle_timeout: Duration::from_millis(10),
    }
}

/// Transport whose first stream attempt fails with a network error and whose
/// second attempt succeeds, for exercising the retry path.
#[derive(Clone)]
struct FlakyTransport {
    // Attempt counter shared across clones.
    state: Arc<Mutex<i64>>,
}

impl Default for FlakyTransport {
    fn default() -> Self {
        Self::new()
    }
}

impl FlakyTransport {
    fn new() -> Self {
        Self {
            state: Arc::new(Mutex::new(0)),
        }
    }

    fn attempts(&self) -> i64 {
        *self
            .state
            .lock()
            .unwrap_or_else(|err| panic!("mutex poisoned: {err}"))
    }
}

#[async_trait]
impl HttpTransport for FlakyTransport {
    async fn execute(&self, _req: Request) -> Result<Response, TransportError> {
        Err(TransportError::Build("execute should not run".to_string()))
    }

    async fn stream(&self, _req: Request) -> Result<StreamResponse, TransportError> {
        let mut attempts = self
            .state
            .lock()
            .unwrap_or_else(|err| panic!("mutex poisoned: {err}"));
        *attempts += 1;
        if *attempts == 1 {
            return Err(TransportError::Network("first attempt fails".to_string()));
        }
        let stream = futures::stream::iter(vec![Ok(Bytes::from(
            r#"event: message
data: {"id":"resp-1","output":[{"type":"message","role":"assistant","content":[{"type":"output_text","text":"hi"}]}]}

"#,
        ))]);
        Ok(StreamResponse {
            status: StatusCode::OK,
            headers: HeaderMap::new(),
            bytes: Box::pin(stream),
        })
    }
}

// Wire protocol selects the endpoint path: Chat -> /chat/completions.
#[tokio::test]
async fn chat_client_uses_chat_completions_path_for_chat_wire() -> Result<()> {
    let state = RecordingState::default();
    let transport = RecordingTransport::new(state.clone());
    let client = ChatClient::new(transport, provider("openai", WireApi::Chat), NoAuth);
    let body = serde_json::json!({ "echo": true });
    let _stream = client.stream(body, HeaderMap::new()).await?;

    let requests = state.take_stream_requests();
    assert_path_ends_with(&requests, "/chat/completions");
    Ok(())
}

// Wire protocol selects the endpoint path: Responses -> /responses.
#[tokio::test]
async fn chat_client_uses_responses_path_for_responses_wire() -> Result<()> {
    let state = RecordingState::default();
    let transport = RecordingTransport::new(state.clone());
    let client = ChatClient::new(transport, provider("openai", WireApi::Responses), NoAuth);
    let body = serde_json::json!({ "echo": true });
    let _stream = client.stream(body, HeaderMap::new()).await?;

    let requests = state.take_stream_requests();
    assert_path_ends_with(&requests, "/responses");
    Ok(())
}

#[tokio::test]
async fn responses_client_uses_responses_path_for_responses_wire() -> Result<()> {
    let state = RecordingState::default();
    let transport = RecordingTransport::new(state.clone());
    let client = ResponsesClient::new(transport, provider("openai", WireApi::Responses), NoAuth);
    let body = serde_json::json!({ "echo": true });
    let _stream = client.stream(body, HeaderMap::new()).await?;

    let requests = state.take_stream_requests();
    assert_path_ends_with(&requests, "/responses");
    Ok(())
}

#[tokio::test]
async fn responses_client_uses_chat_path_for_chat_wire() -> Result<()> {
    let state = RecordingState::default();
    let transport = RecordingTransport::new(state.clone());
    let client = ResponsesClient::new(transport, provider("openai", WireApi::Chat), NoAuth);
    let body = serde_json::json!({ "echo": true });
    let _stream = client.stream(body, HeaderMap::new()).await?;

    let requests = state.take_stream_requests();
    assert_path_ends_with(&requests, "/chat/completions");
    Ok(())
}

// Bearer token, account id, and SSE Accept header must all be attached.
#[tokio::test]
async fn streaming_client_adds_auth_headers() -> Result<()> {
    let state = RecordingState::default();
    let transport = RecordingTransport::new(state.clone());
    let auth = StaticAuth::new("secret-token", "acct-1");
    let client = ResponsesClient::new(transport, provider("openai", WireApi::Responses), auth);
    let body = serde_json::json!({ "model": "gpt-test" });
    let _stream = client.stream(body, HeaderMap::new()).await?;

    let requests = state.take_stream_requests();
    assert_eq!(requests.len(), 1);
    let req = &requests[0];
    let auth_header = req.headers.get(http::header::AUTHORIZATION);
    assert!(auth_header.is_some(), "missing auth header");
    assert_eq!(
        auth_header.unwrap().to_str().ok(),
        Some("Bearer secret-token")
    );
    let account_header = req.headers.get("ChatGPT-Account-ID");
    assert!(account_header.is_some(), "missing account header");
    assert_eq!(account_header.unwrap().to_str().ok(), Some("acct-1"));
    let accept_header = req.headers.get(http::header::ACCEPT);
    assert!(accept_header.is_some(), "missing Accept header");
    assert_eq!(
        accept_header.unwrap().to_str().ok(),
        Some("text/event-stream")
    );
    Ok(())
}

// With max_attempts = 2, a transient network failure is retried once.
#[tokio::test]
async fn streaming_client_retries_on_transport_error() -> Result<()> {
    let transport = FlakyTransport::new();
    let mut provider = provider("openai", WireApi::Responses);
    provider.retry.max_attempts = 2;
    let client = ResponsesClient::new(transport.clone(), provider, NoAuth);

    let prompt = codex_api::Prompt {
        instructions: "Say hi".to_string(),
        input: vec![ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![ContentItem::InputText {
                text: "hi".to_string(),
            }],
        }],
        tools: Vec::<Value>::new(),
        parallel_tool_calls: false,
        output_schema: None,
    };
    let options = ResponsesOptions::default();
    let _stream = client.stream_prompt("gpt-test", &prompt, options).await?;

    assert_eq!(transport.attempts(), 2);
    Ok(())
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-api/tests/models_integration.rs
codex-rs/codex-api/tests/models_integration.rs
use codex_api::AuthProvider;
use codex_api::ModelsClient;
use codex_api::provider::Provider;
use codex_api::provider::RetryConfig;
use codex_api::provider::WireApi;
use codex_client::ReqwestTransport;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelInfo;
use codex_protocol::openai_models::ModelVisibility;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::TruncationPolicyConfig;
use http::HeaderMap;
use http::Method;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;

/// Auth provider with no credentials.
#[derive(Clone, Default)]
struct DummyAuth;

impl AuthProvider for DummyAuth {
    fn bearer_token(&self) -> Option<String> {
        None
    }
}

/// Minimal single-attempt provider config pointing at `base_url`.
fn provider(base_url: &str) -> Provider {
    Provider {
        name: "test".to_string(),
        base_url: base_url.to_string(),
        query_params: None,
        wire: WireApi::Responses,
        headers: HeaderMap::new(),
        retry: RetryConfig {
            max_attempts: 1,
            base_delay: std::time::Duration::from_millis(1),
            retry_429: false,
            retry_5xx: true,
            retry_transport: true,
        },
        stream_idle_timeout: std::time::Duration::from_secs(1),
    }
}

// End-to-end: ModelsClient issues GET <base>/models against a real HTTP
// server (wiremock) and decodes the JSON body.
#[tokio::test]
async fn models_client_hits_models_endpoint() {
    let server = MockServer::start().await;
    let base_url = format!("{}/api/codex", server.uri());

    let response = ModelsResponse {
        models: vec![ModelInfo {
            slug: "gpt-test".to_string(),
            display_name: "gpt-test".to_string(),
            description: Some("desc".to_string()),
            default_reasoning_level: ReasoningEffort::Medium,
            supported_reasoning_levels: vec![
                ReasoningEffortPreset {
                    effort: ReasoningEffort::Low,
                    description: ReasoningEffort::Low.to_string(),
                },
                ReasoningEffortPreset {
                    effort: ReasoningEffort::Medium,
                    description: ReasoningEffort::Medium.to_string(),
                },
                ReasoningEffortPreset {
                    effort: ReasoningEffort::High,
                    description: ReasoningEffort::High.to_string(),
                },
            ],
            shell_type: ConfigShellToolType::ShellCommand,
            visibility: ModelVisibility::List,
            supported_in_api: true,
            priority: 1,
            upgrade: None,
            base_instructions: None,
            supports_reasoning_summaries: false,
            support_verbosity: false,
            default_verbosity: None,
            apply_patch_tool_type: None,
            truncation_policy: TruncationPolicyConfig::bytes(10_000),
            supports_parallel_tool_calls: false,
            context_window: None,
            experimental_supported_tools: Vec::new(),
        }],
    };

    Mock::given(method("GET"))
        .and(path("/api/codex/models"))
        .respond_with(
            ResponseTemplate::new(200)
                .insert_header("content-type", "application/json")
                .set_body_json(&response),
        )
        .mount(&server)
        .await;

    let transport = ReqwestTransport::new(reqwest::Client::new());
    let client = ModelsClient::new(transport, provider(&base_url), DummyAuth);

    let (models, _) = client
        .list_models("0.1.0", HeaderMap::new())
        .await
        .expect("models request should succeed");

    assert_eq!(models.len(), 1);
    assert_eq!(models[0].slug, "gpt-test");

    let received = server
        .received_requests()
        .await
        .expect("should capture requests");
    assert_eq!(received.len(), 1);
    assert_eq!(received[0].method, Method::GET.as_str());
    assert_eq!(received[0].url.path(), "/api/codex/models");
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ollama/src/lib.rs
codex-rs/ollama/src/lib.rs
mod client; mod parser; mod pull; mod url; pub use client::OllamaClient; use codex_core::config::Config; pub use pull::CliProgressReporter; pub use pull::PullEvent; pub use pull::PullProgressReporter; pub use pull::TuiProgressReporter; /// Default OSS model to use when `--oss` is passed without an explicit `-m`. pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b"; /// Prepare the local OSS environment when `--oss` is selected. /// /// - Ensures a local Ollama server is reachable. /// - Checks if the model exists locally and pulls it if missing. pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> { // Only download when the requested model is the default OSS model (or when -m is not provided). let model = match config.model.as_ref() { Some(model) => model, None => DEFAULT_OSS_MODEL, }; // Verify local Ollama is reachable. let ollama_client = crate::OllamaClient::try_from_oss_provider(config).await?; // If the model is not present locally, pull it. match ollama_client.fetch_models().await { Ok(models) => { if !models.iter().any(|m| m == model) { let mut reporter = crate::CliProgressReporter::new(); ollama_client .pull_with_reporter(model, &mut reporter) .await?; } } Err(err) => { // Not fatal; higher layers may still proceed and surface errors later. tracing::warn!("Failed to query local models from Ollama: {}.", err); } } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ollama/src/parser.rs
codex-rs/ollama/src/parser.rs
use serde_json::Value as JsonValue; use crate::pull::PullEvent; // Convert a single JSON object representing a pull update into one or more events. pub(crate) fn pull_events_from_value(value: &JsonValue) -> Vec<PullEvent> { let mut events = Vec::new(); if let Some(status) = value.get("status").and_then(|s| s.as_str()) { events.push(PullEvent::Status(status.to_string())); if status == "success" { events.push(PullEvent::Success); } } let digest = value .get("digest") .and_then(|d| d.as_str()) .unwrap_or("") .to_string(); let total = value.get("total").and_then(JsonValue::as_u64); let completed = value.get("completed").and_then(JsonValue::as_u64); if total.is_some() || completed.is_some() { events.push(PullEvent::ChunkProgress { digest, total, completed, }); } events } #[cfg(test)] mod tests { use assert_matches::assert_matches; use super::*; #[test] fn test_pull_events_decoder_status_and_success() { let v: JsonValue = serde_json::json!({"status":"verifying"}); let events = pull_events_from_value(&v); assert_matches!(events.as_slice(), [PullEvent::Status(s)] if s == "verifying"); let v2: JsonValue = serde_json::json!({"status":"success"}); let events2 = pull_events_from_value(&v2); assert_eq!(events2.len(), 2); assert_matches!(events2[0], PullEvent::Status(ref s) if s == "success"); assert_matches!(events2[1], PullEvent::Success); } #[test] fn test_pull_events_decoder_progress() { let v: JsonValue = serde_json::json!({"digest":"sha256:abc","total":100}); let events = pull_events_from_value(&v); assert_eq!(events.len(), 1); assert_matches!( &events[0], PullEvent::ChunkProgress { digest, total, completed, } if digest == "sha256:abc" && total == &Some(100) && completed.is_none() ); let v2: JsonValue = serde_json::json!({"digest":"sha256:def","completed":42}); let events2 = pull_events_from_value(&v2); assert_eq!(events2.len(), 1); assert_matches!( &events2[0], PullEvent::ChunkProgress { digest, total, completed, } if digest == "sha256:def" && total.is_none() && completed 
== &Some(42) ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ollama/src/url.rs
codex-rs/ollama/src/url.rs
/// Returns true when `base_url` points at an OpenAI-compatible root,
/// i.e. it ends with ".../v1" once trailing slashes are ignored.
pub(crate) fn is_openai_compatible_base_url(base_url: &str) -> bool {
    base_url.trim_end_matches('/').ends_with("/v1")
}

/// Convert a provider base_url into the native Ollama host root.
/// For example, "http://localhost:11434/v1" -> "http://localhost:11434".
pub fn base_url_to_host_root(base_url: &str) -> String {
    let trimmed = base_url.trim_end_matches('/');
    if !trimmed.ends_with("/v1") {
        return trimmed.to_string();
    }
    // Strip the "/v1" suffix (and any slashes left behind) to recover the
    // host root.
    trimmed
        .trim_end_matches("/v1")
        .trim_end_matches('/')
        .to_string()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_base_url_to_host_root() {
        let cases = [
            ("http://localhost:11434/v1", "http://localhost:11434"),
            ("http://localhost:11434", "http://localhost:11434"),
            ("http://localhost:11434/", "http://localhost:11434"),
        ];
        for (input, expected) in cases {
            assert_eq!(base_url_to_host_root(input), expected);
        }
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ollama/src/client.rs
codex-rs/ollama/src/client.rs
use bytes::BytesMut; use futures::StreamExt; use futures::stream::BoxStream; use serde_json::Value as JsonValue; use std::collections::VecDeque; use std::io; use crate::parser::pull_events_from_value; use crate::pull::PullEvent; use crate::pull::PullProgressReporter; use crate::url::base_url_to_host_root; use crate::url::is_openai_compatible_base_url; use codex_core::ModelProviderInfo; use codex_core::OLLAMA_OSS_PROVIDER_ID; use codex_core::WireApi; use codex_core::config::Config; const OLLAMA_CONNECTION_ERROR: &str = "No running Ollama server detected. Start it with: `ollama serve` (after installing). Install instructions: https://github.com/ollama/ollama?tab=readme-ov-file#ollama"; /// Client for interacting with a local Ollama instance. pub struct OllamaClient { client: reqwest::Client, host_root: String, uses_openai_compat: bool, } impl OllamaClient { /// Construct a client for the built‑in open‑source ("oss") model provider /// and verify that a local Ollama server is reachable. If no server is /// detected, returns an error with helpful installation/run instructions. pub async fn try_from_oss_provider(config: &Config) -> io::Result<Self> { // Note that we must look up the provider from the Config to ensure that // any overrides the user has in their config.toml are taken into // account. let provider = config .model_providers .get(OLLAMA_OSS_PROVIDER_ID) .ok_or_else(|| { io::Error::new( io::ErrorKind::NotFound, format!("Built-in provider {OLLAMA_OSS_PROVIDER_ID} not found",), ) })?; Self::try_from_provider(provider).await } #[cfg(test)] async fn try_from_provider_with_base_url(base_url: &str) -> io::Result<Self> { let provider = codex_core::create_oss_provider_with_base_url(base_url, codex_core::WireApi::Chat); Self::try_from_provider(&provider).await } /// Build a client from a provider definition and verify the server is reachable. 
async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> { #![expect(clippy::expect_used)] let base_url = provider .base_url .as_ref() .expect("oss provider must have a base_url"); let uses_openai_compat = is_openai_compatible_base_url(base_url) || matches!(provider.wire_api, WireApi::Chat) && is_openai_compatible_base_url(base_url); let host_root = base_url_to_host_root(base_url); let client = reqwest::Client::builder() .connect_timeout(std::time::Duration::from_secs(5)) .build() .unwrap_or_else(|_| reqwest::Client::new()); let client = Self { client, host_root, uses_openai_compat, }; client.probe_server().await?; Ok(client) } /// Probe whether the server is reachable by hitting the appropriate health endpoint. async fn probe_server(&self) -> io::Result<()> { let url = if self.uses_openai_compat { format!("{}/v1/models", self.host_root.trim_end_matches('/')) } else { format!("{}/api/tags", self.host_root.trim_end_matches('/')) }; let resp = self.client.get(url).send().await.map_err(|err| { tracing::warn!("Failed to connect to Ollama server: {err:?}"); io::Error::other(OLLAMA_CONNECTION_ERROR) })?; if resp.status().is_success() { Ok(()) } else { tracing::warn!( "Failed to probe server at {}: HTTP {}", self.host_root, resp.status() ); Err(io::Error::other(OLLAMA_CONNECTION_ERROR)) } } /// Return the list of model names known to the local Ollama instance. 
pub async fn fetch_models(&self) -> io::Result<Vec<String>> { let tags_url = format!("{}/api/tags", self.host_root.trim_end_matches('/')); let resp = self .client .get(tags_url) .send() .await .map_err(io::Error::other)?; if !resp.status().is_success() { return Ok(Vec::new()); } let val = resp.json::<JsonValue>().await.map_err(io::Error::other)?; let names = val .get("models") .and_then(|m| m.as_array()) .map(|arr| { arr.iter() .filter_map(|v| v.get("name").and_then(|n| n.as_str())) .map(str::to_string) .collect::<Vec<_>>() }) .unwrap_or_default(); Ok(names) } /// Start a model pull and emit streaming events. The returned stream ends when /// a Success event is observed or the server closes the connection. pub async fn pull_model_stream( &self, model: &str, ) -> io::Result<BoxStream<'static, PullEvent>> { let url = format!("{}/api/pull", self.host_root.trim_end_matches('/')); let resp = self .client .post(url) .json(&serde_json::json!({"model": model, "stream": true})) .send() .await .map_err(io::Error::other)?; if !resp.status().is_success() { return Err(io::Error::other(format!( "failed to start pull: HTTP {}", resp.status() ))); } let mut stream = resp.bytes_stream(); let mut buf = BytesMut::new(); let _pending: VecDeque<PullEvent> = VecDeque::new(); // Using an async stream adaptor backed by unfold-like manual loop. let s = async_stream::stream! 
{ while let Some(chunk) = stream.next().await { match chunk { Ok(bytes) => { buf.extend_from_slice(&bytes); while let Some(pos) = buf.iter().position(|b| *b == b'\n') { let line = buf.split_to(pos + 1); if let Ok(text) = std::str::from_utf8(&line) { let text = text.trim(); if text.is_empty() { continue; } if let Ok(value) = serde_json::from_str::<JsonValue>(text) { for ev in pull_events_from_value(&value) { yield ev; } if let Some(err_msg) = value.get("error").and_then(|e| e.as_str()) { yield PullEvent::Error(err_msg.to_string()); return; } if let Some(status) = value.get("status").and_then(|s| s.as_str()) && status == "success" { yield PullEvent::Success; return; } } } } } Err(_) => { // Connection error: end the stream. return; } } } }; Ok(Box::pin(s)) } /// High-level helper to pull a model and drive a progress reporter. pub async fn pull_with_reporter( &self, model: &str, reporter: &mut dyn PullProgressReporter, ) -> io::Result<()> { reporter.on_event(&PullEvent::Status(format!("Pulling model {model}...")))?; let mut stream = self.pull_model_stream(model).await?; while let Some(event) = stream.next().await { reporter.on_event(&event)?; match event { PullEvent::Success => { return Ok(()); } PullEvent::Error(err) => { // Empirically, ollama returns a 200 OK response even when // the output stream includes an error message. Verify with: // // `curl -i http://localhost:11434/api/pull -d '{ "model": "foobarbaz" }'` // // As such, we have to check the event stream, not the // HTTP response status, to determine whether to return Err. return Err(io::Error::other(format!("Pull failed: {err}"))); } PullEvent::ChunkProgress { .. } | PullEvent::Status(_) => { continue; } } } Err(io::Error::other( "Pull stream ended unexpectedly without success.", )) } /// Low-level constructor given a raw host root, e.g. "http://localhost:11434". 
#[cfg(test)] fn from_host_root(host_root: impl Into<String>) -> Self { let client = reqwest::Client::builder() .connect_timeout(std::time::Duration::from_secs(5)) .build() .unwrap_or_else(|_| reqwest::Client::new()); Self { client, host_root: host_root.into(), uses_openai_compat: false, } } } #[cfg(test)] mod tests { use super::*; // Happy-path tests using a mock HTTP server; skip if sandbox network is disabled. #[tokio::test] async fn test_fetch_models_happy_path() { if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { tracing::info!( "{} is set; skipping test_fetch_models_happy_path", codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR ); return; } let server = wiremock::MockServer::start().await; wiremock::Mock::given(wiremock::matchers::method("GET")) .and(wiremock::matchers::path("/api/tags")) .respond_with( wiremock::ResponseTemplate::new(200).set_body_raw( serde_json::json!({ "models": [ {"name": "llama3.2:3b"}, {"name":"mistral"} ] }) .to_string(), "application/json", ), ) .mount(&server) .await; let client = OllamaClient::from_host_root(server.uri()); let models = client.fetch_models().await.expect("fetch models"); assert!(models.contains(&"llama3.2:3b".to_string())); assert!(models.contains(&"mistral".to_string())); } #[tokio::test] async fn test_probe_server_happy_path_openai_compat_and_native() { if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { tracing::info!( "{} set; skipping test_probe_server_happy_path_openai_compat_and_native", codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR ); return; } let server = wiremock::MockServer::start().await; // Native endpoint wiremock::Mock::given(wiremock::matchers::method("GET")) .and(wiremock::matchers::path("/api/tags")) .respond_with(wiremock::ResponseTemplate::new(200)) .mount(&server) .await; let native = OllamaClient::from_host_root(server.uri()); native.probe_server().await.expect("probe native"); // OpenAI compatibility 
endpoint wiremock::Mock::given(wiremock::matchers::method("GET")) .and(wiremock::matchers::path("/v1/models")) .respond_with(wiremock::ResponseTemplate::new(200)) .mount(&server) .await; let ollama_client = OllamaClient::try_from_provider_with_base_url(&format!("{}/v1", server.uri())) .await .expect("probe OpenAI compat"); ollama_client .probe_server() .await .expect("probe OpenAI compat"); } #[tokio::test] async fn test_try_from_oss_provider_ok_when_server_running() { if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { tracing::info!( "{} set; skipping test_try_from_oss_provider_ok_when_server_running", codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR ); return; } let server = wiremock::MockServer::start().await; // OpenAI‑compat models endpoint responds OK. wiremock::Mock::given(wiremock::matchers::method("GET")) .and(wiremock::matchers::path("/v1/models")) .respond_with(wiremock::ResponseTemplate::new(200)) .mount(&server) .await; OllamaClient::try_from_provider_with_base_url(&format!("{}/v1", server.uri())) .await .expect("client should be created when probe succeeds"); } #[tokio::test] async fn test_try_from_oss_provider_err_when_server_missing() { if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() { tracing::info!( "{} set; skipping test_try_from_oss_provider_err_when_server_missing", codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR ); return; } let server = wiremock::MockServer::start().await; let err = OllamaClient::try_from_provider_with_base_url(&format!("{}/v1", server.uri())) .await .err() .expect("expected error"); assert_eq!(OLLAMA_CONNECTION_ERROR, err.to_string()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ollama/src/pull.rs
codex-rs/ollama/src/pull.rs
use std::collections::HashMap; use std::io; use std::io::Write; /// Events emitted while pulling a model from Ollama. #[derive(Debug, Clone)] pub enum PullEvent { /// A human-readable status message (e.g., "verifying", "writing"). Status(String), /// Byte-level progress update for a specific layer digest. ChunkProgress { digest: String, total: Option<u64>, completed: Option<u64>, }, /// The pull finished successfully. Success, /// Error event with a message. Error(String), } /// A simple observer for pull progress events. Implementations decide how to /// render progress (CLI, TUI, logs, ...). pub trait PullProgressReporter { fn on_event(&mut self, event: &PullEvent) -> io::Result<()>; } /// A minimal CLI reporter that writes inline progress to stderr. pub struct CliProgressReporter { printed_header: bool, last_line_len: usize, last_completed_sum: u64, last_instant: std::time::Instant, totals_by_digest: HashMap<String, (u64, u64)>, } impl Default for CliProgressReporter { fn default() -> Self { Self::new() } } impl CliProgressReporter { pub fn new() -> Self { Self { printed_header: false, last_line_len: 0, last_completed_sum: 0, last_instant: std::time::Instant::now(), totals_by_digest: HashMap::new(), } } } impl PullProgressReporter for CliProgressReporter { fn on_event(&mut self, event: &PullEvent) -> io::Result<()> { let mut out = std::io::stderr(); match event { PullEvent::Status(status) => { // Avoid noisy manifest messages; otherwise show status inline. 
if status.eq_ignore_ascii_case("pulling manifest") { return Ok(()); } let pad = self.last_line_len.saturating_sub(status.len()); let line = format!("\r{status}{}", " ".repeat(pad)); self.last_line_len = status.len(); out.write_all(line.as_bytes())?; out.flush() } PullEvent::ChunkProgress { digest, total, completed, } => { if let Some(t) = *total { self.totals_by_digest .entry(digest.clone()) .or_insert((0, 0)) .0 = t; } if let Some(c) = *completed { self.totals_by_digest .entry(digest.clone()) .or_insert((0, 0)) .1 = c; } let (sum_total, sum_completed) = self .totals_by_digest .values() .fold((0u64, 0u64), |acc, (t, c)| (acc.0 + *t, acc.1 + *c)); if sum_total > 0 { if !self.printed_header { let gb = (sum_total as f64) / (1024.0 * 1024.0 * 1024.0); let header = format!("Downloading model: total {gb:.2} GB\n"); out.write_all(b"\r\x1b[2K")?; out.write_all(header.as_bytes())?; self.printed_header = true; } let now = std::time::Instant::now(); let dt = now .duration_since(self.last_instant) .as_secs_f64() .max(0.001); let dbytes = sum_completed.saturating_sub(self.last_completed_sum) as f64; let speed_mb_s = dbytes / (1024.0 * 1024.0) / dt; self.last_completed_sum = sum_completed; self.last_instant = now; let done_gb = (sum_completed as f64) / (1024.0 * 1024.0 * 1024.0); let total_gb = (sum_total as f64) / (1024.0 * 1024.0 * 1024.0); let pct = (sum_completed as f64) * 100.0 / (sum_total as f64); let text = format!("{done_gb:.2}/{total_gb:.2} GB ({pct:.1}%) {speed_mb_s:.1} MB/s"); let pad = self.last_line_len.saturating_sub(text.len()); let line = format!("\r{text}{}", " ".repeat(pad)); self.last_line_len = text.len(); out.write_all(line.as_bytes())?; out.flush() } else { Ok(()) } } PullEvent::Error(_) => { // This will be handled by the caller, so we don't do anything // here or the error will be printed twice. Ok(()) } PullEvent::Success => { out.write_all(b"\n")?; out.flush() } } } } /// For now the TUI reporter delegates to the CLI reporter. 
This keeps UI and /// CLI behavior aligned until a dedicated TUI integration is implemented. #[derive(Default)] pub struct TuiProgressReporter(CliProgressReporter); impl PullProgressReporter for TuiProgressReporter { fn on_event(&mut self, event: &PullEvent) -> io::Result<()> { self.0.on_event(event) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/otel/src/config.rs
codex-rs/otel/src/config.rs
use std::collections::HashMap; use std::path::PathBuf; use codex_utils_absolute_path::AbsolutePathBuf; #[derive(Clone, Debug)] pub struct OtelSettings { pub environment: String, pub service_name: String, pub service_version: String, pub codex_home: PathBuf, pub exporter: OtelExporter, pub trace_exporter: OtelExporter, } #[derive(Clone, Debug)] pub enum OtelHttpProtocol { /// HTTP protocol with binary protobuf Binary, /// HTTP protocol with JSON payload Json, } #[derive(Clone, Debug, Default)] pub struct OtelTlsConfig { pub ca_certificate: Option<AbsolutePathBuf>, pub client_certificate: Option<AbsolutePathBuf>, pub client_private_key: Option<AbsolutePathBuf>, } #[derive(Clone, Debug)] pub enum OtelExporter { None, OtlpGrpc { endpoint: String, headers: HashMap<String, String>, tls: Option<OtelTlsConfig>, }, OtlpHttp { endpoint: String, headers: HashMap<String, String>, protocol: OtelHttpProtocol, tls: Option<OtelTlsConfig>, }, }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/otel/src/lib.rs
codex-rs/otel/src/lib.rs
pub mod config; pub mod otel_manager; pub mod otel_provider;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/otel/src/otel_provider.rs
codex-rs/otel/src/otel_provider.rs
use crate::config::OtelExporter; use crate::config::OtelHttpProtocol; use crate::config::OtelSettings; use crate::config::OtelTlsConfig; use codex_utils_absolute_path::AbsolutePathBuf; use http::Uri; use opentelemetry::Context; use opentelemetry::KeyValue; use opentelemetry::context::ContextGuard; use opentelemetry::global; use opentelemetry::propagation::TextMapPropagator; use opentelemetry::trace::TraceContextExt; use opentelemetry::trace::TracerProvider as _; use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; use opentelemetry_otlp::LogExporter; use opentelemetry_otlp::OTEL_EXPORTER_OTLP_LOGS_TIMEOUT; use opentelemetry_otlp::OTEL_EXPORTER_OTLP_TIMEOUT; use opentelemetry_otlp::OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT; use opentelemetry_otlp::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT; use opentelemetry_otlp::Protocol; use opentelemetry_otlp::SpanExporter; use opentelemetry_otlp::WithExportConfig; use opentelemetry_otlp::WithHttpConfig; use opentelemetry_otlp::WithTonicConfig; use opentelemetry_otlp::tonic_types::metadata::MetadataMap; use opentelemetry_otlp::tonic_types::transport::Certificate as TonicCertificate; use opentelemetry_otlp::tonic_types::transport::ClientTlsConfig; use opentelemetry_otlp::tonic_types::transport::Identity as TonicIdentity; use opentelemetry_sdk::Resource; use opentelemetry_sdk::logs::SdkLoggerProvider; use opentelemetry_sdk::propagation::TraceContextPropagator; use opentelemetry_sdk::trace::BatchSpanProcessor; use opentelemetry_sdk::trace::SdkTracerProvider; use opentelemetry_sdk::trace::Tracer; use opentelemetry_semantic_conventions as semconv; use reqwest::Certificate as ReqwestCertificate; use reqwest::Identity as ReqwestIdentity; use reqwest::header::HeaderMap; use reqwest::header::HeaderName; use reqwest::header::HeaderValue; use std::cell::RefCell; use std::collections::HashMap; use std::env; use std::error::Error; use std::fs; use std::io::ErrorKind; use std::io::{self}; use std::path::PathBuf; use std::sync::OnceLock; use 
std::time::Duration; use tracing::debug; use tracing::level_filters::LevelFilter; use tracing::warn; use tracing_subscriber::Layer; use tracing_subscriber::registry::LookupSpan; const ENV_ATTRIBUTE: &str = "env"; const TRACEPARENT_ENV_VAR: &str = "TRACEPARENT"; const TRACESTATE_ENV_VAR: &str = "TRACESTATE"; static TRACEPARENT_CONTEXT: OnceLock<Option<Context>> = OnceLock::new(); thread_local! { static TRACEPARENT_GUARD: RefCell<Option<ContextGuard>> = const { RefCell::new(None) }; } pub struct OtelProvider { pub logger: Option<SdkLoggerProvider>, pub tracer_provider: Option<SdkTracerProvider>, pub tracer: Option<Tracer>, } impl OtelProvider { pub fn shutdown(&self) { if let Some(logger) = &self.logger { let _ = logger.shutdown(); } if let Some(tracer_provider) = &self.tracer_provider { let _ = tracer_provider.shutdown(); } } pub fn from(settings: &OtelSettings) -> Result<Option<Self>, Box<dyn Error>> { let log_enabled = !matches!(settings.exporter, OtelExporter::None); let trace_enabled = !matches!(settings.trace_exporter, OtelExporter::None); if !log_enabled && !trace_enabled { debug!("No exporter enabled in OTLP settings."); return Ok(None); } let resource = make_resource(settings); let logger = log_enabled .then(|| build_logger(&resource, &settings.exporter)) .transpose()?; let tracer_provider = trace_enabled .then(|| build_tracer_provider(&resource, &settings.trace_exporter)) .transpose()?; let tracer = tracer_provider .as_ref() .map(|provider| provider.tracer(settings.service_name.clone())); if let Some(provider) = tracer_provider.clone() { global::set_tracer_provider(provider); global::set_text_map_propagator(TraceContextPropagator::new()); } if tracer.is_some() { attach_traceparent_context(); } Ok(Some(Self { logger, tracer_provider, tracer, })) } pub fn logger_layer<S>(&self) -> Option<impl Layer<S> + Send + Sync> where S: tracing::Subscriber + for<'span> LookupSpan<'span> + Send + Sync, { self.logger.as_ref().map(|logger| { 
OpenTelemetryTracingBridge::new(logger).with_filter( tracing_subscriber::filter::filter_fn(OtelProvider::codex_export_filter), ) }) } pub fn tracing_layer<S>(&self) -> Option<impl Layer<S> + Send + Sync> where S: tracing::Subscriber + for<'span> LookupSpan<'span> + Send + Sync, { self.tracer.as_ref().map(|tracer| { tracing_opentelemetry::layer() .with_tracer(tracer.clone()) .with_filter(LevelFilter::TRACE) }) } pub fn codex_export_filter(meta: &tracing::Metadata<'_>) -> bool { meta.target().starts_with("codex_otel") } } impl Drop for OtelProvider { fn drop(&mut self) { if let Some(logger) = &self.logger { let _ = logger.shutdown(); } if let Some(tracer_provider) = &self.tracer_provider { let _ = tracer_provider.shutdown(); } } } pub(crate) fn traceparent_context_from_env() -> Option<Context> { TRACEPARENT_CONTEXT .get_or_init(load_traceparent_context) .clone() } fn attach_traceparent_context() { TRACEPARENT_GUARD.with(|guard| { let mut guard = guard.borrow_mut(); if guard.is_some() { return; } if let Some(context) = traceparent_context_from_env() { *guard = Some(context.attach()); } }); } fn load_traceparent_context() -> Option<Context> { let traceparent = env::var(TRACEPARENT_ENV_VAR).ok()?; let tracestate = env::var(TRACESTATE_ENV_VAR).ok(); match extract_traceparent_context(traceparent, tracestate) { Some(context) => { debug!("TRACEPARENT detected; continuing trace from parent context"); Some(context) } None => { warn!("TRACEPARENT is set but invalid; ignoring trace context"); None } } } fn extract_traceparent_context(traceparent: String, tracestate: Option<String>) -> Option<Context> { let mut headers = HashMap::new(); headers.insert("traceparent".to_string(), traceparent); if let Some(tracestate) = tracestate { headers.insert("tracestate".to_string(), tracestate); } let context = TraceContextPropagator::new().extract(&headers); let span = context.span(); let span_context = span.span_context(); if !span_context.is_valid() { return None; } Some(context) } fn 
make_resource(settings: &OtelSettings) -> Resource { Resource::builder() .with_service_name(settings.service_name.clone()) .with_attributes(vec![ KeyValue::new( semconv::attribute::SERVICE_VERSION, settings.service_version.clone(), ), KeyValue::new(ENV_ATTRIBUTE, settings.environment.clone()), ]) .build() } fn build_logger( resource: &Resource, exporter: &OtelExporter, ) -> Result<SdkLoggerProvider, Box<dyn Error>> { let mut builder = SdkLoggerProvider::builder().with_resource(resource.clone()); match exporter { OtelExporter::None => return Ok(builder.build()), OtelExporter::OtlpGrpc { endpoint, headers, tls, } => { debug!("Using OTLP Grpc exporter: {endpoint}"); let header_map = build_header_map(headers); let base_tls_config = ClientTlsConfig::new() .with_enabled_roots() .assume_http2(true); let tls_config = match tls.as_ref() { Some(tls) => build_grpc_tls_config(endpoint, base_tls_config, tls)?, None => base_tls_config, }; let exporter = LogExporter::builder() .with_tonic() .with_endpoint(endpoint) .with_metadata(MetadataMap::from_headers(header_map)) .with_tls_config(tls_config) .build()?; builder = builder.with_batch_exporter(exporter); } OtelExporter::OtlpHttp { endpoint, headers, protocol, tls, } => { debug!("Using OTLP Http exporter: {endpoint}"); let protocol = match protocol { OtelHttpProtocol::Binary => Protocol::HttpBinary, OtelHttpProtocol::Json => Protocol::HttpJson, }; let mut exporter_builder = LogExporter::builder() .with_http() .with_endpoint(endpoint) .with_protocol(protocol) .with_headers(headers.clone()); if let Some(tls) = tls.as_ref() { let client = build_http_client(tls, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT)?; exporter_builder = exporter_builder.with_http_client(client); } let exporter = exporter_builder.build()?; builder = builder.with_batch_exporter(exporter); } } Ok(builder.build()) } fn build_tracer_provider( resource: &Resource, exporter: &OtelExporter, ) -> Result<SdkTracerProvider, Box<dyn Error>> { let span_exporter = match exporter { 
OtelExporter::None => return Ok(SdkTracerProvider::builder().build()), OtelExporter::OtlpGrpc { endpoint, headers, tls, } => { debug!("Using OTLP Grpc exporter for traces: {endpoint}"); let header_map = build_header_map(headers); let base_tls_config = ClientTlsConfig::new() .with_enabled_roots() .assume_http2(true); let tls_config = match tls.as_ref() { Some(tls) => build_grpc_tls_config(endpoint, base_tls_config, tls)?, None => base_tls_config, }; SpanExporter::builder() .with_tonic() .with_endpoint(endpoint) .with_metadata(MetadataMap::from_headers(header_map)) .with_tls_config(tls_config) .build()? } OtelExporter::OtlpHttp { endpoint, headers, protocol, tls, } => { debug!("Using OTLP Http exporter for traces: {endpoint}"); let protocol = match protocol { OtelHttpProtocol::Binary => Protocol::HttpBinary, OtelHttpProtocol::Json => Protocol::HttpJson, }; let mut exporter_builder = SpanExporter::builder() .with_http() .with_endpoint(endpoint) .with_protocol(protocol) .with_headers(headers.clone()); if let Some(tls) = tls.as_ref() { let client = build_http_client(tls, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT)?; exporter_builder = exporter_builder.with_http_client(client); } exporter_builder.build()? 
} }; let processor = BatchSpanProcessor::builder(span_exporter).build(); Ok(SdkTracerProvider::builder() .with_resource(resource.clone()) .with_span_processor(processor) .build()) } fn build_header_map(headers: &HashMap<String, String>) -> HeaderMap { let mut header_map = HeaderMap::new(); for (key, value) in headers { if let Ok(name) = HeaderName::from_bytes(key.as_bytes()) && let Ok(val) = HeaderValue::from_str(value) { header_map.insert(name, val); } } header_map } fn build_grpc_tls_config( endpoint: &str, tls_config: ClientTlsConfig, tls: &OtelTlsConfig, ) -> Result<ClientTlsConfig, Box<dyn Error>> { let uri: Uri = endpoint.parse()?; let host = uri.host().ok_or_else(|| { config_error(format!( "OTLP gRPC endpoint {endpoint} does not include a host" )) })?; let mut config = tls_config.domain_name(host.to_owned()); if let Some(path) = tls.ca_certificate.as_ref() { let (pem, _) = read_bytes(path)?; config = config.ca_certificate(TonicCertificate::from_pem(pem)); } match (&tls.client_certificate, &tls.client_private_key) { (Some(cert_path), Some(key_path)) => { let (cert_pem, _) = read_bytes(cert_path)?; let (key_pem, _) = read_bytes(key_path)?; config = config.identity(TonicIdentity::from_pem(cert_pem, key_pem)); } (Some(_), None) | (None, Some(_)) => { return Err(config_error( "client_certificate and client_private_key must both be provided for mTLS", )); } (None, None) => {} } Ok(config) } /// Build a blocking HTTP client with TLS configuration for the OTLP HTTP exporter. /// /// We use `reqwest::blocking::Client` instead of the async client because the /// `opentelemetry_sdk` `BatchLogProcessor` spawns a dedicated OS thread that uses /// `futures_executor::block_on()` rather than tokio. When the async reqwest client's /// timeout calls `tokio::time::sleep()`, it panics with "no reactor running". 
fn build_http_client( tls: &OtelTlsConfig, timeout_var: &str, ) -> Result<reqwest::blocking::Client, Box<dyn Error>> { // Wrap in block_in_place because reqwest::blocking::Client creates its own // internal tokio runtime, which would panic if built directly from an async context. tokio::task::block_in_place(|| build_http_client_inner(tls, timeout_var)) } fn build_http_client_inner( tls: &OtelTlsConfig, timeout_var: &str, ) -> Result<reqwest::blocking::Client, Box<dyn Error>> { let mut builder = reqwest::blocking::Client::builder().timeout(resolve_otlp_timeout(timeout_var)); if let Some(path) = tls.ca_certificate.as_ref() { let (pem, location) = read_bytes(path)?; let certificate = ReqwestCertificate::from_pem(pem.as_slice()).map_err(|error| { config_error(format!( "failed to parse certificate {}: {error}", location.display() )) })?; // Disable built-in root certificates and use only our custom CA builder = builder .tls_built_in_root_certs(false) .add_root_certificate(certificate); } match (&tls.client_certificate, &tls.client_private_key) { (Some(cert_path), Some(key_path)) => { let (mut cert_pem, cert_location) = read_bytes(cert_path)?; let (key_pem, key_location) = read_bytes(key_path)?; cert_pem.extend_from_slice(key_pem.as_slice()); let identity = ReqwestIdentity::from_pem(cert_pem.as_slice()).map_err(|error| { config_error(format!( "failed to parse client identity using {} and {}: {error}", cert_location.display(), key_location.display() )) })?; builder = builder.identity(identity).https_only(true); } (Some(_), None) | (None, Some(_)) => { return Err(config_error( "client_certificate and client_private_key must both be provided for mTLS", )); } (None, None) => {} } builder .build() .map_err(|error| Box::new(error) as Box<dyn Error>) } fn resolve_otlp_timeout(signal_var: &str) -> Duration { if let Some(timeout) = read_timeout_env(signal_var) { return timeout; } if let Some(timeout) = read_timeout_env(OTEL_EXPORTER_OTLP_TIMEOUT) { return timeout; } 
OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT } fn read_timeout_env(var: &str) -> Option<Duration> { let value = env::var(var).ok()?; let parsed = value.parse::<i64>().ok()?; if parsed < 0 { return None; } Some(Duration::from_millis(parsed as u64)) } fn read_bytes(path: &AbsolutePathBuf) -> Result<(Vec<u8>, PathBuf), Box<dyn Error>> { match fs::read(path) { Ok(bytes) => Ok((bytes, path.to_path_buf())), Err(error) => Err(Box::new(io::Error::new( error.kind(), format!("failed to read {}: {error}", path.display()), ))), } } fn config_error(message: impl Into<String>) -> Box<dyn Error> { Box::new(io::Error::new(ErrorKind::InvalidData, message.into())) } #[cfg(test)] mod tests { use super::*; use opentelemetry::trace::SpanId; use opentelemetry::trace::TraceContextExt; use opentelemetry::trace::TraceId; #[test] fn parses_valid_traceparent() { let trace_id = "00000000000000000000000000000001"; let span_id = "0000000000000002"; let context = extract_traceparent_context(format!("00-{trace_id}-{span_id}-01"), None) .expect("trace context"); let span = context.span(); let span_context = span.span_context(); assert_eq!( span_context.trace_id(), TraceId::from_hex(trace_id).unwrap() ); assert_eq!(span_context.span_id(), SpanId::from_hex(span_id).unwrap()); assert!(span_context.is_remote()); } #[test] fn invalid_traceparent_returns_none() { assert!(extract_traceparent_context("not-a-traceparent".to_string(), None).is_none()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/otel/src/otel_manager.rs
codex-rs/otel/src/otel_manager.rs
use crate::otel_provider::traceparent_context_from_env; use chrono::SecondsFormat; use chrono::Utc; use codex_api::ResponseEvent; use codex_app_server_protocol::AuthMode; use codex_protocol::ConversationId; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::models::ResponseItem; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::SandboxPolicy; use codex_protocol::protocol::SessionSource; use codex_protocol::user_input::UserInput; use eventsource_stream::Event as StreamEvent; use eventsource_stream::EventStreamError as StreamError; use reqwest::Error; use reqwest::Response; use serde::Serialize; use std::borrow::Cow; use std::fmt::Display; use std::future::Future; use std::time::Duration; use std::time::Instant; use strum_macros::Display; use tokio::time::error::Elapsed; use tracing::Span; use tracing::trace_span; use tracing_opentelemetry::OpenTelemetrySpanExt; #[derive(Debug, Clone, Serialize, Display)] #[serde(rename_all = "snake_case")] pub enum ToolDecisionSource { Config, User, } #[derive(Debug, Clone)] pub struct OtelEventMetadata { conversation_id: ConversationId, auth_mode: Option<String>, account_id: Option<String>, account_email: Option<String>, model: String, slug: String, log_user_prompts: bool, app_version: &'static str, terminal_type: String, } #[derive(Debug, Clone)] pub struct OtelManager { metadata: OtelEventMetadata, session_span: Span, } impl OtelManager { #[allow(clippy::too_many_arguments)] pub fn new( conversation_id: ConversationId, model: &str, slug: &str, account_id: Option<String>, account_email: Option<String>, auth_mode: Option<AuthMode>, log_user_prompts: bool, terminal_type: String, session_source: SessionSource, ) -> OtelManager { let session_span = trace_span!("new_session", conversation_id = %conversation_id, session_source = %session_source); if let Some(context) = 
traceparent_context_from_env() { let _ = session_span.set_parent(context); } Self { metadata: OtelEventMetadata { conversation_id, auth_mode: auth_mode.map(|m| m.to_string()), account_id, account_email, model: model.to_owned(), slug: slug.to_owned(), log_user_prompts, app_version: env!("CARGO_PKG_VERSION"), terminal_type, }, session_span, } } pub fn with_model(&self, model: &str, slug: &str) -> Self { let mut manager = self.clone(); manager.metadata.model = model.to_owned(); manager.metadata.slug = slug.to_owned(); manager } pub fn current_span(&self) -> &Span { &self.session_span } pub fn record_responses(&self, handle_responses_span: &Span, event: &ResponseEvent) { handle_responses_span.record("otel.name", OtelManager::responses_type(event)); match event { ResponseEvent::OutputItemDone(item) => { handle_responses_span.record("from", "output_item_done"); if let ResponseItem::FunctionCall { name, .. } = &item { handle_responses_span.record("tool_name", name.as_str()); } } ResponseEvent::OutputItemAdded(item) => { handle_responses_span.record("from", "output_item_added"); if let ResponseItem::FunctionCall { name, .. 
} = &item { handle_responses_span.record("tool_name", name.as_str()); } } _ => {} } } #[allow(clippy::too_many_arguments)] pub fn conversation_starts( &self, provider_name: &str, reasoning_effort: Option<ReasoningEffort>, reasoning_summary: ReasoningSummary, context_window: Option<i64>, auto_compact_token_limit: Option<i64>, approval_policy: AskForApproval, sandbox_policy: SandboxPolicy, mcp_servers: Vec<&str>, active_profile: Option<String>, ) { tracing::event!( tracing::Level::INFO, event.name = "codex.conversation_starts", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, provider_name = %provider_name, reasoning_effort = reasoning_effort.map(|e| e.to_string()), reasoning_summary = %reasoning_summary, context_window = context_window, auto_compact_token_limit = auto_compact_token_limit, approval_policy = %approval_policy, sandbox_policy = %sandbox_policy, mcp_servers = mcp_servers.join(", "), active_profile = active_profile, ) } pub async fn log_request<F, Fut>(&self, attempt: u64, f: F) -> Result<Response, Error> where F: FnOnce() -> Fut, Fut: Future<Output = Result<Response, Error>>, { let start = std::time::Instant::now(); let response = f().await; let duration = start.elapsed(); let (status, error) = match &response { Ok(response) => (Some(response.status().as_u16()), None), Err(error) => (error.status().map(|s| s.as_u16()), Some(error.to_string())), }; self.record_api_request(attempt, status, error.as_deref(), duration); response } pub fn record_api_request( &self, attempt: u64, status: Option<u16>, error: Option<&str>, duration: Duration, ) { tracing::event!( tracing::Level::INFO, event.name = "codex.api_request", event.timestamp = %timestamp(), conversation.id 
= %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, duration_ms = %duration.as_millis(), http.response.status_code = status, error.message = error, attempt = attempt, ); } pub fn log_sse_event<E>( &self, response: &Result<Option<Result<StreamEvent, StreamError<E>>>, Elapsed>, duration: Duration, ) where E: Display, { match response { Ok(Some(Ok(sse))) => { if sse.data.trim() == "[DONE]" { self.sse_event(&sse.event, duration); } else { match serde_json::from_str::<serde_json::Value>(&sse.data) { Ok(error) if sse.event == "response.failed" => { self.sse_event_failed(Some(&sse.event), duration, &error); } Ok(content) if sse.event == "response.output_item.done" => { match serde_json::from_value::<ResponseItem>(content) { Ok(_) => self.sse_event(&sse.event, duration), Err(_) => { self.sse_event_failed( Some(&sse.event), duration, &"failed to parse response.output_item.done", ); } }; } Ok(_) => { self.sse_event(&sse.event, duration); } Err(error) => { self.sse_event_failed(Some(&sse.event), duration, &error); } } } } Ok(Some(Err(error))) => { self.sse_event_failed(None, duration, error); } Ok(None) => {} Err(_) => { self.sse_event_failed(None, duration, &"idle timeout waiting for SSE"); } } } fn sse_event(&self, kind: &str, duration: Duration) { tracing::event!( tracing::Level::INFO, event.name = "codex.sse_event", event.timestamp = %timestamp(), event.kind = %kind, conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, duration_ms = %duration.as_millis(), ); } pub 
fn sse_event_failed<T>(&self, kind: Option<&String>, duration: Duration, error: &T) where T: Display, { match kind { Some(kind) => tracing::event!( tracing::Level::INFO, event.name = "codex.sse_event", event.timestamp = %timestamp(), event.kind = %kind, conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, duration_ms = %duration.as_millis(), error.message = %error, ), None => tracing::event!( tracing::Level::INFO, event.name = "codex.sse_event", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, duration_ms = %duration.as_millis(), error.message = %error, ), } } pub fn see_event_completed_failed<T>(&self, error: &T) where T: Display, { tracing::event!( tracing::Level::INFO, event.name = "codex.sse_event", event.kind = %"response.completed", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, error.message = %error, ) } pub fn sse_event_completed( &self, input_token_count: i64, output_token_count: i64, cached_token_count: Option<i64>, reasoning_token_count: Option<i64>, tool_token_count: i64, ) { tracing::event!( tracing::Level::INFO, event.name = "codex.sse_event", event.timestamp = %timestamp(), event.kind = 
%"response.completed", conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, input_token_count = %input_token_count, output_token_count = %output_token_count, cached_token_count = cached_token_count, reasoning_token_count = reasoning_token_count, tool_token_count = %tool_token_count, ); } pub fn user_prompt(&self, items: &[UserInput]) { let prompt = items .iter() .flat_map(|item| match item { UserInput::Text { text } => Some(text.as_str()), _ => None, }) .collect::<String>(); let prompt_to_log = if self.metadata.log_user_prompts { prompt.as_str() } else { "[REDACTED]" }; tracing::event!( tracing::Level::INFO, event.name = "codex.user_prompt", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, prompt_length = %prompt.chars().count(), prompt = %prompt_to_log, ); } pub fn tool_decision( &self, tool_name: &str, call_id: &str, decision: &ReviewDecision, source: ToolDecisionSource, ) { tracing::event!( tracing::Level::INFO, event.name = "codex.tool_decision", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, tool_name = %tool_name, call_id = %call_id, decision = %decision.clone().to_string().to_lowercase(), source = 
%source.to_string(), ); } pub async fn log_tool_result<F, Fut, E>( &self, tool_name: &str, call_id: &str, arguments: &str, f: F, ) -> Result<(String, bool), E> where F: FnOnce() -> Fut, Fut: Future<Output = Result<(String, bool), E>>, E: Display, { let start = Instant::now(); let result = f().await; let duration = start.elapsed(); let (output, success) = match &result { Ok((preview, success)) => (Cow::Borrowed(preview.as_str()), *success), Err(error) => (Cow::Owned(error.to_string()), false), }; self.tool_result( tool_name, call_id, arguments, duration, success, output.as_ref(), ); result } pub fn log_tool_failed(&self, tool_name: &str, error: &str) { tracing::event!( tracing::Level::INFO, event.name = "codex.tool_result", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, tool_name = %tool_name, duration_ms = %Duration::ZERO.as_millis(), success = %false, output = %error, ); } pub fn tool_result( &self, tool_name: &str, call_id: &str, arguments: &str, duration: Duration, success: bool, output: &str, ) { let success_str = if success { "true" } else { "false" }; tracing::event!( tracing::Level::INFO, event.name = "codex.tool_result", event.timestamp = %timestamp(), conversation.id = %self.metadata.conversation_id, app.version = %self.metadata.app_version, auth_mode = self.metadata.auth_mode, user.account_id = self.metadata.account_id, user.email = self.metadata.account_email, terminal.type = %self.metadata.terminal_type, model = %self.metadata.model, slug = %self.metadata.slug, tool_name = %tool_name, call_id = %call_id, arguments = %arguments, duration_ms = %duration.as_millis(), success = %success_str, output = %output, ); } fn responses_type(event: &ResponseEvent) -> 
String { match event { ResponseEvent::Created => "created".into(), ResponseEvent::OutputItemDone(item) => OtelManager::responses_item_type(item), ResponseEvent::OutputItemAdded(item) => OtelManager::responses_item_type(item), ResponseEvent::Completed { .. } => "completed".into(), ResponseEvent::OutputTextDelta(_) => "text_delta".into(), ResponseEvent::ReasoningSummaryDelta { .. } => "reasoning_summary_delta".into(), ResponseEvent::ReasoningContentDelta { .. } => "reasoning_content_delta".into(), ResponseEvent::ReasoningSummaryPartAdded { .. } => { "reasoning_summary_part_added".into() } ResponseEvent::RateLimits(_) => "rate_limits".into(), ResponseEvent::ModelsEtag(_) => "models_etag".into(), } } fn responses_item_type(item: &ResponseItem) -> String { match item { ResponseItem::Message { role, .. } => format!("message_from_{role}"), ResponseItem::Reasoning { .. } => "reasoning".into(), ResponseItem::LocalShellCall { .. } => "local_shell_call".into(), ResponseItem::FunctionCall { .. } => "function_call".into(), ResponseItem::FunctionCallOutput { .. } => "function_call_output".into(), ResponseItem::CustomToolCall { .. } => "custom_tool_call".into(), ResponseItem::CustomToolCallOutput { .. } => "custom_tool_call_output".into(), ResponseItem::WebSearchCall { .. } => "web_search_call".into(), ResponseItem::GhostSnapshot { .. } => "ghost_snapshot".into(), ResponseItem::Compaction { .. } => "compaction".into(), ResponseItem::Other => "other".into(), } } } fn timestamp() -> String { Utc::now().to_rfc3339_opts(SecondsFormat::Millis, true) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/src/lib.rs
codex-rs/chatgpt/src/lib.rs
//! ChatGPT backend integration: fetch Codex agent tasks and apply their diffs.
pub mod apply_command; // `codex apply` entry points
mod chatgpt_client; // HTTP helper for the ChatGPT backend API
mod chatgpt_token; // process-wide cache of the ChatGPT auth token
pub mod get_task; // task-fetching request and response types
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/src/apply_command.rs
codex-rs/chatgpt/src/apply_command.rs
use std::path::PathBuf; use clap::Parser; use codex_common::CliConfigOverrides; use codex_core::config::Config; use crate::chatgpt_token::init_chatgpt_token_from_auth; use crate::get_task::GetTaskResponse; use crate::get_task::OutputItem; use crate::get_task::PrOutputItem; use crate::get_task::get_task; /// Applies the latest diff from a Codex agent task. #[derive(Debug, Parser)] pub struct ApplyCommand { pub task_id: String, #[clap(flatten)] pub config_overrides: CliConfigOverrides, } pub async fn run_apply_command( apply_cli: ApplyCommand, cwd: Option<PathBuf>, ) -> anyhow::Result<()> { let config = Config::load_with_cli_overrides( apply_cli .config_overrides .parse_overrides() .map_err(anyhow::Error::msg)?, ) .await?; init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode) .await?; let task_response = get_task(&config, apply_cli.task_id).await?; apply_diff_from_task(task_response, cwd).await } pub async fn apply_diff_from_task( task_response: GetTaskResponse, cwd: Option<PathBuf>, ) -> anyhow::Result<()> { let diff_turn = match task_response.current_diff_task_turn { Some(turn) => turn, None => anyhow::bail!("No diff turn found"), }; let output_diff = diff_turn.output_items.iter().find_map(|item| match item { OutputItem::Pr(PrOutputItem { output_diff }) => Some(output_diff), _ => None, }); match output_diff { Some(output_diff) => apply_diff(&output_diff.diff, cwd).await, None => anyhow::bail!("No PR output item found"), } } async fn apply_diff(diff: &str, cwd: Option<PathBuf>) -> anyhow::Result<()> { let cwd = cwd.unwrap_or(std::env::current_dir().unwrap_or_else(|_| std::env::temp_dir())); let req = codex_git::ApplyGitRequest { cwd, diff: diff.to_string(), revert: false, preflight: false, }; let res = codex_git::apply_git_patch(&req)?; if res.exit_code != 0 { anyhow::bail!( "Git apply failed (applied={}, skipped={}, conflicts={})\nstdout:\n{}\nstderr:\n{}", res.applied_paths.len(), res.skipped_paths.len(), 
res.conflicted_paths.len(), res.stdout, res.stderr ); } println!("Successfully applied diff"); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/src/get_task.rs
codex-rs/chatgpt/src/get_task.rs
use codex_core::config::Config;
use serde::Deserialize;

use crate::chatgpt_client::chatgpt_get_request;

/// Response payload for `GET /wham/tasks/{task_id}`; only the fields needed
/// for diff extraction are modeled.
#[derive(Debug, Deserialize)]
pub struct GetTaskResponse {
    // `None` when the task has not produced a diff turn yet.
    pub current_diff_task_turn: Option<AssistantTurn>,
}

// Only relevant fields for our extraction
#[derive(Debug, Deserialize)]
pub struct AssistantTurn {
    pub output_items: Vec<OutputItem>,
}

/// One output item of an assistant turn, tagged by its JSON `type` field.
/// Unknown item types deserialize to `Other` instead of failing the response.
#[derive(Debug, Deserialize)]
#[serde(tag = "type")]
pub enum OutputItem {
    #[serde(rename = "pr")]
    Pr(PrOutputItem),
    #[serde(other)]
    Other,
}

#[derive(Debug, Deserialize)]
pub struct PrOutputItem {
    pub output_diff: OutputDiff,
}

#[derive(Debug, Deserialize)]
pub struct OutputDiff {
    // Unified diff text produced by the task.
    pub diff: String,
}

/// Fetches a task from the ChatGPT backend by id.
pub(crate) async fn get_task(config: &Config, task_id: String) -> anyhow::Result<GetTaskResponse> {
    let path = format!("/wham/tasks/{task_id}");
    chatgpt_get_request(config, path).await
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/src/chatgpt_client.rs
codex-rs/chatgpt/src/chatgpt_client.rs
use codex_core::config::Config; use codex_core::default_client::create_client; use crate::chatgpt_token::get_chatgpt_token_data; use crate::chatgpt_token::init_chatgpt_token_from_auth; use anyhow::Context; use serde::de::DeserializeOwned; /// Make a GET request to the ChatGPT backend API. pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>( config: &Config, path: String, ) -> anyhow::Result<T> { let chatgpt_base_url = &config.chatgpt_base_url; init_chatgpt_token_from_auth(&config.codex_home, config.cli_auth_credentials_store_mode) .await?; // Make direct HTTP request to ChatGPT backend API with the token let client = create_client(); let url = format!("{chatgpt_base_url}{path}"); let token = get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?; let account_id = token.account_id.ok_or_else(|| { anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`") }); let response = client .get(&url) .bearer_auth(&token.access_token) .header("chatgpt-account-id", account_id?) .header("Content-Type", "application/json") .send() .await .context("Failed to send request")?; if response.status().is_success() { let result: T = response .json() .await .context("Failed to parse JSON response")?; Ok(result) } else { let status = response.status(); let body = response.text().await.unwrap_or_default(); anyhow::bail!("Request failed with status {status}: {body}") } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/src/chatgpt_token.rs
codex-rs/chatgpt/src/chatgpt_token.rs
use codex_core::CodexAuth; use std::path::Path; use std::sync::LazyLock; use std::sync::RwLock; use codex_core::auth::AuthCredentialsStoreMode; use codex_core::token_data::TokenData; static CHATGPT_TOKEN: LazyLock<RwLock<Option<TokenData>>> = LazyLock::new(|| RwLock::new(None)); pub fn get_chatgpt_token_data() -> Option<TokenData> { CHATGPT_TOKEN.read().ok()?.clone() } pub fn set_chatgpt_token_data(value: TokenData) { if let Ok(mut guard) = CHATGPT_TOKEN.write() { *guard = Some(value); } } /// Initialize the ChatGPT token from auth.json file pub async fn init_chatgpt_token_from_auth( codex_home: &Path, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<()> { let auth = CodexAuth::from_auth_storage(codex_home, auth_credentials_store_mode)?; if let Some(auth) = auth { let token_data = auth.get_token_data().await?; set_chatgpt_token_data(token_data); } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/tests/all.rs
codex-rs/chatgpt/tests/all.rs
// Single integration test binary that aggregates all test modules. // The submodules live in `tests/suite/`. mod suite;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/tests/suite/apply_command_e2e.rs
codex-rs/chatgpt/tests/suite/apply_command_e2e.rs
use codex_chatgpt::apply_command::apply_diff_from_task; use codex_chatgpt::get_task::GetTaskResponse; use std::path::Path; use tempfile::TempDir; use tokio::process::Command; /// Creates a temporary git repository with initial commit async fn create_temp_git_repo() -> anyhow::Result<TempDir> { let temp_dir = TempDir::new()?; let repo_path = temp_dir.path(); let envs = vec![ ("GIT_CONFIG_GLOBAL", "/dev/null"), ("GIT_CONFIG_NOSYSTEM", "1"), ]; let output = Command::new("git") .envs(envs.clone()) .args(["init"]) .current_dir(repo_path) .output() .await?; if !output.status.success() { anyhow::bail!( "Failed to initialize git repo: {}", String::from_utf8_lossy(&output.stderr) ); } Command::new("git") .envs(envs.clone()) .args(["config", "user.email", "test@example.com"]) .current_dir(repo_path) .output() .await?; Command::new("git") .envs(envs.clone()) .args(["config", "user.name", "Test User"]) .current_dir(repo_path) .output() .await?; std::fs::write(repo_path.join("README.md"), "# Test Repo\n")?; Command::new("git") .envs(envs.clone()) .args(["add", "README.md"]) .current_dir(repo_path) .output() .await?; let output = Command::new("git") .envs(envs.clone()) .args(["commit", "-m", "Initial commit"]) .current_dir(repo_path) .output() .await?; if !output.status.success() { anyhow::bail!( "Failed to create initial commit: {}", String::from_utf8_lossy(&output.stderr) ); } Ok(temp_dir) } async fn mock_get_task_with_fixture() -> anyhow::Result<GetTaskResponse> { let fixture_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/task_turn_fixture.json"); let fixture_content = std::fs::read_to_string(fixture_path)?; let response: GetTaskResponse = serde_json::from_str(&fixture_content)?; Ok(response) } #[tokio::test] async fn test_apply_command_creates_fibonacci_file() { let temp_repo = create_temp_git_repo() .await .expect("Failed to create temp git repo"); let repo_path = temp_repo.path(); let task_response = mock_get_task_with_fixture() .await .expect("Failed to load 
fixture"); apply_diff_from_task(task_response, Some(repo_path.to_path_buf())) .await .expect("Failed to apply diff from task"); // Assert that fibonacci.js was created in scripts/ directory let fibonacci_path = repo_path.join("scripts/fibonacci.js"); assert!(fibonacci_path.exists(), "fibonacci.js was not created"); // Verify the file contents match expected let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js"); assert!( contents.contains("function fibonacci(n)"), "fibonacci.js doesn't contain expected function" ); assert!( contents.contains("#!/usr/bin/env node"), "fibonacci.js doesn't have shebang" ); assert!( contents.contains("module.exports = fibonacci;"), "fibonacci.js doesn't export function" ); // Verify file has correct number of lines (31 as specified in fixture) let line_count = contents.lines().count(); assert_eq!( line_count, 31, "fibonacci.js should have 31 lines, got {line_count}", ); } #[tokio::test] async fn test_apply_command_with_merge_conflicts() { let temp_repo = create_temp_git_repo() .await .expect("Failed to create temp git repo"); let repo_path = temp_repo.path(); // Create conflicting fibonacci.js file first let scripts_dir = repo_path.join("scripts"); std::fs::create_dir_all(&scripts_dir).expect("Failed to create scripts directory"); let conflicting_content = r#"#!/usr/bin/env node // This is a different fibonacci implementation function fib(num) { if (num <= 1) return num; return fib(num - 1) + fib(num - 2); } console.log("Running fibonacci..."); console.log(fib(10)); "#; let fibonacci_path = scripts_dir.join("fibonacci.js"); std::fs::write(&fibonacci_path, conflicting_content).expect("Failed to write conflicting file"); Command::new("git") .args(["add", "scripts/fibonacci.js"]) .current_dir(repo_path) .output() .await .expect("Failed to add fibonacci.js"); Command::new("git") .args(["commit", "-m", "Add conflicting fibonacci implementation"]) .current_dir(repo_path) .output() .await .expect("Failed 
to commit conflicting file"); let original_dir = std::env::current_dir().expect("Failed to get current dir"); std::env::set_current_dir(repo_path).expect("Failed to change directory"); struct DirGuard(std::path::PathBuf); impl Drop for DirGuard { fn drop(&mut self) { let _ = std::env::set_current_dir(&self.0); } } let _guard = DirGuard(original_dir); let task_response = mock_get_task_with_fixture() .await .expect("Failed to load fixture"); let apply_result = apply_diff_from_task(task_response, Some(repo_path.to_path_buf())).await; assert!( apply_result.is_err(), "Expected apply to fail due to merge conflicts" ); let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js"); assert!( contents.contains("<<<<<<< HEAD") || contents.contains("=======") || contents.contains(">>>>>>> "), "fibonacci.js should contain merge conflict markers, got: {contents}", ); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/chatgpt/tests/suite/mod.rs
codex-rs/chatgpt/tests/suite/mod.rs
// Aggregates all former standalone integration tests as modules. mod apply_command_e2e;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/responses-api-proxy/src/lib.rs
codex-rs/responses-api-proxy/src/lib.rs
use std::fs::File; use std::fs::{self}; use std::io::Write; use std::net::SocketAddr; use std::net::TcpListener; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use anyhow::Context; use anyhow::Result; use anyhow::anyhow; use clap::Parser; use reqwest::Url; use reqwest::blocking::Client; use reqwest::header::AUTHORIZATION; use reqwest::header::HOST; use reqwest::header::HeaderMap; use reqwest::header::HeaderName; use reqwest::header::HeaderValue; use serde::Serialize; use tiny_http::Header; use tiny_http::Method; use tiny_http::Request; use tiny_http::Response; use tiny_http::Server; use tiny_http::StatusCode; mod read_api_key; use read_api_key::read_auth_header_from_stdin; /// CLI arguments for the proxy. #[derive(Debug, Clone, Parser)] #[command(name = "responses-api-proxy", about = "Minimal OpenAI responses proxy")] pub struct Args { /// Port to listen on. If not set, an ephemeral port is used. #[arg(long)] pub port: Option<u16>, /// Path to a JSON file to write startup info (single line). Includes {"port": <u16>}. #[arg(long, value_name = "FILE")] pub server_info: Option<PathBuf>, /// Enable HTTP shutdown endpoint at GET /shutdown #[arg(long)] pub http_shutdown: bool, /// Absolute URL the proxy should forward requests to (defaults to OpenAI). #[arg(long, default_value = "https://api.openai.com/v1/responses")] pub upstream_url: String, } #[derive(Serialize)] struct ServerInfo { port: u16, pid: u32, } struct ForwardConfig { upstream_url: Url, host_header: HeaderValue, } /// Entry point for the library main, for parity with other crates. 
pub fn run_main(args: Args) -> Result<()> { let auth_header = read_auth_header_from_stdin()?; let upstream_url = Url::parse(&args.upstream_url).context("parsing --upstream-url")?; let host = match (upstream_url.host_str(), upstream_url.port()) { (Some(host), Some(port)) => format!("{host}:{port}"), (Some(host), None) => host.to_string(), _ => return Err(anyhow!("upstream URL must include a host")), }; let host_header = HeaderValue::from_str(&host).context("constructing Host header from upstream URL")?; let forward_config = Arc::new(ForwardConfig { upstream_url, host_header, }); let (listener, bound_addr) = bind_listener(args.port)?; if let Some(path) = args.server_info.as_ref() { write_server_info(path, bound_addr.port())?; } let server = Server::from_listener(listener, None) .map_err(|err| anyhow!("creating HTTP server: {err}"))?; let client = Arc::new( Client::builder() // Disable reqwest's 30s default so long-lived response streams keep flowing. .timeout(None::<Duration>) .build() .context("building reqwest client")?, ); eprintln!("responses-api-proxy listening on {bound_addr}"); let http_shutdown = args.http_shutdown; for request in server.incoming_requests() { let client = client.clone(); let forward_config = forward_config.clone(); std::thread::spawn(move || { if http_shutdown && request.method() == &Method::Get && request.url() == "/shutdown" { let _ = request.respond(Response::new_empty(StatusCode(200))); std::process::exit(0); } if let Err(e) = forward_request(&client, auth_header, &forward_config, request) { eprintln!("forwarding error: {e}"); } }); } Err(anyhow!("server stopped unexpectedly")) } fn bind_listener(port: Option<u16>) -> Result<(TcpListener, SocketAddr)> { let addr = SocketAddr::from(([127, 0, 0, 1], port.unwrap_or(0))); let listener = TcpListener::bind(addr).with_context(|| format!("failed to bind {addr}"))?; let bound = listener.local_addr().context("failed to read local_addr")?; Ok((listener, bound)) } fn write_server_info(path: &Path, 
port: u16) -> Result<()> { if let Some(parent) = path.parent() && !parent.as_os_str().is_empty() { fs::create_dir_all(parent)?; } let info = ServerInfo { port, pid: std::process::id(), }; let mut data = serde_json::to_string(&info)?; data.push('\n'); let mut f = File::create(path)?; f.write_all(data.as_bytes())?; Ok(()) } fn forward_request( client: &Client, auth_header: &'static str, config: &ForwardConfig, mut req: Request, ) -> Result<()> { // Only allow POST /v1/responses exactly, no query string. let method = req.method().clone(); let url_path = req.url().to_string(); let allow = method == Method::Post && url_path == "/v1/responses"; if !allow { let resp = Response::new_empty(StatusCode(403)); let _ = req.respond(resp); return Ok(()); } // Read request body let mut body = Vec::new(); let mut reader = req.as_reader(); std::io::Read::read_to_end(&mut reader, &mut body)?; // Build headers for upstream, forwarding everything from the incoming // request except Authorization (we replace it below). let mut headers = HeaderMap::new(); for header in req.headers() { let name_ascii = header.field.as_str(); let lower = name_ascii.to_ascii_lowercase(); if lower.as_str() == "authorization" || lower.as_str() == "host" { continue; } let header_name = match HeaderName::from_bytes(lower.as_bytes()) { Ok(name) => name, Err(_) => continue, }; if let Ok(value) = HeaderValue::from_bytes(header.value.as_bytes()) { headers.append(header_name, value); } } // As part of our effort to to keep `auth_header` secret, we use a // combination of `from_static()` and `set_sensitive(true)`. 
let mut auth_header_value = HeaderValue::from_static(auth_header); auth_header_value.set_sensitive(true); headers.insert(AUTHORIZATION, auth_header_value); headers.insert(HOST, config.host_header.clone()); let upstream_resp = client .post(config.upstream_url.clone()) .headers(headers) .body(body) .send() .context("forwarding request to upstream")?; // We have to create an adapter between a `reqwest::blocking::Response` // and a `tiny_http::Response`. Fortunately, `reqwest::blocking::Response` // implements `Read`, so we can use it directly as the body of the // `tiny_http::Response`. let status = upstream_resp.status(); let mut response_headers = Vec::new(); for (name, value) in upstream_resp.headers().iter() { // Skip headers that tiny_http manages itself. if matches!( name.as_str(), "content-length" | "transfer-encoding" | "connection" | "trailer" | "upgrade" ) { continue; } if let Ok(header) = Header::from_bytes(name.as_str().as_bytes(), value.as_bytes()) { response_headers.push(header); } } let content_length = upstream_resp.content_length().and_then(|len| { if len <= usize::MAX as u64 { Some(len as usize) } else { None } }); let response = Response::new( StatusCode(status.as_u16()), response_headers, upstream_resp, content_length, None, ); let _ = req.respond(response); Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/responses-api-proxy/src/read_api_key.rs
codex-rs/responses-api-proxy/src/read_api_key.rs
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use zeroize::Zeroize;

/// Use a generous buffer size to avoid truncation and to allow for longer API
/// keys in the future.
const BUFFER_SIZE: usize = 1024;

// The final header value is "Bearer <key>"; the prefix is pre-written into the
// stack buffer so the key bytes land directly after it with no extra copy.
const AUTH_HEADER_PREFIX: &[u8] = b"Bearer ";

/// Reads the auth token from stdin and returns a static `Authorization` header
/// value with the auth token used with `Bearer`. The header value is returned
/// as a `&'static str` whose bytes are locked in memory to avoid accidental
/// exposure.
#[cfg(unix)]
pub(crate) fn read_auth_header_from_stdin() -> Result<&'static str> {
    read_auth_header_with(read_from_unix_stdin)
}

#[cfg(windows)]
pub(crate) fn read_auth_header_from_stdin() -> Result<&'static str> {
    use std::io::Read;

    // Use of `stdio::io::stdin()` has the problem mentioned in the docstring on
    // the UNIX version of `read_from_unix_stdin()`, so this should ultimately
    // be replaced by the low-level Windows equivalent. Because we do not have
    // an equivalent of mlock() on Windows right now, it is not pressing until
    // we address that issue.
    read_auth_header_with(|buffer| std::io::stdin().read(buffer))
}

/// We perform a low-level read with `read(2)` because `stdio::io::stdin()` has
/// an internal BufReader:
///
/// https://github.com/rust-lang/rust/blob/bcbbdcb8522fd3cb4a8dde62313b251ab107694d/library/std/src/io/stdio.rs#L250-L252
///
/// that can end up retaining a copy of stdin data in memory with no way to zero
/// it out, whereas we aim to guarantee there is exactly one copy of the API key
/// in memory, protected by mlock(2).
#[cfg(unix)]
fn read_from_unix_stdin(buffer: &mut [u8]) -> std::io::Result<usize> {
    use libc::c_void;
    use libc::read;

    // Perform a single read(2) call into the provided buffer slice.
    // Looping and newline/EOF handling are managed by the caller.
    loop {
        // SAFETY: `buffer` is a valid, writable slice for the duration of the
        // call, and we pass exactly its length to read(2).
        let result = unsafe {
            read(
                libc::STDIN_FILENO,
                buffer.as_mut_ptr().cast::<c_void>(),
                buffer.len(),
            )
        };
        if result == 0 {
            return Ok(0);
        }
        if result < 0 {
            let err = std::io::Error::last_os_error();
            // EINTR: retry the read instead of surfacing a spurious error.
            if err.kind() == std::io::ErrorKind::Interrupted {
                continue;
            }
            return Err(err);
        }
        return Ok(result as usize);
    }
}

// Core implementation, parameterized over the raw read function so the tests
// can drive it without touching real stdin. NOTE: every error path below must
// zeroize `buf` before returning — preserve that invariant when editing.
fn read_auth_header_with<F>(mut read_fn: F) -> Result<&'static str>
where
    F: FnMut(&mut [u8]) -> std::io::Result<usize>,
{
    // TAKE CARE WHEN MODIFYING THIS CODE!!!
    //
    // This function goes to great lengths to avoid leaving the API key in
    // memory longer than necessary and to avoid copying it around. We read
    // directly into a stack buffer so the only heap allocation should be the
    // one to create the String (with the exact size) for the header value,
    // which we then immediately protect with mlock(2).
    let mut buf = [0u8; BUFFER_SIZE];
    buf[..AUTH_HEADER_PREFIX.len()].copy_from_slice(AUTH_HEADER_PREFIX);
    let prefix_len = AUTH_HEADER_PREFIX.len();
    let capacity = buf.len() - prefix_len;

    let mut total_read = 0usize; // number of bytes read into the token region
    let mut saw_newline = false;
    let mut saw_eof = false;

    while total_read < capacity {
        let slice = &mut buf[prefix_len + total_read..];
        let read = match read_fn(slice) {
            Ok(n) => n,
            Err(err) => {
                buf.zeroize();
                return Err(err.into());
            }
        };
        if read == 0 {
            saw_eof = true;
            break;
        }

        // Search only the newly written region for a newline.
        let newly_written = &slice[..read];
        if let Some(pos) = newly_written.iter().position(|&b| b == b'\n') {
            total_read += pos + 1; // include the newline for trimming below
            saw_newline = true;
            break;
        }
        total_read += read;
        // Continue loop; if buffer fills without newline/EOF we'll error below.
    }

    // If buffer filled and we did not see newline or EOF, error out.
    if total_read == capacity && !saw_newline && !saw_eof {
        buf.zeroize();
        return Err(anyhow!(
            "API key is too large to fit in the {BUFFER_SIZE}-byte buffer"
        ));
    }

    // Trim trailing CR/LF from the end of the token region.
    let mut total = prefix_len + total_read;
    while total > prefix_len && (buf[total - 1] == b'\n' || buf[total - 1] == b'\r') {
        total -= 1;
    }

    // Empty token after trimming means nothing usable arrived on stdin.
    if total == AUTH_HEADER_PREFIX.len() {
        buf.zeroize();
        return Err(anyhow!(
            "API key must be provided via stdin (e.g. printenv OPENAI_API_KEY | codex responses-api-proxy)"
        ));
    }

    if let Err(err) = validate_auth_header_bytes(&buf[AUTH_HEADER_PREFIX.len()..total]) {
        buf.zeroize();
        return Err(err);
    }

    let header_str = match std::str::from_utf8(&buf[..total]) {
        Ok(value) => value,
        Err(err) => {
            // In theory, validate_auth_header_bytes() should have caught
            // any invalid UTF-8 sequences, but just in case...
            buf.zeroize();
            return Err(err).context("reading Authorization header from stdin as UTF-8");
        }
    };
    // Exactly-sized heap copy, then wipe the stack buffer; the leaked copy is
    // the single remaining instance and is mlock'd below.
    let header_value = String::from(header_str);
    buf.zeroize();
    let leaked: &'static mut str = header_value.leak();
    mlock_str(leaked);
    Ok(leaked)
}

// Best-effort: lock the pages containing `value` so the key is never swapped
// to disk. Failures are deliberately ignored (mlock may be unavailable or
// over quota); all bail-outs below simply skip locking.
#[cfg(unix)]
fn mlock_str(value: &str) {
    use libc::_SC_PAGESIZE;
    use libc::c_void;
    use libc::mlock;
    use libc::sysconf;

    if value.is_empty() {
        return;
    }
    let page_size = unsafe { sysconf(_SC_PAGESIZE) };
    if page_size <= 0 {
        return;
    }
    let page_size = page_size as usize;
    if page_size == 0 {
        return;
    }
    // mlock(2) operates on whole pages: round the start down and the end up
    // to page boundaries, guarding against address-arithmetic overflow.
    let addr = value.as_ptr() as usize;
    let len = value.len();
    let start = addr & !(page_size - 1);
    let addr_end = match addr.checked_add(len) {
        Some(v) => match v.checked_add(page_size - 1) {
            Some(total) => total,
            None => return,
        },
        None => return,
    };
    let end = addr_end & !(page_size - 1);
    let size = end.saturating_sub(start);
    if size == 0 {
        return;
    }
    let _ = unsafe { mlock(start as *const c_void, size) };
}

#[cfg(not(unix))]
fn mlock_str(_value: &str) {}

/// The key should match /^[A-Za-z0-9\-_]+$/. Ensure there is no funny business
/// with NUL characters and whatnot.
fn validate_auth_header_bytes(key_bytes: &[u8]) -> Result<()> {
    if key_bytes
        .iter()
        .all(|byte| byte.is_ascii_alphanumeric() || matches!(byte, b'-' | b'_'))
    {
        return Ok(());
    }
    Err(anyhow!(
        "API key may only contain ASCII letters, numbers, '-' or '_'"
    ))
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::VecDeque;
    use std::io;

    #[test]
    fn reads_key_with_no_newlines() {
        let mut sent = false;
        let result = read_auth_header_with(|buf| {
            if sent {
                return Ok(0);
            }
            let data = b"sk-abc123";
            buf[..data.len()].copy_from_slice(data);
            sent = true;
            Ok(data.len())
        })
        .unwrap();
        assert_eq!(result, "Bearer sk-abc123");
    }

    #[test]
    fn reads_key_with_short_reads() {
        // Simulates read(2) returning the key across several partial reads.
        let mut chunks: VecDeque<&[u8]> =
            VecDeque::from(vec![b"sk-".as_ref(), b"abc".as_ref(), b"123\n".as_ref()]);
        let result = read_auth_header_with(|buf| match chunks.pop_front() {
            Some(chunk) if !chunk.is_empty() => {
                buf[..chunk.len()].copy_from_slice(chunk);
                Ok(chunk.len())
            }
            _ => Ok(0),
        })
        .unwrap();
        assert_eq!(result, "Bearer sk-abc123");
    }

    #[test]
    fn reads_key_and_trims_newlines() {
        let mut sent = false;
        let result = read_auth_header_with(|buf| {
            if sent {
                return Ok(0);
            }
            let data = b"sk-abc123\r\n";
            buf[..data.len()].copy_from_slice(data);
            sent = true;
            Ok(data.len())
        })
        .unwrap();
        assert_eq!(result, "Bearer sk-abc123");
    }

    #[test]
    fn errors_when_no_input_provided() {
        let err = read_auth_header_with(|_| Ok(0)).unwrap_err();
        let message = format!("{err:#}");
        assert!(message.contains("must be provided"));
    }

    #[test]
    fn errors_when_buffer_filled() {
        let err = read_auth_header_with(|buf| {
            let data = vec![b'a'; BUFFER_SIZE - AUTH_HEADER_PREFIX.len()];
            buf[..data.len()].copy_from_slice(&data);
            Ok(data.len())
        })
        .unwrap_err();
        let message = format!("{err:#}");
        let expected_error =
            format!("API key is too large to fit in the {BUFFER_SIZE}-byte buffer");
        assert!(message.contains(&expected_error));
    }

    #[test]
    fn propagates_io_error() {
        let err = read_auth_header_with(|_| Err(io::Error::other("boom"))).unwrap_err();
        let io_error = err.downcast_ref::<io::Error>().unwrap();
        assert_eq!(io_error.kind(), io::ErrorKind::Other);
        assert_eq!(io_error.to_string(), "boom");
    }

    #[test]
    fn errors_on_invalid_utf8() {
        let mut sent = false;
        let err = read_auth_header_with(|buf| {
            if sent {
                return Ok(0);
            }
            let data = b"sk-abc\xff";
            buf[..data.len()].copy_from_slice(data);
            sent = true;
            Ok(data.len())
        })
        .unwrap_err();
        let message = format!("{err:#}");
        assert!(message.contains("API key may only contain ASCII letters, numbers, '-' or '_'"));
    }

    #[test]
    fn errors_on_invalid_characters() {
        let mut sent = false;
        let err = read_auth_header_with(|buf| {
            if sent {
                return Ok(0);
            }
            let data = b"sk-abc!23";
            buf[..data.len()].copy_from_slice(data);
            sent = true;
            Ok(data.len())
        })
        .unwrap_err();
        let message = format!("{err:#}");
        assert!(message.contains("API key may only contain ASCII letters, numbers, '-' or '_'"));
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/responses-api-proxy/src/main.rs
codex-rs/responses-api-proxy/src/main.rs
use clap::Parser; use codex_responses_api_proxy::Args as ResponsesApiProxyArgs; #[ctor::ctor] fn pre_main() { codex_process_hardening::pre_main_hardening(); } pub fn main() -> anyhow::Result<()> { let args = ResponsesApiProxyArgs::parse(); codex_responses_api_proxy::run_main(args) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/lib.rs
codex-rs/codex-backend-openapi-models/src/lib.rs
#![allow(clippy::unwrap_used, clippy::expect_used)] // Re-export generated OpenAPI models. // The regen script populates `src/models/*.rs` and writes `src/models/mod.rs`. // This module intentionally contains no hand-written types. pub mod models;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/git_pull_request.rs
codex-rs/codex-backend-openapi-models/src/models/git_pull_request.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GitPullRequest { #[serde(rename = "number")] pub number: i32, #[serde(rename = "url")] pub url: String, #[serde(rename = "state")] pub state: String, #[serde(rename = "merged")] pub merged: bool, #[serde(rename = "mergeable")] pub mergeable: bool, #[serde(rename = "draft", skip_serializing_if = "Option::is_none")] pub draft: Option<bool>, #[serde(rename = "title", skip_serializing_if = "Option::is_none")] pub title: Option<String>, #[serde(rename = "body", skip_serializing_if = "Option::is_none")] pub body: Option<String>, #[serde(rename = "base", skip_serializing_if = "Option::is_none")] pub base: Option<String>, #[serde(rename = "head", skip_serializing_if = "Option::is_none")] pub head: Option<String>, #[serde(rename = "base_sha", skip_serializing_if = "Option::is_none")] pub base_sha: Option<String>, #[serde(rename = "head_sha", skip_serializing_if = "Option::is_none")] pub head_sha: Option<String>, #[serde(rename = "merge_commit_sha", skip_serializing_if = "Option::is_none")] pub merge_commit_sha: Option<String>, #[serde(rename = "comments", skip_serializing_if = "Option::is_none")] pub comments: Option<serde_json::Value>, #[serde(rename = "diff", skip_serializing_if = "Option::is_none")] pub diff: Option<serde_json::Value>, #[serde(rename = "user", skip_serializing_if = "Option::is_none")] pub user: Option<serde_json::Value>, } impl GitPullRequest { pub fn new( number: i32, url: String, state: String, merged: bool, mergeable: bool, ) -> GitPullRequest { GitPullRequest { number, url, state, merged, mergeable, draft: None, title: None, body: None, base: None, head: None, base_sha: None, head_sha: None, merge_commit_sha: None, comments: None, diff: None, user: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs
codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_details.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct RateLimitStatusDetails { #[serde(rename = "allowed")] pub allowed: bool, #[serde(rename = "limit_reached")] pub limit_reached: bool, #[serde( rename = "primary_window", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub primary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>, #[serde( rename = "secondary_window", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub secondary_window: Option<Option<Box<models::RateLimitWindowSnapshot>>>, } impl RateLimitStatusDetails { pub fn new(allowed: bool, limit_reached: bool) -> RateLimitStatusDetails { RateLimitStatusDetails { allowed, limit_reached, primary_window: None, secondary_window: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/code_task_details_response.rs
codex-rs/codex-backend-openapi-models/src/models/code_task_details_response.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeTaskDetailsResponse { #[serde(rename = "task")] pub task: Box<models::TaskResponse>, #[serde(rename = "current_user_turn", skip_serializing_if = "Option::is_none")] pub current_user_turn: Option<std::collections::HashMap<String, serde_json::Value>>, #[serde( rename = "current_assistant_turn", skip_serializing_if = "Option::is_none" )] pub current_assistant_turn: Option<std::collections::HashMap<String, serde_json::Value>>, #[serde( rename = "current_diff_task_turn", skip_serializing_if = "Option::is_none" )] pub current_diff_task_turn: Option<std::collections::HashMap<String, serde_json::Value>>, } impl CodeTaskDetailsResponse { pub fn new(task: models::TaskResponse) -> CodeTaskDetailsResponse { CodeTaskDetailsResponse { task: Box::new(task), current_user_turn: None, current_assistant_turn: None, current_diff_task_turn: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/external_pull_request_response.rs
codex-rs/codex-backend-openapi-models/src/models/external_pull_request_response.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct ExternalPullRequestResponse { #[serde(rename = "id")] pub id: String, #[serde(rename = "assistant_turn_id")] pub assistant_turn_id: String, #[serde(rename = "pull_request")] pub pull_request: Box<models::GitPullRequest>, #[serde(rename = "codex_updated_sha", skip_serializing_if = "Option::is_none")] pub codex_updated_sha: Option<String>, } impl ExternalPullRequestResponse { pub fn new( id: String, assistant_turn_id: String, pull_request: models::GitPullRequest, ) -> ExternalPullRequestResponse { ExternalPullRequestResponse { id, assistant_turn_id, pull_request: Box::new(pull_request), codex_updated_sha: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/paginated_list_task_list_item_.rs
codex-rs/codex-backend-openapi-models/src/models/paginated_list_task_list_item_.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct PaginatedListTaskListItem { #[serde(rename = "items")] pub items: Vec<models::TaskListItem>, #[serde(rename = "cursor", skip_serializing_if = "Option::is_none")] pub cursor: Option<String>, } impl PaginatedListTaskListItem { pub fn new(items: Vec<models::TaskListItem>) -> PaginatedListTaskListItem { PaginatedListTaskListItem { items, cursor: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/task_response.rs
codex-rs/codex-backend-openapi-models/src/models/task_response.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct TaskResponse { #[serde(rename = "id")] pub id: String, #[serde(rename = "created_at", skip_serializing_if = "Option::is_none")] pub created_at: Option<f64>, #[serde(rename = "title")] pub title: String, #[serde( rename = "has_generated_title", skip_serializing_if = "Option::is_none" )] pub has_generated_title: Option<bool>, #[serde(rename = "current_turn_id", skip_serializing_if = "Option::is_none")] pub current_turn_id: Option<String>, #[serde(rename = "has_unread_turn", skip_serializing_if = "Option::is_none")] pub has_unread_turn: Option<bool>, #[serde( rename = "denormalized_metadata", skip_serializing_if = "Option::is_none" )] pub denormalized_metadata: Option<std::collections::HashMap<String, serde_json::Value>>, #[serde(rename = "archived")] pub archived: bool, #[serde(rename = "external_pull_requests")] pub external_pull_requests: Vec<models::ExternalPullRequestResponse>, } impl TaskResponse { pub fn new( id: String, title: String, archived: bool, external_pull_requests: Vec<models::ExternalPullRequestResponse>, ) -> TaskResponse { TaskResponse { id, created_at: None, title, has_generated_title: None, current_turn_id: None, has_unread_turn: None, denormalized_metadata: None, archived, external_pull_requests, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/credit_status_details.rs
codex-rs/codex-backend-openapi-models/src/models/credit_status_details.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct CreditStatusDetails { #[serde(rename = "has_credits")] pub has_credits: bool, #[serde(rename = "unlimited")] pub unlimited: bool, #[serde( rename = "balance", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub balance: Option<Option<String>>, #[serde( rename = "approx_local_messages", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub approx_local_messages: Option<Option<Vec<serde_json::Value>>>, #[serde( rename = "approx_cloud_messages", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub approx_cloud_messages: Option<Option<Vec<serde_json::Value>>>, } impl CreditStatusDetails { pub fn new(has_credits: bool, unlimited: bool) -> CreditStatusDetails { CreditStatusDetails { has_credits, unlimited, balance: None, approx_local_messages: None, approx_cloud_messages: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs
codex-rs/codex-backend-openapi-models/src/models/rate_limit_window_snapshot.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct RateLimitWindowSnapshot { #[serde(rename = "used_percent")] pub used_percent: i32, #[serde(rename = "limit_window_seconds")] pub limit_window_seconds: i32, #[serde(rename = "reset_after_seconds")] pub reset_after_seconds: i32, #[serde(rename = "reset_at")] pub reset_at: i32, } impl RateLimitWindowSnapshot { pub fn new( used_percent: i32, limit_window_seconds: i32, reset_after_seconds: i32, reset_at: i32, ) -> RateLimitWindowSnapshot { RateLimitWindowSnapshot { used_percent, limit_window_seconds, reset_after_seconds, reset_at, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/mod.rs
codex-rs/codex-backend-openapi-models/src/models/mod.rs
// Curated minimal export list for current workspace usage. // NOTE: This file was previously auto-generated by the OpenAPI generator. // Currently export only the types referenced by the workspace // The process for this will change // Cloud Tasks pub mod code_task_details_response; pub use self::code_task_details_response::CodeTaskDetailsResponse; pub mod task_response; pub use self::task_response::TaskResponse; pub mod external_pull_request_response; pub use self::external_pull_request_response::ExternalPullRequestResponse; pub mod git_pull_request; pub use self::git_pull_request::GitPullRequest; pub mod task_list_item; pub use self::task_list_item::TaskListItem; pub mod paginated_list_task_list_item_; pub use self::paginated_list_task_list_item_::PaginatedListTaskListItem; // Rate Limits pub mod rate_limit_status_payload; pub use self::rate_limit_status_payload::PlanType; pub use self::rate_limit_status_payload::RateLimitStatusPayload; pub mod rate_limit_status_details; pub use self::rate_limit_status_details::RateLimitStatusDetails; pub mod rate_limit_window_snapshot; pub use self::rate_limit_window_snapshot::RateLimitWindowSnapshot; pub mod credit_status_details; pub use self::credit_status_details::CreditStatusDetails;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/task_list_item.rs
codex-rs/codex-backend-openapi-models/src/models/task_list_item.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct TaskListItem { #[serde(rename = "id")] pub id: String, #[serde(rename = "title")] pub title: String, #[serde( rename = "has_generated_title", skip_serializing_if = "Option::is_none" )] pub has_generated_title: Option<bool>, #[serde(rename = "updated_at", skip_serializing_if = "Option::is_none")] pub updated_at: Option<f64>, #[serde(rename = "created_at", skip_serializing_if = "Option::is_none")] pub created_at: Option<f64>, #[serde( rename = "task_status_display", skip_serializing_if = "Option::is_none" )] pub task_status_display: Option<std::collections::HashMap<String, serde_json::Value>>, #[serde(rename = "archived")] pub archived: bool, #[serde(rename = "has_unread_turn")] pub has_unread_turn: bool, #[serde(rename = "pull_requests", skip_serializing_if = "Option::is_none")] pub pull_requests: Option<Vec<models::ExternalPullRequestResponse>>, } impl TaskListItem { pub fn new( id: String, title: String, has_generated_title: Option<bool>, archived: bool, has_unread_turn: bool, ) -> TaskListItem { TaskListItem { id, title, has_generated_title, updated_at: None, created_at: None, task_status_display: None, archived, has_unread_turn, pull_requests: None, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs
codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs
/* * codex-backend * * codex-backend * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::models; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct RateLimitStatusPayload { #[serde(rename = "plan_type")] pub plan_type: PlanType, #[serde( rename = "rate_limit", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub rate_limit: Option<Option<Box<models::RateLimitStatusDetails>>>, #[serde( rename = "credits", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub credits: Option<Option<Box<models::CreditStatusDetails>>>, } impl RateLimitStatusPayload { pub fn new(plan_type: PlanType) -> RateLimitStatusPayload { RateLimitStatusPayload { plan_type, rate_limit: None, credits: None, } } } #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum PlanType { #[serde(rename = "guest")] Guest, #[serde(rename = "free")] Free, #[serde(rename = "go")] Go, #[serde(rename = "plus")] Plus, #[serde(rename = "pro")] Pro, #[serde(rename = "free_workspace")] FreeWorkspace, #[serde(rename = "team")] Team, #[serde(rename = "business")] Business, #[serde(rename = "education")] Education, #[serde(rename = "quorum")] Quorum, #[serde(rename = "k12")] K12, #[serde(rename = "enterprise")] Enterprise, #[serde(rename = "edu")] Edu, } impl Default for PlanType { fn default() -> PlanType { Self::Guest } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/safety.rs
codex-rs/core/src/safety.rs
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;

use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::ApplyPatchFileChange;

use crate::exec::SandboxType;
use crate::util::resolve_path;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;

#[cfg(target_os = "windows")]
use std::sync::atomic::AtomicBool;
#[cfg(target_os = "windows")]
use std::sync::atomic::Ordering;

// Process-wide flags recording whether the Windows sandbox (and its elevated
// variant) are available; set once at startup, read when picking a sandbox.
#[cfg(target_os = "windows")]
static WINDOWS_SANDBOX_ENABLED: AtomicBool = AtomicBool::new(false);
#[cfg(target_os = "windows")]
static WINDOWS_ELEVATED_SANDBOX_ENABLED: AtomicBool = AtomicBool::new(false);

/// Records whether the Windows restricted-token sandbox is available.
#[cfg(target_os = "windows")]
pub fn set_windows_sandbox_enabled(enabled: bool) {
    WINDOWS_SANDBOX_ENABLED.store(enabled, Ordering::Relaxed);
}

/// No-op on non-Windows platforms (kept so callers compile everywhere).
#[cfg(not(target_os = "windows"))]
#[allow(dead_code)]
pub fn set_windows_sandbox_enabled(_enabled: bool) {}

/// Records whether the elevated Windows sandbox is available.
#[cfg(target_os = "windows")]
pub fn set_windows_elevated_sandbox_enabled(enabled: bool) {
    WINDOWS_ELEVATED_SANDBOX_ENABLED.store(enabled, Ordering::Relaxed);
}

/// No-op on non-Windows platforms.
#[cfg(not(target_os = "windows"))]
#[allow(dead_code)]
pub fn set_windows_elevated_sandbox_enabled(_enabled: bool) {}

/// Whether the elevated Windows sandbox was enabled for this process.
#[cfg(target_os = "windows")]
pub fn is_windows_elevated_sandbox_enabled() -> bool {
    WINDOWS_ELEVATED_SANDBOX_ENABLED.load(Ordering::Relaxed)
}

/// Always false on non-Windows platforms.
#[cfg(not(target_os = "windows"))]
#[allow(dead_code)]
pub fn is_windows_elevated_sandbox_enabled() -> bool {
    false
}

/// Outcome of a safety assessment for a proposed action.
#[derive(Debug, PartialEq)]
pub enum SafetyCheck {
    AutoApprove {
        sandbox_type: SandboxType,
        user_explicitly_approved: bool,
    },
    AskUser,
    Reject {
        reason: String,
    },
}

/// Decides whether an `apply_patch` action can run automatically, needs user
/// approval, or must be rejected, given the approval policy and sandbox
/// policy in effect.
pub fn assess_patch_safety(
    action: &ApplyPatchAction,
    policy: AskForApproval,
    sandbox_policy: &SandboxPolicy,
    cwd: &Path,
) -> SafetyCheck {
    if action.is_empty() {
        return SafetyCheck::Reject {
            reason: "empty patch".to_string(),
        };
    }

    match policy {
        AskForApproval::OnFailure | AskForApproval::Never | AskForApproval::OnRequest => {
            // Continue to see if this can be auto-approved.
        }
        // TODO(ragona): I'm not sure this is actually correct? I believe in this case
        // we want to continue to the writable paths check before asking the user.
        AskForApproval::UnlessTrusted => {
            return SafetyCheck::AskUser;
        }
    }

    // Even though the patch appears to be constrained to writable paths, it is
    // possible that paths in the patch are hard links to files outside the
    // writable roots, so we should still run `apply_patch` in a sandbox in that case.
    if is_write_patch_constrained_to_writable_paths(action, sandbox_policy, cwd)
        || policy == AskForApproval::OnFailure
    {
        if matches!(
            sandbox_policy,
            SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
        ) {
            // DangerFullAccess is intended to bypass sandboxing entirely.
            SafetyCheck::AutoApprove {
                sandbox_type: SandboxType::None,
                user_explicitly_approved: false,
            }
        } else {
            // Only auto‑approve when we can actually enforce a sandbox. Otherwise
            // fall back to asking the user because the patch may touch arbitrary
            // paths outside the project.
            match get_platform_sandbox() {
                Some(sandbox_type) => SafetyCheck::AutoApprove {
                    sandbox_type,
                    user_explicitly_approved: false,
                },
                None => SafetyCheck::AskUser,
            }
        }
    } else if policy == AskForApproval::Never {
        SafetyCheck::Reject {
            reason: "writing outside of the project; rejected by user approval settings"
                .to_string(),
        }
    } else {
        SafetyCheck::AskUser
    }
}

/// Returns the sandbox implementation available on this platform, if any.
/// On Windows this additionally requires the runtime flag set via
/// `set_windows_sandbox_enabled`.
pub fn get_platform_sandbox() -> Option<SandboxType> {
    if cfg!(target_os = "macos") {
        Some(SandboxType::MacosSeatbelt)
    } else if cfg!(target_os = "linux") {
        Some(SandboxType::LinuxSeccomp)
    } else if cfg!(target_os = "windows") {
        #[cfg(target_os = "windows")]
        {
            if WINDOWS_SANDBOX_ENABLED.load(Ordering::Relaxed) {
                return Some(SandboxType::WindowsRestrictedToken);
            }
        }
        None
    } else {
        None
    }
}

/// True when every path the patch touches (including move destinations) lies
/// inside a writable root for the given sandbox policy.
fn is_write_patch_constrained_to_writable_paths(
    action: &ApplyPatchAction,
    sandbox_policy: &SandboxPolicy,
    cwd: &Path,
) -> bool {
    // Early‑exit if there are no declared writable roots.
    let writable_roots = match sandbox_policy {
        SandboxPolicy::ReadOnly => {
            return false;
        }
        SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
            return true;
        }
        SandboxPolicy::WorkspaceWrite { .. } => sandbox_policy.get_writable_roots_with_cwd(cwd),
    };

    // Normalize a path by removing `.` and resolving `..` without touching the
    // filesystem (works even if the file does not exist).
    fn normalize(path: &Path) -> Option<PathBuf> {
        let mut out = PathBuf::new();
        for comp in path.components() {
            match comp {
                Component::ParentDir => {
                    out.pop();
                }
                Component::CurDir => { /* skip */ }
                other => out.push(other.as_os_str()),
            }
        }
        Some(out)
    }

    // Determine whether `path` is inside **any** writable root. Both `path`
    // and roots are converted to absolute, normalized forms before the
    // prefix check.
    let is_path_writable = |p: &PathBuf| {
        let abs = resolve_path(cwd, p);
        let abs = match normalize(&abs) {
            Some(v) => v,
            None => return false,
        };
        writable_roots
            .iter()
            .any(|writable_root| writable_root.is_path_writable(&abs))
    };

    for (path, change) in action.changes() {
        match change {
            ApplyPatchFileChange::Add { .. } | ApplyPatchFileChange::Delete { .. } => {
                if !is_path_writable(path) {
                    return false;
                }
            }
            ApplyPatchFileChange::Update { move_path, .. } => {
                if !is_path_writable(path) {
                    return false;
                }
                // A move must land inside a writable root too.
                if let Some(dest) = move_path
                    && !is_path_writable(dest)
                {
                    return false;
                }
            }
        }
    }

    true
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_utils_absolute_path::AbsolutePathBuf;
    use tempfile::TempDir;

    #[test]
    fn test_writable_roots_constraint() {
        // Use a temporary directory as our workspace to avoid touching
        // the real current working directory.
        let tmp = TempDir::new().unwrap();
        let cwd = tmp.path().to_path_buf();
        let parent = cwd.parent().unwrap().to_path_buf();

        // Helper to build a single‑entry patch that adds a file at `p`.
        let make_add_change = |p: PathBuf| ApplyPatchAction::new_add_for_test(&p, "".to_string());

        let add_inside = make_add_change(cwd.join("inner.txt"));
        let add_outside = make_add_change(parent.join("outside.txt"));

        // Policy limited to the workspace only; exclude system temp roots so
        // only `cwd` is writable by default.
        let policy_workspace_only = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![],
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        };

        assert!(is_write_patch_constrained_to_writable_paths(
            &add_inside,
            &policy_workspace_only,
            &cwd,
        ));

        assert!(!is_write_patch_constrained_to_writable_paths(
            &add_outside,
            &policy_workspace_only,
            &cwd,
        ));

        // With the parent dir explicitly added as a writable root, the
        // outside write should be permitted.
        let policy_with_parent = SandboxPolicy::WorkspaceWrite {
            writable_roots: vec![AbsolutePathBuf::try_from(parent).unwrap()],
            network_access: false,
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        };
        assert!(is_write_patch_constrained_to_writable_paths(
            &add_outside,
            &policy_with_parent,
            &cwd,
        ));
    }

    #[test]
    fn external_sandbox_auto_approves_in_on_request() {
        let tmp = TempDir::new().unwrap();
        let cwd = tmp.path().to_path_buf();
        let add_inside = ApplyPatchAction::new_add_for_test(&cwd.join("inner.txt"), "".to_string());
        let policy = SandboxPolicy::ExternalSandbox {
            network_access: codex_protocol::protocol::NetworkAccess::Enabled,
        };
        assert_eq!(
            assess_patch_safety(&add_inside, AskForApproval::OnRequest, &policy, &cwd,),
            SafetyCheck::AutoApprove {
                sandbox_type: SandboxType::None,
                user_explicitly_approved: false,
            }
        );
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/review_prompts.rs
codex-rs/core/src/review_prompts.rs
use codex_git::merge_base_with_head; use codex_protocol::protocol::ReviewRequest; use codex_protocol::protocol::ReviewTarget; use std::path::Path; #[derive(Clone, Debug, PartialEq)] pub struct ResolvedReviewRequest { pub target: ReviewTarget, pub prompt: String, pub user_facing_hint: String, } const UNCOMMITTED_PROMPT: &str = "Review the current code changes (staged, unstaged, and untracked files) and provide prioritized findings."; const BASE_BRANCH_PROMPT_BACKUP: &str = "Review the code changes against the base branch '{branch}'. Start by finding the merge diff between the current branch and {branch}'s upstream e.g. (`git merge-base HEAD \"$(git rev-parse --abbrev-ref \"{branch}@{upstream}\")\"`), then run `git diff` against that SHA to see what changes we would merge into the {branch} branch. Provide prioritized, actionable findings."; const BASE_BRANCH_PROMPT: &str = "Review the code changes against the base branch '{baseBranch}'. The merge base commit for this comparison is {mergeBaseSha}. Run `git diff {mergeBaseSha}` to inspect the changes relative to {baseBranch}. Provide prioritized, actionable findings."; const COMMIT_PROMPT_WITH_TITLE: &str = "Review the code changes introduced by commit {sha} (\"{title}\"). Provide prioritized, actionable findings."; const COMMIT_PROMPT: &str = "Review the code changes introduced by commit {sha}. 
Provide prioritized, actionable findings."; pub fn resolve_review_request( request: ReviewRequest, cwd: &Path, ) -> anyhow::Result<ResolvedReviewRequest> { let target = request.target; let prompt = review_prompt(&target, cwd)?; let user_facing_hint = request .user_facing_hint .unwrap_or_else(|| user_facing_hint(&target)); Ok(ResolvedReviewRequest { target, prompt, user_facing_hint, }) } pub fn review_prompt(target: &ReviewTarget, cwd: &Path) -> anyhow::Result<String> { match target { ReviewTarget::UncommittedChanges => Ok(UNCOMMITTED_PROMPT.to_string()), ReviewTarget::BaseBranch { branch } => { if let Some(commit) = merge_base_with_head(cwd, branch)? { Ok(BASE_BRANCH_PROMPT .replace("{baseBranch}", branch) .replace("{mergeBaseSha}", &commit)) } else { Ok(BASE_BRANCH_PROMPT_BACKUP.replace("{branch}", branch)) } } ReviewTarget::Commit { sha, title } => { if let Some(title) = title { Ok(COMMIT_PROMPT_WITH_TITLE .replace("{sha}", sha) .replace("{title}", title)) } else { Ok(COMMIT_PROMPT.replace("{sha}", sha)) } } ReviewTarget::Custom { instructions } => { let prompt = instructions.trim(); if prompt.is_empty() { anyhow::bail!("Review prompt cannot be empty"); } Ok(prompt.to_string()) } } } pub fn user_facing_hint(target: &ReviewTarget) -> String { match target { ReviewTarget::UncommittedChanges => "current changes".to_string(), ReviewTarget::BaseBranch { branch } => format!("changes against '{branch}'"), ReviewTarget::Commit { sha, title } => { let short_sha: String = sha.chars().take(7).collect(); if let Some(title) = title { format!("commit {short_sha}: {title}") } else { format!("commit {short_sha}") } } ReviewTarget::Custom { instructions } => instructions.trim().to_string(), } } impl From<ResolvedReviewRequest> for ReviewRequest { fn from(resolved: ResolvedReviewRequest) -> Self { ReviewRequest { target: resolved.target, user_facing_hint: Some(resolved.user_facing_hint), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/project_doc.rs
codex-rs/core/src/project_doc.rs
//! Project-level documentation discovery. //! //! Project-level documentation is primarily stored in files named `AGENTS.md`. //! Additional fallback filenames can be configured via `project_doc_fallback_filenames`. //! We include the concatenation of all files found along the path from the //! repository root to the current working directory as follows: //! //! 1. Determine the Git repository root by walking upwards from the current //! working directory until a `.git` directory or file is found. If no Git //! root is found, only the current working directory is considered. //! 2. Collect every `AGENTS.md` found from the repository root down to the //! current working directory (inclusive) and concatenate their contents in //! that order. //! 3. We do **not** walk past the Git root. use crate::config::Config; use crate::skills::SkillMetadata; use crate::skills::render_skills_section; use dunce::canonicalize as normalize_path; use std::path::PathBuf; use tokio::io::AsyncReadExt; use tracing::error; /// Default filename scanned for project-level docs. pub const DEFAULT_PROJECT_DOC_FILENAME: &str = "AGENTS.md"; /// Preferred local override for project-level docs. pub const LOCAL_PROJECT_DOC_FILENAME: &str = "AGENTS.override.md"; /// When both `Config::instructions` and the project doc are present, they will /// be concatenated with the following separator. const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n"; /// Combines `Config::instructions` and `AGENTS.md` (if present) into a single /// string of instructions. 
pub(crate) async fn get_user_instructions( config: &Config, skills: Option<&[SkillMetadata]>, ) -> Option<String> { let skills_section = skills.and_then(render_skills_section); let project_docs = match read_project_docs(config).await { Ok(docs) => docs, Err(e) => { error!("error trying to find project doc: {e:#}"); return config.user_instructions.clone(); } }; let combined_project_docs = merge_project_docs_with_skills(project_docs, skills_section); let mut parts: Vec<String> = Vec::new(); if let Some(instructions) = config.user_instructions.clone() { parts.push(instructions); } if let Some(project_doc) = combined_project_docs { if !parts.is_empty() { parts.push(PROJECT_DOC_SEPARATOR.to_string()); } parts.push(project_doc); } if parts.is_empty() { None } else { Some(parts.concat()) } } /// Attempt to locate and load the project documentation. /// /// On success returns `Ok(Some(contents))` where `contents` is the /// concatenation of all discovered docs. If no documentation file is found the /// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so /// callers can decide how to handle them. 
pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> { let max_total = config.project_doc_max_bytes; if max_total == 0 { return Ok(None); } let paths = discover_project_doc_paths(config)?; if paths.is_empty() { return Ok(None); } let mut remaining: u64 = max_total as u64; let mut parts: Vec<String> = Vec::new(); for p in paths { if remaining == 0 { break; } let file = match tokio::fs::File::open(&p).await { Ok(f) => f, Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue, Err(e) => return Err(e), }; let size = file.metadata().await?.len(); let mut reader = tokio::io::BufReader::new(file).take(remaining); let mut data: Vec<u8> = Vec::new(); reader.read_to_end(&mut data).await?; if size > remaining { tracing::warn!( "Project doc `{}` exceeds remaining budget ({} bytes) - truncating.", p.display(), remaining, ); } let text = String::from_utf8_lossy(&data).to_string(); if !text.trim().is_empty() { parts.push(text); remaining = remaining.saturating_sub(data.len() as u64); } } if parts.is_empty() { Ok(None) } else { Ok(Some(parts.join("\n\n"))) } } /// Discover the list of AGENTS.md files using the same search rules as /// `read_project_docs`, but return the file paths instead of concatenated /// contents. The list is ordered from repository root to the current working /// directory (inclusive). Symlinks are allowed. When `project_doc_max_bytes` /// is zero, returns an empty list. pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBuf>> { let mut dir = config.cwd.clone(); if let Ok(canon) = normalize_path(&dir) { dir = canon; } // Build chain from cwd upwards and detect git root. 
let mut chain: Vec<PathBuf> = vec![dir.clone()]; let mut git_root: Option<PathBuf> = None; let mut cursor = dir; while let Some(parent) = cursor.parent() { let git_marker = cursor.join(".git"); let git_exists = match std::fs::metadata(&git_marker) { Ok(_) => true, Err(e) if e.kind() == std::io::ErrorKind::NotFound => false, Err(e) => return Err(e), }; if git_exists { git_root = Some(cursor.clone()); break; } chain.push(parent.to_path_buf()); cursor = parent.to_path_buf(); } let search_dirs: Vec<PathBuf> = if let Some(root) = git_root { let mut dirs: Vec<PathBuf> = Vec::new(); let mut saw_root = false; for p in chain.iter().rev() { if !saw_root { if p == &root { saw_root = true; } else { continue; } } dirs.push(p.clone()); } dirs } else { vec![config.cwd.clone()] }; let mut found: Vec<PathBuf> = Vec::new(); let candidate_filenames = candidate_filenames(config); for d in search_dirs { for name in &candidate_filenames { let candidate = d.join(name); match std::fs::symlink_metadata(&candidate) { Ok(md) => { let ft = md.file_type(); // Allow regular files and symlinks; opening will later fail for dangling links. 
if ft.is_file() || ft.is_symlink() { found.push(candidate); break; } } Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue, Err(e) => return Err(e), } } } Ok(found) } fn candidate_filenames<'a>(config: &'a Config) -> Vec<&'a str> { let mut names: Vec<&'a str> = Vec::with_capacity(2 + config.project_doc_fallback_filenames.len()); names.push(LOCAL_PROJECT_DOC_FILENAME); names.push(DEFAULT_PROJECT_DOC_FILENAME); for candidate in &config.project_doc_fallback_filenames { let candidate = candidate.as_str(); if candidate.is_empty() { continue; } if !names.contains(&candidate) { names.push(candidate); } } names } fn merge_project_docs_with_skills( project_doc: Option<String>, skills_section: Option<String>, ) -> Option<String> { match (project_doc, skills_section) { (Some(doc), Some(skills)) => Some(format!("{doc}\n\n{skills}")), (Some(doc), None) => Some(doc), (None, Some(skills)) => Some(skills), (None, None) => None, } } #[cfg(test)] mod tests { use super::*; use crate::config::ConfigBuilder; use crate::skills::load_skills; use std::fs; use std::path::PathBuf; use tempfile::TempDir; /// Helper that returns a `Config` pointing at `root` and using `limit` as /// the maximum number of bytes to embed from AGENTS.md. The caller can /// optionally specify a custom `instructions` string – when `None` the /// value is cleared to mimic a scenario where no system instructions have /// been configured. 
async fn make_config(root: &TempDir, limit: usize, instructions: Option<&str>) -> Config { let codex_home = TempDir::new().unwrap(); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("defaults for test should always succeed"); config.cwd = root.path().to_path_buf(); config.project_doc_max_bytes = limit; config.user_instructions = instructions.map(ToOwned::to_owned); config } async fn make_config_with_fallback( root: &TempDir, limit: usize, instructions: Option<&str>, fallbacks: &[&str], ) -> Config { let mut config = make_config(root, limit, instructions).await; config.project_doc_fallback_filenames = fallbacks .iter() .map(std::string::ToString::to_string) .collect(); config } /// AGENTS.md missing – should yield `None`. #[tokio::test] async fn no_doc_file_returns_none() { let tmp = tempfile::tempdir().expect("tempdir"); let res = get_user_instructions(&make_config(&tmp, 4096, None).await, None).await; assert!( res.is_none(), "Expected None when AGENTS.md is absent and no system instructions provided" ); assert!(res.is_none(), "Expected None when AGENTS.md is absent"); } /// Small file within the byte-limit is returned unmodified. #[tokio::test] async fn doc_smaller_than_limit_is_returned() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "hello world").unwrap(); let res = get_user_instructions(&make_config(&tmp, 4096, None).await, None) .await .expect("doc expected"); assert_eq!( res, "hello world", "The document should be returned verbatim when it is smaller than the limit and there are no existing instructions" ); } /// Oversize file is truncated to `project_doc_max_bytes`. 
#[tokio::test] async fn doc_larger_than_limit_is_truncated() { const LIMIT: usize = 1024; let tmp = tempfile::tempdir().expect("tempdir"); let huge = "A".repeat(LIMIT * 2); // 2 KiB fs::write(tmp.path().join("AGENTS.md"), &huge).unwrap(); let res = get_user_instructions(&make_config(&tmp, LIMIT, None).await, None) .await .expect("doc expected"); assert_eq!(res.len(), LIMIT, "doc should be truncated to LIMIT bytes"); assert_eq!(res, huge[..LIMIT]); } /// When `cwd` is nested inside a repo, the search should locate AGENTS.md /// placed at the repository root (identified by `.git`). #[tokio::test] async fn finds_doc_in_repo_root() { let repo = tempfile::tempdir().expect("tempdir"); // Simulate a git repository. Note .git can be a file or a directory. std::fs::write( repo.path().join(".git"), "gitdir: /path/to/actual/git/dir\n", ) .unwrap(); // Put the doc at the repo root. fs::write(repo.path().join("AGENTS.md"), "root level doc").unwrap(); // Now create a nested working directory: repo/workspace/crate_a let nested = repo.path().join("workspace/crate_a"); std::fs::create_dir_all(&nested).unwrap(); // Build config pointing at the nested dir. let mut cfg = make_config(&repo, 4096, None).await; cfg.cwd = nested; let res = get_user_instructions(&cfg, None) .await .expect("doc expected"); assert_eq!(res, "root level doc"); } /// Explicitly setting the byte-limit to zero disables project docs. #[tokio::test] async fn zero_byte_limit_disables_docs() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "something").unwrap(); let res = get_user_instructions(&make_config(&tmp, 0, None).await, None).await; assert!( res.is_none(), "With limit 0 the function should return None" ); } /// When both system instructions *and* a project doc are present the two /// should be concatenated with the separator. 
#[tokio::test] async fn merges_existing_instructions_with_project_doc() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "proj doc").unwrap(); const INSTRUCTIONS: &str = "base instructions"; let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await, None) .await .expect("should produce a combined instruction string"); let expected = format!("{INSTRUCTIONS}{PROJECT_DOC_SEPARATOR}{}", "proj doc"); assert_eq!(res, expected); } /// If there are existing system instructions but the project doc is /// missing we expect the original instructions to be returned unchanged. #[tokio::test] async fn keeps_existing_instructions_when_doc_missing() { let tmp = tempfile::tempdir().expect("tempdir"); const INSTRUCTIONS: &str = "some instructions"; let res = get_user_instructions(&make_config(&tmp, 4096, Some(INSTRUCTIONS)).await, None).await; assert_eq!(res, Some(INSTRUCTIONS.to_string())); } /// When both the repository root and the working directory contain /// AGENTS.md files, their contents are concatenated from root to cwd. #[tokio::test] async fn concatenates_root_and_cwd_docs() { let repo = tempfile::tempdir().expect("tempdir"); // Simulate a git repository. std::fs::write( repo.path().join(".git"), "gitdir: /path/to/actual/git/dir\n", ) .unwrap(); // Repo root doc. fs::write(repo.path().join("AGENTS.md"), "root doc").unwrap(); // Nested working directory with its own doc. let nested = repo.path().join("workspace/crate_a"); std::fs::create_dir_all(&nested).unwrap(); fs::write(nested.join("AGENTS.md"), "crate doc").unwrap(); let mut cfg = make_config(&repo, 4096, None).await; cfg.cwd = nested; let res = get_user_instructions(&cfg, None) .await .expect("doc expected"); assert_eq!(res, "root doc\n\ncrate doc"); } /// AGENTS.override.md is preferred over AGENTS.md when both are present. 
#[tokio::test] async fn agents_local_md_preferred() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join(DEFAULT_PROJECT_DOC_FILENAME), "versioned").unwrap(); fs::write(tmp.path().join(LOCAL_PROJECT_DOC_FILENAME), "local").unwrap(); let cfg = make_config(&tmp, 4096, None).await; let res = get_user_instructions(&cfg, None) .await .expect("local doc expected"); assert_eq!(res, "local"); let discovery = discover_project_doc_paths(&cfg).expect("discover paths"); assert_eq!(discovery.len(), 1); assert_eq!( discovery[0].file_name().unwrap().to_string_lossy(), LOCAL_PROJECT_DOC_FILENAME ); } /// When AGENTS.md is absent but a configured fallback exists, the fallback is used. #[tokio::test] async fn uses_configured_fallback_when_agents_missing() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("EXAMPLE.md"), "example instructions").unwrap(); let cfg = make_config_with_fallback(&tmp, 4096, None, &["EXAMPLE.md"]).await; let res = get_user_instructions(&cfg, None) .await .expect("fallback doc expected"); assert_eq!(res, "example instructions"); } /// AGENTS.md remains preferred when both AGENTS.md and fallbacks are present. 
#[tokio::test] async fn agents_md_preferred_over_fallbacks() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "primary").unwrap(); fs::write(tmp.path().join("EXAMPLE.md"), "secondary").unwrap(); let cfg = make_config_with_fallback(&tmp, 4096, None, &["EXAMPLE.md", ".example.md"]).await; let res = get_user_instructions(&cfg, None) .await .expect("AGENTS.md should win"); assert_eq!(res, "primary"); let discovery = discover_project_doc_paths(&cfg).expect("discover paths"); assert_eq!(discovery.len(), 1); assert!( discovery[0] .file_name() .unwrap() .to_string_lossy() .eq(DEFAULT_PROJECT_DOC_FILENAME) ); } #[tokio::test] async fn skills_are_appended_to_project_doc() { let tmp = tempfile::tempdir().expect("tempdir"); fs::write(tmp.path().join("AGENTS.md"), "base doc").unwrap(); let cfg = make_config(&tmp, 4096, None).await; create_skill( cfg.codex_home.clone(), "pdf-processing", "extract from pdfs", ); let skills = load_skills(&cfg); let res = get_user_instructions( &cfg, skills.errors.is_empty().then_some(skills.skills.as_slice()), ) .await .expect("instructions expected"); let expected_path = dunce::canonicalize( cfg.codex_home .join("skills/pdf-processing/SKILL.md") .as_path(), ) .unwrap_or_else(|_| cfg.codex_home.join("skills/pdf-processing/SKILL.md")); let expected_path_str = expected_path.to_string_lossy().replace('\\', "/"); let usage_rules = "- Discovery: Available skills are listed in project docs and may also appear in a runtime \"## Skills\" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. 
Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue."; let expected = format!( "base doc\n\n## Skills\nThese skills are discovered at startup from multiple local sources. 
Each entry includes a name, description, and file path so you can open the source for full instructions.\n- pdf-processing: extract from pdfs (file: {expected_path_str})\n{usage_rules}" ); assert_eq!(res, expected); } #[tokio::test] async fn skills_render_without_project_doc() { let tmp = tempfile::tempdir().expect("tempdir"); let cfg = make_config(&tmp, 4096, None).await; create_skill(cfg.codex_home.clone(), "linting", "run clippy"); let skills = load_skills(&cfg); let res = get_user_instructions( &cfg, skills.errors.is_empty().then_some(skills.skills.as_slice()), ) .await .expect("instructions expected"); let expected_path = dunce::canonicalize(cfg.codex_home.join("skills/linting/SKILL.md").as_path()) .unwrap_or_else(|_| cfg.codex_home.join("skills/linting/SKILL.md")); let expected_path_str = expected_path.to_string_lossy().replace('\\', "/"); let usage_rules = "- Discovery: Available skills are listed in project docs and may also appear in a runtime \"## Skills\" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths.\n- Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned.\n- Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback.\n- How to use a skill (progressive disclosure):\n 1) After deciding to use a skill, open its `SKILL.md`. 
Read only enough to follow the workflow.\n 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything.\n 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks.\n 4) If `assets/` or templates exist, reuse them instead of recreating from scratch.\n- Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding.\n- Coordination and sequencing:\n - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them.\n - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why.\n- Context hygiene:\n - Keep context small: summarize long sections instead of pasting them; only load extra files when needed.\n - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`.\n - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice.\n- Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue."; let expected = format!( "## Skills\nThese skills are discovered at startup from multiple local sources. Each entry includes a name, description, and file path so you can open the source for full instructions.\n- linting: run clippy (file: {expected_path_str})\n{usage_rules}" ); assert_eq!(res, expected); } fn create_skill(codex_home: PathBuf, name: &str, description: &str) { let skill_dir = codex_home.join(format!("skills/{name}")); fs::create_dir_all(&skill_dir).unwrap(); let content = format!("---\nname: {name}\ndescription: {description}\n---\n\n# Body\n"); fs::write(skill_dir.join("SKILL.md"), content).unwrap(); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/codex_delegate.rs
codex-rs/core/src/codex_delegate.rs
use std::sync::Arc; use std::sync::atomic::AtomicU64; use async_channel::Receiver; use async_channel::Sender; use codex_async_utils::OrCancelExt; use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExecApprovalRequestEvent; use codex_protocol::protocol::Op; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use codex_protocol::protocol::Submission; use codex_protocol::user_input::UserInput; use std::time::Duration; use tokio::time::timeout; use tokio_util::sync::CancellationToken; use crate::AuthManager; use crate::codex::Codex; use crate::codex::CodexSpawnOk; use crate::codex::SUBMISSION_CHANNEL_CAPACITY; use crate::codex::Session; use crate::codex::TurnContext; use crate::config::Config; use crate::error::CodexErr; use crate::models_manager::manager::ModelsManager; use codex_protocol::protocol::InitialHistory; /// Start an interactive sub-Codex conversation and return IO channels. /// /// The returned `events_rx` yields non-approval events emitted by the sub-agent. /// Approval requests are handled via `parent_session` and are not surfaced. /// The returned `ops_tx` allows the caller to submit additional `Op`s to the sub-agent. pub(crate) async fn run_codex_conversation_interactive( config: Config, auth_manager: Arc<AuthManager>, models_manager: Arc<ModelsManager>, parent_session: Arc<Session>, parent_ctx: Arc<TurnContext>, cancel_token: CancellationToken, initial_history: Option<InitialHistory>, ) -> Result<Codex, CodexErr> { let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let CodexSpawnOk { codex, .. 
} = Codex::spawn( config, auth_manager, models_manager, Arc::clone(&parent_session.services.skills_manager), initial_history.unwrap_or(InitialHistory::New), SessionSource::SubAgent(SubAgentSource::Review), ) .await?; let codex = Arc::new(codex); // Use a child token so parent cancel cascades but we can scope it to this task let cancel_token_events = cancel_token.child_token(); let cancel_token_ops = cancel_token.child_token(); // Forward events from the sub-agent to the consumer, filtering approvals and // routing them to the parent session for decisions. let parent_session_clone = Arc::clone(&parent_session); let parent_ctx_clone = Arc::clone(&parent_ctx); let codex_for_events = Arc::clone(&codex); tokio::spawn(async move { forward_events( codex_for_events, tx_sub, parent_session_clone, parent_ctx_clone, cancel_token_events, ) .await; }); // Forward ops from the caller to the sub-agent. let codex_for_ops = Arc::clone(&codex); tokio::spawn(async move { forward_ops(codex_for_ops, rx_ops, cancel_token_ops).await; }); Ok(Codex { next_id: AtomicU64::new(0), tx_sub: tx_ops, rx_event: rx_sub, }) } /// Convenience wrapper for one-time use with an initial prompt. /// /// Internally calls the interactive variant, then immediately submits the provided input. #[allow(clippy::too_many_arguments)] pub(crate) async fn run_codex_conversation_one_shot( config: Config, auth_manager: Arc<AuthManager>, models_manager: Arc<ModelsManager>, input: Vec<UserInput>, parent_session: Arc<Session>, parent_ctx: Arc<TurnContext>, cancel_token: CancellationToken, initial_history: Option<InitialHistory>, ) -> Result<Codex, CodexErr> { // Use a child token so we can stop the delegate after completion without // requiring the caller to cancel the parent token. 
let child_cancel = cancel_token.child_token(); let io = run_codex_conversation_interactive( config, auth_manager, models_manager, parent_session, parent_ctx, child_cancel.clone(), initial_history, ) .await?; // Send the initial input to kick off the one-shot turn. io.submit(Op::UserInput { items: input }).await?; // Bridge events so we can observe completion and shut down automatically. let (tx_bridge, rx_bridge) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let ops_tx = io.tx_sub.clone(); let io_for_bridge = io; tokio::spawn(async move { while let Ok(event) = io_for_bridge.next_event().await { let should_shutdown = matches!( event.msg, EventMsg::TaskComplete(_) | EventMsg::TurnAborted(_) ); let _ = tx_bridge.send(event).await; if should_shutdown { let _ = ops_tx .send(Submission { id: "shutdown".to_string(), op: Op::Shutdown {}, }) .await; child_cancel.cancel(); break; } } }); // For one-shot usage, return a closed `tx_sub` so callers cannot submit // additional ops after the initial request. Create a channel and drop the // receiver to close it immediately. let (tx_closed, rx_closed) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); drop(rx_closed); Ok(Codex { next_id: AtomicU64::new(0), rx_event: rx_bridge, tx_sub: tx_closed, }) } async fn forward_events( codex: Arc<Codex>, tx_sub: Sender<Event>, parent_session: Arc<Session>, parent_ctx: Arc<TurnContext>, cancel_token: CancellationToken, ) { let cancelled = cancel_token.cancelled(); tokio::pin!(cancelled); loop { tokio::select! 
{ _ = &mut cancelled => { shutdown_delegate(&codex).await; break; } event = codex.next_event() => { let event = match event { Ok(event) => event, Err(_) => break, }; match event { // ignore all legacy delta events Event { id: _, msg: EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoningDelta(_), } => {} Event { id: _, msg: EventMsg::TokenCount(_), } => {} Event { id: _, msg: EventMsg::SessionConfigured(_), } => {} Event { id, msg: EventMsg::ExecApprovalRequest(event), } => { // Initiate approval via parent session; do not surface to consumer. handle_exec_approval( &codex, id, &parent_session, &parent_ctx, event, &cancel_token, ) .await; } Event { id, msg: EventMsg::ApplyPatchApprovalRequest(event), } => { handle_patch_approval( &codex, id, &parent_session, &parent_ctx, event, &cancel_token, ) .await; } other => { match tx_sub.send(other).or_cancel(&cancel_token).await { Ok(Ok(())) => {} _ => { shutdown_delegate(&codex).await; break; } } } } } } } } /// Ask the delegate to stop and drain its events so background sends do not hit a closed channel. async fn shutdown_delegate(codex: &Codex) { let _ = codex.submit(Op::Interrupt).await; let _ = codex.submit(Op::Shutdown {}).await; let _ = timeout(Duration::from_millis(500), async { while let Ok(event) = codex.next_event().await { if matches!( event.msg, EventMsg::TurnAborted(_) | EventMsg::TaskComplete(_) ) { break; } } }) .await; } /// Forward ops from a caller to a sub-agent, respecting cancellation. async fn forward_ops( codex: Arc<Codex>, rx_ops: Receiver<Submission>, cancel_token_ops: CancellationToken, ) { loop { let op: Op = match rx_ops.recv().or_cancel(&cancel_token_ops).await { Ok(Ok(Submission { id: _, op })) => op, Ok(Err(_)) | Err(_) => break, }; let _ = codex.submit(op).await; } } /// Handle an ExecApprovalRequest by consulting the parent session and replying. 
async fn handle_exec_approval(
    codex: &Codex,
    id: String,
    parent_session: &Session,
    parent_ctx: &TurnContext,
    event: ExecApprovalRequestEvent,
    cancel_token: &CancellationToken,
) {
    // Race approval with cancellation and timeout to avoid hangs.
    // The approval question is surfaced through the *parent* session so the
    // user answers in their normal UI; the delegate never sees the prompt.
    let approval_fut = parent_session.request_command_approval(
        parent_ctx,
        parent_ctx.sub_id.clone(),
        event.command,
        event.cwd,
        event.reason,
        event.proposed_execpolicy_amendment,
    );
    let decision = await_approval_with_cancel(
        approval_fut,
        parent_session,
        &parent_ctx.sub_id,
        cancel_token,
    )
    .await;
    // Relay the decision (possibly `Abort` if we were cancelled) back to the
    // delegate. A failed send just means the delegate already shut down.
    let _ = codex.submit(Op::ExecApproval { id, decision }).await;
}

/// Handle an ApplyPatchApprovalRequest by consulting the parent session and replying.
async fn handle_patch_approval(
    codex: &Codex,
    id: String,
    parent_session: &Session,
    parent_ctx: &TurnContext,
    event: ApplyPatchApprovalRequestEvent,
    cancel_token: &CancellationToken,
) {
    // Unlike exec approval, patch approval hands back a oneshot receiver; we
    // adapt it into a future that yields the default decision if the sender
    // side is dropped before answering.
    let decision_rx = parent_session
        .request_patch_approval(
            parent_ctx,
            parent_ctx.sub_id.clone(),
            event.changes,
            event.reason,
            event.grant_root,
        )
        .await;
    let decision = await_approval_with_cancel(
        async move { decision_rx.await.unwrap_or_default() },
        parent_session,
        &parent_ctx.sub_id,
        cancel_token,
    )
    .await;
    // Forward the decision to the delegate; ignore send errors on shutdown.
    let _ = codex.submit(Op::PatchApproval { id, decision }).await;
}

/// Await an approval decision, aborting on cancellation.
///
/// Uses a `biased` select so that, when the token is already cancelled, the
/// cancellation arm wins deterministically over a simultaneously-ready
/// approval. On cancellation the parent session is notified with `Abort`
/// (so its pending-approval bookkeeping is cleaned up) and `Abort` is
/// returned to the caller.
async fn await_approval_with_cancel<F>(
    fut: F,
    parent_session: &Session,
    sub_id: &str,
    cancel_token: &CancellationToken,
) -> codex_protocol::protocol::ReviewDecision
where
    F: core::future::Future<Output = codex_protocol::protocol::ReviewDecision>,
{
    tokio::select! {
        biased;
        _ = cancel_token.cancelled() => {
            parent_session
                .notify_approval(sub_id, codex_protocol::protocol::ReviewDecision::Abort)
                .await;
            codex_protocol::protocol::ReviewDecision::Abort
        }
        decision = fut => {
            decision
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use async_channel::bounded;
    use codex_protocol::models::ResponseItem;
    use codex_protocol::protocol::RawResponseItemEvent;
    use codex_protocol::protocol::TurnAbortReason;
    use codex_protocol::protocol::TurnAbortedEvent;
    use pretty_assertions::assert_eq;

    // Regression test: if the consumer channel is full (send blocks) and the
    // cancel token fires, `forward_events` must still shut the delegate down
    // (Interrupt + Shutdown ops) rather than hanging on the blocked send.
    #[tokio::test]
    async fn forward_events_cancelled_while_send_blocked_shuts_down_delegate() {
        // Delegate-side channels: one pending event, bounded subs channel so
        // we can later inspect what ops the shutdown path submitted.
        let (tx_events, rx_events) = bounded(1);
        let (tx_sub, rx_sub) = bounded(SUBMISSION_CHANNEL_CAPACITY);
        let codex = Arc::new(Codex {
            next_id: AtomicU64::new(0),
            tx_sub,
            rx_event: rx_events,
        });
        let (session, ctx, _rx_evt) = crate::codex::make_session_and_context_with_rx().await;
        // Pre-fill the consumer channel (capacity 1) so forward_events blocks
        // when it tries to forward the next event.
        let (tx_out, rx_out) = bounded(1);
        tx_out
            .send(Event {
                id: "full".to_string(),
                msg: EventMsg::TurnAborted(TurnAbortedEvent {
                    reason: TurnAbortReason::Interrupted,
                }),
            })
            .await
            .unwrap();
        let cancel = CancellationToken::new();
        let forward = tokio::spawn(forward_events(
            Arc::clone(&codex),
            tx_out.clone(),
            session,
            ctx,
            cancel.clone(),
        ));
        // Feed one event for forward_events to pick up; its send will block
        // because rx_out has not been drained.
        tx_events
            .send(Event {
                id: "evt".to_string(),
                msg: EventMsg::RawResponseItem(RawResponseItemEvent {
                    item: ResponseItem::CustomToolCall {
                        id: None,
                        status: None,
                        call_id: "call-1".to_string(),
                        name: "tool".to_string(),
                        input: "{}".to_string(),
                    },
                }),
            })
            .await
            .unwrap();
        drop(tx_events);
        cancel.cancel();
        // The forwarder must terminate promptly despite the blocked send.
        timeout(std::time::Duration::from_millis(1000), forward)
            .await
            .expect("forward_events hung")
            .expect("forward_events join error");
        let received = rx_out.recv().await.expect("prefilled event missing");
        assert_eq!("full", received.id);
        // Shutdown path should have submitted Interrupt and Shutdown ops.
        let mut ops = Vec::new();
        while let Ok(sub) = rx_sub.try_recv() {
            ops.push(sub.op);
        }
        assert!(
            ops.iter().any(|op| matches!(op, Op::Interrupt)),
            "expected Interrupt op after cancellation"
        );
        assert!(
            ops.iter().any(|op| matches!(op, Op::Shutdown)),
            "expected Shutdown op after cancellation"
        );
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/exec_env.rs
codex-rs/core/src/exec_env.rs
//! Derivation of the environment passed to sandboxed/spawned commands from a
//! [`ShellEnvironmentPolicy`]. Pure logic: `populate_env` is side-effect free
//! and unit-tested; only `create_env` reads the real process environment.

use crate::config::types::EnvironmentVariablePattern;
use crate::config::types::ShellEnvironmentPolicy;
use crate::config::types::ShellEnvironmentPolicyInherit;
use std::collections::HashMap;
use std::collections::HashSet;

/// Construct an environment map based on the rules in the specified policy. The
/// resulting map can be passed directly to `Command::envs()` after calling
/// `env_clear()` to ensure no unintended variables are leaked to the spawned
/// process.
///
/// The derivation follows the algorithm documented in the struct-level comment
/// for [`ShellEnvironmentPolicy`].
pub fn create_env(policy: &ShellEnvironmentPolicy) -> HashMap<String, String> {
    populate_env(std::env::vars(), policy)
}

// Core of `create_env`, parameterized over the variable source so tests can
// inject a synthetic environment. The five steps below run in a fixed order;
// notably `set` overrides (step 4) are applied *before* `include_only`
// (step 5), so an override can still be filtered out by the allowlist.
fn populate_env<I>(vars: I, policy: &ShellEnvironmentPolicy) -> HashMap<String, String>
where
    I: IntoIterator<Item = (String, String)>,
{
    // Step 1 – determine the starting set of variables based on the
    // `inherit` strategy.
    let mut env_map: HashMap<String, String> = match policy.inherit {
        ShellEnvironmentPolicyInherit::All => vars.into_iter().collect(),
        ShellEnvironmentPolicyInherit::None => HashMap::new(),
        ShellEnvironmentPolicyInherit::Core => {
            // Minimal "safe" set of variables most programs need to run.
            const CORE_VARS: &[&str] = &[
                "HOME", "LOGNAME", "PATH", "SHELL", "USER", "USERNAME", "TMPDIR", "TEMP", "TMP",
            ];
            let allow: HashSet<&str> = CORE_VARS.iter().copied().collect();
            vars.into_iter()
                .filter(|(k, _)| allow.contains(k.as_str()))
                .collect()
        }
    };

    // Internal helper – does `name` match **any** pattern in `patterns`?
    let matches_any = |name: &str, patterns: &[EnvironmentVariablePattern]| -> bool {
        patterns.iter().any(|pattern| pattern.matches(name))
    };

    // Step 2 – Apply the default exclude if not disabled. These patterns are
    // case-insensitive, so e.g. `api_key` is dropped as well as `API_KEY`.
    if !policy.ignore_default_excludes {
        let default_excludes = vec![
            EnvironmentVariablePattern::new_case_insensitive("*KEY*"),
            EnvironmentVariablePattern::new_case_insensitive("*SECRET*"),
            EnvironmentVariablePattern::new_case_insensitive("*TOKEN*"),
        ];
        env_map.retain(|k, _| !matches_any(k, &default_excludes));
    }

    // Step 3 – Apply custom excludes.
    if !policy.exclude.is_empty() {
        env_map.retain(|k, _| !matches_any(k, &policy.exclude));
    }

    // Step 4 – Apply user-provided overrides. (`set` is a raw identifier
    // because `set` collides with nothing in Rust, but the config field is
    // spelled `set`, hence `r#set`.)
    for (key, val) in &policy.r#set {
        env_map.insert(key.clone(), val.clone());
    }

    // Step 5 – If include_only is non-empty, keep *only* the matching vars.
    if !policy.include_only.is_empty() {
        env_map.retain(|k, _| matches_any(k, &policy.include_only));
    }

    env_map
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::types::ShellEnvironmentPolicyInherit;
    use maplit::hashmap;

    // Convenience: build an owned (String, String) vec from str pairs.
    fn make_vars(pairs: &[(&str, &str)]) -> Vec<(String, String)> {
        pairs
            .iter()
            .map(|(k, v)| (k.to_string(), v.to_string()))
            .collect()
    }

    // NOTE(review): despite the "core_inherit" name, this exercises the
    // *default* policy, which (per the expected output) inherits everything
    // and skips the default KEY/SECRET/TOKEN excludes.
    #[test]
    fn test_core_inherit_defaults_keep_sensitive_vars() {
        let vars = make_vars(&[
            ("PATH", "/usr/bin"),
            ("HOME", "/home/user"),
            ("API_KEY", "secret"),
            ("SECRET_TOKEN", "t"),
        ]);
        let policy = ShellEnvironmentPolicy::default(); // inherit All, default excludes ignored
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "PATH".to_string() => "/usr/bin".to_string(),
            "HOME".to_string() => "/home/user".to_string(),
            "API_KEY".to_string() => "secret".to_string(),
            "SECRET_TOKEN".to_string() => "t".to_string(),
        };
        assert_eq!(result, expected);
    }

    // With default excludes enabled, anything matching *KEY*/*SECRET*/*TOKEN*
    // is dropped.
    #[test]
    fn test_core_inherit_with_default_excludes_enabled() {
        let vars = make_vars(&[
            ("PATH", "/usr/bin"),
            ("HOME", "/home/user"),
            ("API_KEY", "secret"),
            ("SECRET_TOKEN", "t"),
        ]);
        let policy = ShellEnvironmentPolicy {
            ignore_default_excludes: false, // apply KEY/SECRET/TOKEN filter
            ..Default::default()
        };
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "PATH".to_string() => "/usr/bin".to_string(),
            "HOME".to_string() => "/home/user".to_string(),
        };
        assert_eq!(result, expected);
    }

    // A non-empty include_only acts as a final allowlist.
    #[test]
    fn test_include_only() {
        let vars = make_vars(&[("PATH", "/usr/bin"), ("FOO", "bar")]);
        let policy = ShellEnvironmentPolicy {
            // skip default excludes so nothing is removed prematurely
            ignore_default_excludes: true,
            include_only: vec![EnvironmentVariablePattern::new_case_insensitive("*PATH")],
            ..Default::default()
        };
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "PATH".to_string() => "/usr/bin".to_string(),
        };
        assert_eq!(result, expected);
    }

    // `set` entries are added on top of the inherited variables.
    #[test]
    fn test_set_overrides() {
        let vars = make_vars(&[("PATH", "/usr/bin")]);
        let mut policy = ShellEnvironmentPolicy {
            ignore_default_excludes: true,
            ..Default::default()
        };
        policy.r#set.insert("NEW_VAR".to_string(), "42".to_string());
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "PATH".to_string() => "/usr/bin".to_string(),
            "NEW_VAR".to_string() => "42".to_string(),
        };
        assert_eq!(result, expected);
    }

    // Inherit::All with excludes disabled passes the input through verbatim.
    #[test]
    fn test_inherit_all() {
        let vars = make_vars(&[("PATH", "/usr/bin"), ("FOO", "bar")]);
        let policy = ShellEnvironmentPolicy {
            inherit: ShellEnvironmentPolicyInherit::All,
            ignore_default_excludes: true, // keep everything
            ..Default::default()
        };
        let result = populate_env(vars.clone(), &policy);
        let expected: HashMap<String, String> = vars.into_iter().collect();
        assert_eq!(result, expected);
    }

    // Inherit::All still honors the default excludes when enabled.
    #[test]
    fn test_inherit_all_with_default_excludes() {
        let vars = make_vars(&[("PATH", "/usr/bin"), ("API_KEY", "secret")]);
        let policy = ShellEnvironmentPolicy {
            inherit: ShellEnvironmentPolicyInherit::All,
            ignore_default_excludes: false,
            ..Default::default()
        };
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "PATH".to_string() => "/usr/bin".to_string(),
        };
        assert_eq!(result, expected);
    }

    // Inherit::None starts from an empty map; only `set` entries survive.
    #[test]
    fn test_inherit_none() {
        let vars = make_vars(&[("PATH", "/usr/bin"), ("HOME", "/home")]);
        let mut policy = ShellEnvironmentPolicy {
            inherit: ShellEnvironmentPolicyInherit::None,
            ignore_default_excludes: true,
            ..Default::default()
        };
        policy
            .r#set
            .insert("ONLY_VAR".to_string(), "yes".to_string());
        let result = populate_env(vars, &policy);
        let expected: HashMap<String, String> = hashmap! {
            "ONLY_VAR".to_string() => "yes".to_string(),
        };
        assert_eq!(result, expected);
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/mcp_connection_manager.rs
codex-rs/core/src/mcp_connection_manager.rs
//! Connection manager for Model Context Protocol (MCP) servers. //! //! The [`McpConnectionManager`] owns one [`codex_rmcp_client::RmcpClient`] per //! configured server (keyed by the *server name*). It offers convenience //! helpers to query the available tools across *all* servers and returns them //! in a single aggregated map using the fully-qualified tool name //! `"<server><MCP_TOOL_NAME_DELIMITER><tool>"` as the key. use std::collections::HashMap; use std::collections::HashSet; use std::env; use std::ffi::OsString; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use crate::mcp::auth::McpAuthStatusEntry; use anyhow::Context; use anyhow::Result; use anyhow::anyhow; use async_channel::Sender; use codex_async_utils::CancelErr; use codex_async_utils::OrCancelExt; use codex_protocol::approvals::ElicitationRequestEvent; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::McpStartupCompleteEvent; use codex_protocol::protocol::McpStartupFailure; use codex_protocol::protocol::McpStartupStatus; use codex_protocol::protocol::McpStartupUpdateEvent; use codex_protocol::protocol::SandboxPolicy; use codex_rmcp_client::ElicitationResponse; use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rmcp_client::RmcpClient; use codex_rmcp_client::SendElicitation; use futures::future::BoxFuture; use futures::future::FutureExt; use futures::future::Shared; use mcp_types::ClientCapabilities; use mcp_types::Implementation; use mcp_types::ListResourceTemplatesRequestParams; use mcp_types::ListResourceTemplatesResult; use mcp_types::ListResourcesRequestParams; use mcp_types::ListResourcesResult; use mcp_types::ReadResourceRequestParams; use mcp_types::ReadResourceResult; use mcp_types::RequestId; use mcp_types::Resource; use mcp_types::ResourceTemplate; use mcp_types::Tool; use serde::Deserialize; use serde::Serialize; use serde_json::json; use sha1::Digest; use sha1::Sha1; use tokio::sync::Mutex; use 
tokio::sync::oneshot; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use tracing::instrument; use tracing::warn; use crate::codex::INITIAL_SUBMIT_ID; use crate::config::types::McpServerConfig; use crate::config::types::McpServerTransportConfig; /// Delimiter used to separate the server name from the tool name in a fully /// qualified tool name. /// /// OpenAI requires tool names to conform to `^[a-zA-Z0-9_-]+$`, so we must /// choose a delimiter from this character set. const MCP_TOOL_NAME_DELIMITER: &str = "__"; const MAX_TOOL_NAME_LENGTH: usize = 64; /// Default timeout for initializing MCP server & initially listing tools. pub const DEFAULT_STARTUP_TIMEOUT: Duration = Duration::from_secs(10); /// Default timeout for individual tool calls. const DEFAULT_TOOL_TIMEOUT: Duration = Duration::from_secs(60); fn qualify_tools<I>(tools: I) -> HashMap<String, ToolInfo> where I: IntoIterator<Item = ToolInfo>, { let mut used_names = HashSet::new(); let mut qualified_tools = HashMap::new(); for tool in tools { let mut qualified_name = format!( "mcp{}{}{}{}", MCP_TOOL_NAME_DELIMITER, tool.server_name, MCP_TOOL_NAME_DELIMITER, tool.tool_name ); if qualified_name.len() > MAX_TOOL_NAME_LENGTH { let mut hasher = Sha1::new(); hasher.update(qualified_name.as_bytes()); let sha1 = hasher.finalize(); let sha1_str = format!("{sha1:x}"); // Truncate to make room for the hash suffix let prefix_len = MAX_TOOL_NAME_LENGTH - sha1_str.len(); qualified_name = format!("{}{}", &qualified_name[..prefix_len], sha1_str); } if used_names.contains(&qualified_name) { warn!("skipping duplicated tool {}", qualified_name); continue; } used_names.insert(qualified_name.clone()); qualified_tools.insert(qualified_name, tool); } qualified_tools } #[derive(Clone)] pub(crate) struct ToolInfo { pub(crate) server_name: String, pub(crate) tool_name: String, pub(crate) tool: Tool, } type ResponderMap = HashMap<(String, RequestId), oneshot::Sender<ElicitationResponse>>; #[derive(Clone, Default)] 
struct ElicitationRequestManager { requests: Arc<Mutex<ResponderMap>>, } impl ElicitationRequestManager { async fn resolve( &self, server_name: String, id: RequestId, response: ElicitationResponse, ) -> Result<()> { self.requests .lock() .await .remove(&(server_name, id)) .ok_or_else(|| anyhow!("elicitation request not found"))? .send(response) .map_err(|e| anyhow!("failed to send elicitation response: {e:?}")) } fn make_sender(&self, server_name: String, tx_event: Sender<Event>) -> SendElicitation { let elicitation_requests = self.requests.clone(); Box::new(move |id, elicitation| { let elicitation_requests = elicitation_requests.clone(); let tx_event = tx_event.clone(); let server_name = server_name.clone(); async move { let (tx, rx) = oneshot::channel(); { let mut lock = elicitation_requests.lock().await; lock.insert((server_name.clone(), id.clone()), tx); } let _ = tx_event .send(Event { id: "mcp_elicitation_request".to_string(), msg: EventMsg::ElicitationRequest(ElicitationRequestEvent { server_name, id, message: elicitation.message, }), }) .await; rx.await .context("elicitation request channel closed unexpectedly") } .boxed() }) } } #[derive(Clone)] struct ManagedClient { client: Arc<RmcpClient>, tools: Vec<ToolInfo>, tool_filter: ToolFilter, tool_timeout: Option<Duration>, server_supports_sandbox_state_capability: bool, } impl ManagedClient { /// Returns once the server has ack'd the sandbox state update. 
async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { if !self.server_supports_sandbox_state_capability { return Ok(()); } let _response = self .client .send_custom_request( MCP_SANDBOX_STATE_METHOD, Some(serde_json::to_value(sandbox_state)?), ) .await?; Ok(()) } } #[derive(Clone)] struct AsyncManagedClient { client: Shared<BoxFuture<'static, Result<ManagedClient, StartupOutcomeError>>>, } impl AsyncManagedClient { fn new( server_name: String, config: McpServerConfig, store_mode: OAuthCredentialsStoreMode, cancel_token: CancellationToken, tx_event: Sender<Event>, elicitation_requests: ElicitationRequestManager, ) -> Self { let tool_filter = ToolFilter::from_config(&config); let fut = async move { if let Err(error) = validate_mcp_server_name(&server_name) { return Err(error.into()); } let client = Arc::new(make_rmcp_client(&server_name, config.transport, store_mode).await?); match start_server_task( server_name, client, config.startup_timeout_sec.or(Some(DEFAULT_STARTUP_TIMEOUT)), config.tool_timeout_sec.unwrap_or(DEFAULT_TOOL_TIMEOUT), tool_filter, tx_event, elicitation_requests, ) .or_cancel(&cancel_token) .await { Ok(result) => result, Err(CancelErr::Cancelled) => Err(StartupOutcomeError::Cancelled), } }; Self { client: fut.boxed().shared(), } } async fn client(&self) -> Result<ManagedClient, StartupOutcomeError> { self.client.clone().await } async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { let managed = self.client().await?; managed.notify_sandbox_state_change(sandbox_state).await } } pub const MCP_SANDBOX_STATE_CAPABILITY: &str = "codex/sandbox-state"; /// Custom MCP request to push sandbox state updates. /// When used, the `params` field of the notification is [`SandboxState`]. 
pub const MCP_SANDBOX_STATE_METHOD: &str = "codex/sandbox-state/update"; #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SandboxState { pub sandbox_policy: SandboxPolicy, pub codex_linux_sandbox_exe: Option<PathBuf>, pub sandbox_cwd: PathBuf, } /// A thin wrapper around a set of running [`RmcpClient`] instances. #[derive(Default)] pub(crate) struct McpConnectionManager { clients: HashMap<String, AsyncManagedClient>, elicitation_requests: ElicitationRequestManager, } impl McpConnectionManager { pub async fn initialize( &mut self, mcp_servers: HashMap<String, McpServerConfig>, store_mode: OAuthCredentialsStoreMode, auth_entries: HashMap<String, McpAuthStatusEntry>, tx_event: Sender<Event>, cancel_token: CancellationToken, initial_sandbox_state: SandboxState, ) { if cancel_token.is_cancelled() { return; } let mut clients = HashMap::new(); let mut join_set = JoinSet::new(); let elicitation_requests = ElicitationRequestManager::default(); for (server_name, cfg) in mcp_servers.into_iter().filter(|(_, cfg)| cfg.enabled) { let cancel_token = cancel_token.child_token(); let _ = emit_update( &tx_event, McpStartupUpdateEvent { server: server_name.clone(), status: McpStartupStatus::Starting, }, ) .await; let async_managed_client = AsyncManagedClient::new( server_name.clone(), cfg, store_mode, cancel_token.clone(), tx_event.clone(), elicitation_requests.clone(), ); clients.insert(server_name.clone(), async_managed_client.clone()); let tx_event = tx_event.clone(); let auth_entry = auth_entries.get(&server_name).cloned(); let sandbox_state = initial_sandbox_state.clone(); join_set.spawn(async move { let outcome = async_managed_client.client().await; if cancel_token.is_cancelled() { return (server_name, Err(StartupOutcomeError::Cancelled)); } let status = match &outcome { Ok(_) => { // Send sandbox state notification immediately after Ready if let Err(e) = async_managed_client .notify_sandbox_state_change(&sandbox_state) .await { 
warn!( "Failed to notify sandbox state to MCP server {server_name}: {e:#}", ); } McpStartupStatus::Ready } Err(error) => { let error_str = mcp_init_error_display( server_name.as_str(), auth_entry.as_ref(), error, ); McpStartupStatus::Failed { error: error_str } } }; let _ = emit_update( &tx_event, McpStartupUpdateEvent { server: server_name.clone(), status, }, ) .await; (server_name, outcome) }); } self.clients = clients; self.elicitation_requests = elicitation_requests.clone(); tokio::spawn(async move { let outcomes = join_set.join_all().await; let mut summary = McpStartupCompleteEvent::default(); for (server_name, outcome) in outcomes { match outcome { Ok(_) => summary.ready.push(server_name), Err(StartupOutcomeError::Cancelled) => summary.cancelled.push(server_name), Err(StartupOutcomeError::Failed { error }) => { summary.failed.push(McpStartupFailure { server: server_name, error, }) } } } let _ = tx_event .send(Event { id: INITIAL_SUBMIT_ID.to_owned(), msg: EventMsg::McpStartupComplete(summary), }) .await; }); } async fn client_by_name(&self, name: &str) -> Result<ManagedClient> { self.clients .get(name) .ok_or_else(|| anyhow!("unknown MCP server '{name}'"))? .client() .await .context("failed to get client") } pub async fn resolve_elicitation( &self, server_name: String, id: RequestId, response: ElicitationResponse, ) -> Result<()> { self.elicitation_requests .resolve(server_name, id, response) .await } /// Returns a single map that contains all tools. Each key is the /// fully-qualified name for the tool. #[instrument(level = "trace", skip_all)] pub async fn list_all_tools(&self) -> HashMap<String, ToolInfo> { let mut tools = HashMap::new(); for managed_client in self.clients.values() { if let Ok(client) = managed_client.client().await { tools.extend(qualify_tools(filter_tools( client.tools, client.tool_filter, ))); } } tools } /// Returns a single map that contains all resources. Each key is the /// server name and the value is a vector of resources. 
pub async fn list_all_resources(&self) -> HashMap<String, Vec<Resource>> { let mut join_set = JoinSet::new(); let clients_snapshot = &self.clients; for (server_name, async_managed_client) in clients_snapshot { let server_name = server_name.clone(); let Ok(managed_client) = async_managed_client.client().await else { continue; }; let timeout = managed_client.tool_timeout; let client = managed_client.client.clone(); join_set.spawn(async move { let mut collected: Vec<Resource> = Vec::new(); let mut cursor: Option<String> = None; loop { let params = cursor.as_ref().map(|next| ListResourcesRequestParams { cursor: Some(next.clone()), }); let response = match client.list_resources(params, timeout).await { Ok(result) => result, Err(err) => return (server_name, Err(err)), }; collected.extend(response.resources); match response.next_cursor { Some(next) => { if cursor.as_ref() == Some(&next) { return ( server_name, Err(anyhow!("resources/list returned duplicate cursor")), ); } cursor = Some(next); } None => return (server_name, Ok(collected)), } } }); } let mut aggregated: HashMap<String, Vec<Resource>> = HashMap::new(); while let Some(join_res) = join_set.join_next().await { match join_res { Ok((server_name, Ok(resources))) => { aggregated.insert(server_name, resources); } Ok((server_name, Err(err))) => { warn!("Failed to list resources for MCP server '{server_name}': {err:#}"); } Err(err) => { warn!("Task panic when listing resources for MCP server: {err:#}"); } } } aggregated } /// Returns a single map that contains all resource templates. Each key is the /// server name and the value is a vector of resource templates. 
pub async fn list_all_resource_templates(&self) -> HashMap<String, Vec<ResourceTemplate>> { let mut join_set = JoinSet::new(); let clients_snapshot = &self.clients; for (server_name, async_managed_client) in clients_snapshot { let server_name_cloned = server_name.clone(); let Ok(managed_client) = async_managed_client.client().await else { continue; }; let client = managed_client.client.clone(); let timeout = managed_client.tool_timeout; join_set.spawn(async move { let mut collected: Vec<ResourceTemplate> = Vec::new(); let mut cursor: Option<String> = None; loop { let params = cursor .as_ref() .map(|next| ListResourceTemplatesRequestParams { cursor: Some(next.clone()), }); let response = match client.list_resource_templates(params, timeout).await { Ok(result) => result, Err(err) => return (server_name_cloned, Err(err)), }; collected.extend(response.resource_templates); match response.next_cursor { Some(next) => { if cursor.as_ref() == Some(&next) { return ( server_name_cloned, Err(anyhow!( "resources/templates/list returned duplicate cursor" )), ); } cursor = Some(next); } None => return (server_name_cloned, Ok(collected)), } } }); } let mut aggregated: HashMap<String, Vec<ResourceTemplate>> = HashMap::new(); while let Some(join_res) = join_set.join_next().await { match join_res { Ok((server_name, Ok(templates))) => { aggregated.insert(server_name, templates); } Ok((server_name, Err(err))) => { warn!( "Failed to list resource templates for MCP server '{server_name}': {err:#}" ); } Err(err) => { warn!("Task panic when listing resource templates for MCP server: {err:#}"); } } } aggregated } /// Invoke the tool indicated by the (server, tool) pair. 
pub async fn call_tool( &self, server: &str, tool: &str, arguments: Option<serde_json::Value>, ) -> Result<mcp_types::CallToolResult> { let client = self.client_by_name(server).await?; if !client.tool_filter.allows(tool) { return Err(anyhow!( "tool '{tool}' is disabled for MCP server '{server}'" )); } client .client .call_tool(tool.to_string(), arguments, client.tool_timeout) .await .with_context(|| format!("tool call failed for `{server}/{tool}`")) } /// List resources from the specified server. pub async fn list_resources( &self, server: &str, params: Option<ListResourcesRequestParams>, ) -> Result<ListResourcesResult> { let managed = self.client_by_name(server).await?; let timeout = managed.tool_timeout; managed .client .list_resources(params, timeout) .await .with_context(|| format!("resources/list failed for `{server}`")) } /// List resource templates from the specified server. pub async fn list_resource_templates( &self, server: &str, params: Option<ListResourceTemplatesRequestParams>, ) -> Result<ListResourceTemplatesResult> { let managed = self.client_by_name(server).await?; let client = managed.client.clone(); let timeout = managed.tool_timeout; client .list_resource_templates(params, timeout) .await .with_context(|| format!("resources/templates/list failed for `{server}`")) } /// Read a resource from the specified server. 
pub async fn read_resource( &self, server: &str, params: ReadResourceRequestParams, ) -> Result<ReadResourceResult> { let managed = self.client_by_name(server).await?; let client = managed.client.clone(); let timeout = managed.tool_timeout; let uri = params.uri.clone(); client .read_resource(params, timeout) .await .with_context(|| format!("resources/read failed for `{server}` ({uri})")) } pub async fn parse_tool_name(&self, tool_name: &str) -> Option<(String, String)> { self.list_all_tools() .await .get(tool_name) .map(|tool| (tool.server_name.clone(), tool.tool_name.clone())) } pub async fn notify_sandbox_state_change(&self, sandbox_state: &SandboxState) -> Result<()> { let mut join_set = JoinSet::new(); for async_managed_client in self.clients.values() { let sandbox_state = sandbox_state.clone(); let async_managed_client = async_managed_client.clone(); join_set.spawn(async move { async_managed_client .notify_sandbox_state_change(&sandbox_state) .await }); } while let Some(join_res) = join_set.join_next().await { match join_res { Ok(Ok(())) => {} Ok(Err(err)) => { warn!("Failed to notify sandbox state change to MCP server: {err:#}"); } Err(err) => { warn!("Task panic when notifying sandbox state change to MCP server: {err:#}"); } } } Ok(()) } } async fn emit_update( tx_event: &Sender<Event>, update: McpStartupUpdateEvent, ) -> Result<(), async_channel::SendError<Event>> { tx_event .send(Event { id: INITIAL_SUBMIT_ID.to_owned(), msg: EventMsg::McpStartupUpdate(update), }) .await } /// A tool is allowed to be used if both are true: /// 1. enabled is None (no allowlist is set) or the tool is explicitly enabled. /// 2. The tool is not explicitly disabled. 
#[derive(Default, Clone)] pub(crate) struct ToolFilter { enabled: Option<HashSet<String>>, disabled: HashSet<String>, } impl ToolFilter { fn from_config(cfg: &McpServerConfig) -> Self { let enabled = cfg .enabled_tools .as_ref() .map(|tools| tools.iter().cloned().collect::<HashSet<_>>()); let disabled = cfg .disabled_tools .as_ref() .map(|tools| tools.iter().cloned().collect::<HashSet<_>>()) .unwrap_or_default(); Self { enabled, disabled } } fn allows(&self, tool_name: &str) -> bool { if let Some(enabled) = &self.enabled && !enabled.contains(tool_name) { return false; } !self.disabled.contains(tool_name) } } fn filter_tools(tools: Vec<ToolInfo>, filter: ToolFilter) -> Vec<ToolInfo> { tools .into_iter() .filter(|tool| filter.allows(&tool.tool_name)) .collect() } fn resolve_bearer_token( server_name: &str, bearer_token_env_var: Option<&str>, ) -> Result<Option<String>> { let Some(env_var) = bearer_token_env_var else { return Ok(None); }; match env::var(env_var) { Ok(value) => { if value.is_empty() { Err(anyhow!( "Environment variable {env_var} for MCP server '{server_name}' is empty" )) } else { Ok(Some(value)) } } Err(env::VarError::NotPresent) => Err(anyhow!( "Environment variable {env_var} for MCP server '{server_name}' is not set" )), Err(env::VarError::NotUnicode(_)) => Err(anyhow!( "Environment variable {env_var} for MCP server '{server_name}' contains invalid Unicode" )), } } #[derive(Debug, Clone, thiserror::Error)] enum StartupOutcomeError { #[error("MCP startup cancelled")] Cancelled, // We can't store the original error here because anyhow::Error doesn't implement // `Clone`. #[error("MCP startup failed: {error}")] Failed { error: String }, } impl From<anyhow::Error> for StartupOutcomeError { fn from(error: anyhow::Error) -> Self { Self::Failed { error: error.to_string(), } } } async fn start_server_task( server_name: String, client: Arc<RmcpClient>, startup_timeout: Option<Duration>, // TODO: cancel_token should handle this. 
tool_timeout: Duration, tool_filter: ToolFilter, tx_event: Sender<Event>, elicitation_requests: ElicitationRequestManager, ) -> Result<ManagedClient, StartupOutcomeError> { let params = mcp_types::InitializeRequestParams { capabilities: ClientCapabilities { experimental: None, roots: None, sampling: None, // https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation#capabilities // indicates this should be an empty object. elicitation: Some(json!({})), }, client_info: Implementation { name: "codex-mcp-client".to_owned(), version: env!("CARGO_PKG_VERSION").to_owned(), title: Some("Codex".into()), // This field is used by Codex when it is an MCP // server: it should not be used when Codex is // an MCP client. user_agent: None, }, protocol_version: mcp_types::MCP_SCHEMA_VERSION.to_owned(), }; let send_elicitation = elicitation_requests.make_sender(server_name.clone(), tx_event); let initialize_result = client .initialize(params, startup_timeout, send_elicitation) .await .map_err(StartupOutcomeError::from)?; let tools = list_tools_for_client(&server_name, &client, startup_timeout) .await .map_err(StartupOutcomeError::from)?; let server_supports_sandbox_state_capability = initialize_result .capabilities .experimental .as_ref() .and_then(|exp| exp.get(MCP_SANDBOX_STATE_CAPABILITY)) .is_some(); let managed = ManagedClient { client: Arc::clone(&client), tools, tool_timeout: Some(tool_timeout), tool_filter, server_supports_sandbox_state_capability, }; Ok(managed) } async fn make_rmcp_client( server_name: &str, transport: McpServerTransportConfig, store_mode: OAuthCredentialsStoreMode, ) -> Result<RmcpClient, StartupOutcomeError> { match transport { McpServerTransportConfig::Stdio { command, args, env, env_vars, cwd, } => { let command_os: OsString = command.into(); let args_os: Vec<OsString> = args.into_iter().map(Into::into).collect(); RmcpClient::new_stdio_client(command_os, args_os, env, &env_vars, cwd) .await .map_err(|err| 
StartupOutcomeError::from(anyhow!(err))) } McpServerTransportConfig::StreamableHttp { url, http_headers, env_http_headers, bearer_token_env_var, } => { let resolved_bearer_token = match resolve_bearer_token(server_name, bearer_token_env_var.as_deref()) { Ok(token) => token, Err(error) => return Err(error.into()), }; RmcpClient::new_streamable_http_client( server_name, &url, resolved_bearer_token, http_headers, env_http_headers, store_mode, ) .await .map_err(StartupOutcomeError::from) } } } async fn list_tools_for_client( server_name: &str, client: &Arc<RmcpClient>, timeout: Option<Duration>, ) -> Result<Vec<ToolInfo>> { let resp = client.list_tools(None, timeout).await?; Ok(resp .tools .into_iter() .map(|tool| ToolInfo { server_name: server_name.to_owned(), tool_name: tool.name.clone(), tool, }) .collect()) } fn validate_mcp_server_name(server_name: &str) -> Result<()> { let re = regex_lite::Regex::new(r"^[a-zA-Z0-9_-]+$")?; if !re.is_match(server_name) { return Err(anyhow!( "Invalid MCP server name '{server_name}': must match pattern {pattern}", pattern = re.as_str() )); } Ok(()) } fn mcp_init_error_display( server_name: &str, entry: Option<&McpAuthStatusEntry>, err: &StartupOutcomeError, ) -> String { if let Some(McpServerTransportConfig::StreamableHttp { url, bearer_token_env_var, http_headers, .. }) = &entry.map(|entry| &entry.config.transport) && url == "https://api.githubcopilot.com/mcp/" && bearer_token_env_var.is_none() && http_headers.as_ref().map(HashMap::is_empty).unwrap_or(true) { format!( "GitHub MCP does not support OAuth. Log in by adding a personal access token (https://github.com/settings/personal-access-tokens) to your environment and config.toml:\n[mcp_servers.{server_name}]\nbearer_token_env_var = CODEX_GITHUB_PERSONAL_ACCESS_TOKEN" ) } else if is_mcp_client_auth_required_error(err) { format!( "The {server_name} MCP server is not logged in. Run `codex mcp login {server_name}`." 
) } else if is_mcp_client_startup_timeout_error(err) { let startup_timeout_secs = match entry { Some(entry) => match entry.config.startup_timeout_sec { Some(timeout) => timeout, None => DEFAULT_STARTUP_TIMEOUT, }, None => DEFAULT_STARTUP_TIMEOUT, } .as_secs(); format!( "MCP client for `{server_name}` timed out after {startup_timeout_secs} seconds. Add or adjust `startup_timeout_sec` in your config.toml:\n[mcp_servers.{server_name}]\nstartup_timeout_sec = XX" ) } else { format!("MCP client for `{server_name}` failed to start: {err:#}") } } fn is_mcp_client_auth_required_error(error: &StartupOutcomeError) -> bool { match error { StartupOutcomeError::Failed { error } => error.contains("Auth required"), _ => false, } } fn is_mcp_client_startup_timeout_error(error: &StartupOutcomeError) -> bool { match error { StartupOutcomeError::Failed { error } => { error.contains("request timed out") || error.contains("timed out handshaking with MCP server") } _ => false, } } #[cfg(test)] mod mcp_init_error_display_tests {} #[cfg(test)] mod tests { use super::*;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/lib.rs
codex-rs/core/src/lib.rs
//! Root of the `codex-core` library. // Prevent accidental direct writes to stdout/stderr in library code. All // user-visible output must go through the appropriate abstraction (e.g., // the TUI or the tracing stack). #![deny(clippy::print_stdout, clippy::print_stderr)] pub mod api_bridge; mod apply_patch; pub mod auth; pub mod bash; mod client; mod client_common; pub mod codex; mod codex_conversation; mod compact_remote; pub use codex_conversation::CodexConversation; mod codex_delegate; mod command_safety; pub mod config; pub mod config_loader; mod context_manager; pub mod custom_prompts; pub mod env; mod environment_context; pub mod error; pub mod exec; pub mod exec_env; mod exec_policy; pub mod features; mod flags; pub mod git_info; pub mod landlock; pub mod mcp; mod mcp_connection_manager; pub mod models_manager; pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY; pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD; pub use mcp_connection_manager::SandboxState; mod mcp_tool_call; mod message_history; mod model_provider_info; pub mod parse_command; pub mod path_utils; pub mod powershell; pub mod sandboxing; mod stream_events_utils; mod text_encoding; pub mod token_data; mod truncate; mod unified_exec; mod user_instructions; pub use model_provider_info::CHAT_WIRE_API_DEPRECATION_SUMMARY; pub use model_provider_info::DEFAULT_LMSTUDIO_PORT; pub use model_provider_info::DEFAULT_OLLAMA_PORT; pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; pub use model_provider_info::ModelProviderInfo; pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID; pub use model_provider_info::WireApi; pub use model_provider_info::built_in_model_providers; pub use model_provider_info::create_oss_provider_with_base_url; mod conversation_manager; mod event_mapping; pub mod review_format; pub mod review_prompts; pub use codex_protocol::protocol::InitialHistory; pub use conversation_manager::ConversationManager; pub use conversation_manager::NewConversation; // Re-export 
common auth types for workspace consumers pub use auth::AuthManager; pub use auth::CodexAuth; pub mod default_client; pub mod project_doc; mod rollout; pub(crate) mod safety; pub mod seatbelt; pub mod shell; pub mod shell_snapshot; pub mod skills; pub mod spawn; pub mod terminal; mod tools; pub mod turn_diff_tracker; pub use rollout::ARCHIVED_SESSIONS_SUBDIR; pub use rollout::INTERACTIVE_SESSION_SOURCES; pub use rollout::RolloutRecorder; pub use rollout::SESSIONS_SUBDIR; pub use rollout::SessionMeta; pub use rollout::find_conversation_path_by_id_str; pub use rollout::list::ConversationItem; pub use rollout::list::ConversationsPage; pub use rollout::list::Cursor; pub use rollout::list::parse_cursor; pub use rollout::list::read_head_for_summary; mod function_tool; mod state; mod tasks; mod user_notification; mod user_shell_command; pub mod util; pub use apply_patch::CODEX_APPLY_PATCH_ARG1; pub use command_safety::is_dangerous_command; pub use command_safety::is_safe_command; pub use exec_policy::ExecPolicyError; pub use exec_policy::load_exec_policy; pub use safety::get_platform_sandbox; pub use safety::set_windows_sandbox_enabled; // Re-export the protocol types from the standalone `codex-protocol` crate so existing // `codex_core::protocol::...` references continue to work across the workspace. pub use codex_protocol::protocol; // Re-export protocol config enums to ensure call sites can use the same types // as those in the protocol crate when constructing protocol messages. 
pub use codex_protocol::config_types as protocol_config_types; pub use client::ModelClient; pub use client_common::Prompt; pub use client_common::REVIEW_PROMPT; pub use client_common::ResponseEvent; pub use client_common::ResponseStream; pub use codex_protocol::models::ContentItem; pub use codex_protocol::models::LocalShellAction; pub use codex_protocol::models::LocalShellExecAction; pub use codex_protocol::models::LocalShellStatus; pub use codex_protocol::models::ResponseItem; pub use compact::content_items_to_text; pub use event_mapping::parse_turn_item; pub mod compact; pub mod otel_init;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/compact_remote.rs
codex-rs/core/src/compact_remote.rs
use std::sync::Arc; use crate::Prompt; use crate::codex::Session; use crate::codex::TurnContext; use crate::error::Result as CodexResult; use crate::protocol::CompactedItem; use crate::protocol::ContextCompactedEvent; use crate::protocol::EventMsg; use crate::protocol::RolloutItem; use crate::protocol::TaskStartedEvent; use codex_protocol::models::ResponseItem; pub(crate) async fn run_inline_remote_auto_compact_task( sess: Arc<Session>, turn_context: Arc<TurnContext>, ) { run_remote_compact_task_inner(&sess, &turn_context).await; } pub(crate) async fn run_remote_compact_task(sess: Arc<Session>, turn_context: Arc<TurnContext>) { let start_event = EventMsg::TaskStarted(TaskStartedEvent { model_context_window: turn_context.client.get_model_context_window(), }); sess.send_event(&turn_context, start_event).await; run_remote_compact_task_inner(&sess, &turn_context).await; } async fn run_remote_compact_task_inner(sess: &Arc<Session>, turn_context: &Arc<TurnContext>) { if let Err(err) = run_remote_compact_task_inner_impl(sess, turn_context).await { let event = EventMsg::Error( err.to_error_event(Some("Error running remote compact task".to_string())), ); sess.send_event(turn_context, event).await; } } async fn run_remote_compact_task_inner_impl( sess: &Arc<Session>, turn_context: &Arc<TurnContext>, ) -> CodexResult<()> { let mut history = sess.clone_history().await; let prompt = Prompt { input: history.get_history_for_prompt(), tools: vec![], parallel_tool_calls: false, base_instructions_override: turn_context.base_instructions.clone(), output_schema: None, }; let mut new_history = turn_context .client .compact_conversation_history(&prompt) .await?; // Required to keep `/undo` available after compaction let ghost_snapshots: Vec<ResponseItem> = history .get_history() .iter() .filter(|item| matches!(item, ResponseItem::GhostSnapshot { .. 
})) .cloned() .collect(); if !ghost_snapshots.is_empty() { new_history.extend(ghost_snapshots); } sess.replace_history(new_history.clone()).await; sess.recompute_token_usage(turn_context).await; let compacted_item = CompactedItem { message: String::new(), replacement_history: Some(new_history), }; sess.persist_rollout_items(&[RolloutItem::Compacted(compacted_item)]) .await; let event = EventMsg::ContextCompacted(ContextCompactedEvent {}); sess.send_event(turn_context, event).await; Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/turn_diff_tracker.rs
codex-rs/core/src/turn_diff_tracker.rs
use std::collections::HashMap; use std::fs; use std::path::Path; use std::path::PathBuf; use std::process::Command; use anyhow::Context; use anyhow::Result; use anyhow::anyhow; use sha1::digest::Output; use uuid::Uuid; use crate::protocol::FileChange; const ZERO_OID: &str = "0000000000000000000000000000000000000000"; const DEV_NULL: &str = "/dev/null"; struct BaselineFileInfo { path: PathBuf, content: Vec<u8>, mode: FileMode, oid: String, } /// Tracks sets of changes to files and exposes the overall unified diff. /// Internally, the way this works is now: /// 1. Maintain an in-memory baseline snapshot of files when they are first seen. /// For new additions, do not create a baseline so that diffs are shown as proper additions (using /dev/null). /// 2. Keep a stable internal filename (uuid) per external path for rename tracking. /// 3. To compute the aggregated unified diff, compare each baseline snapshot to the current file on disk entirely in-memory /// using the `similar` crate and emit unified diffs with rewritten external paths. #[derive(Default)] pub struct TurnDiffTracker { /// Map external path -> internal filename (uuid). external_to_temp_name: HashMap<PathBuf, String>, /// Internal filename -> baseline file info. baseline_file_info: HashMap<String, BaselineFileInfo>, /// Internal filename -> external path as of current accumulated state (after applying all changes). /// This is where renames are tracked. temp_name_to_current_path: HashMap<String, PathBuf>, /// Cache of known git worktree roots to avoid repeated filesystem walks. git_root_cache: Vec<PathBuf>, } impl TurnDiffTracker { pub fn new() -> Self { Self::default() } /// Front-run apply patch calls to track the starting contents of any modified files. /// - Creates an in-memory baseline snapshot for files that already exist on disk when first seen. /// - For additions, we intentionally do not create a baseline snapshot so that diffs are proper additions. 
/// - Also updates internal mappings for move/rename events. pub fn on_patch_begin(&mut self, changes: &HashMap<PathBuf, FileChange>) { for (path, change) in changes.iter() { // Ensure a stable internal filename exists for this external path. if !self.external_to_temp_name.contains_key(path) { let internal = Uuid::new_v4().to_string(); self.external_to_temp_name .insert(path.clone(), internal.clone()); self.temp_name_to_current_path .insert(internal.clone(), path.clone()); // If the file exists on disk now, snapshot as baseline; else leave missing to represent /dev/null. let baseline_file_info = if path.exists() { let mode = file_mode_for_path(path); let mode_val = mode.unwrap_or(FileMode::Regular); let content = blob_bytes(path, mode_val).unwrap_or_default(); let oid = if mode == Some(FileMode::Symlink) { format!("{:x}", git_blob_sha1_hex_bytes(&content)) } else { self.git_blob_oid_for_path(path) .unwrap_or_else(|| format!("{:x}", git_blob_sha1_hex_bytes(&content))) }; Some(BaselineFileInfo { path: path.clone(), content, mode: mode_val, oid, }) } else { Some(BaselineFileInfo { path: path.clone(), content: vec![], mode: FileMode::Regular, oid: ZERO_OID.to_string(), }) }; if let Some(baseline_file_info) = baseline_file_info { self.baseline_file_info .insert(internal.clone(), baseline_file_info); } } // Track rename/move in current mapping if provided in an Update. if let FileChange::Update { move_path: Some(dest), .. } = change { let uuid_filename = match self.external_to_temp_name.get(path) { Some(i) => i.clone(), None => { // This should be rare, but if we haven't mapped the source, create it with no baseline. let i = Uuid::new_v4().to_string(); self.baseline_file_info.insert( i.clone(), BaselineFileInfo { path: path.clone(), content: vec![], mode: FileMode::Regular, oid: ZERO_OID.to_string(), }, ); i } }; // Update current external mapping for temp file name. 
self.temp_name_to_current_path .insert(uuid_filename.clone(), dest.clone()); // Update forward file_mapping: external current -> internal name. self.external_to_temp_name.remove(path); self.external_to_temp_name .insert(dest.clone(), uuid_filename); }; } } fn get_path_for_internal(&self, internal: &str) -> Option<PathBuf> { self.temp_name_to_current_path .get(internal) .cloned() .or_else(|| { self.baseline_file_info .get(internal) .map(|info| info.path.clone()) }) } /// Find the git worktree root for a file/directory by walking up to the first ancestor containing a `.git` entry. /// Uses a simple cache of known roots and avoids negative-result caching for simplicity. fn find_git_root_cached(&mut self, start: &Path) -> Option<PathBuf> { let dir = if start.is_dir() { start } else { start.parent()? }; // Fast path: if any cached root is an ancestor of this path, use it. if let Some(root) = self .git_root_cache .iter() .find(|r| dir.starts_with(r)) .cloned() { return Some(root); } // Walk up to find a `.git` marker. let mut cur = dir.to_path_buf(); loop { let git_marker = cur.join(".git"); if git_marker.is_dir() || git_marker.is_file() { if !self.git_root_cache.iter().any(|r| r == &cur) { self.git_root_cache.push(cur.clone()); } return Some(cur); } // On Windows, avoid walking above the drive or UNC share root. #[cfg(windows)] { if is_windows_drive_or_unc_root(&cur) { return None; } } if let Some(parent) = cur.parent() { cur = parent.to_path_buf(); } else { return None; } } } /// Return a display string for `path` relative to its git root if found, else absolute. fn relative_to_git_root_str(&mut self, path: &Path) -> String { let s = if let Some(root) = self.find_git_root_cached(path) { if let Ok(rel) = path.strip_prefix(&root) { rel.display().to_string() } else { path.display().to_string() } } else { path.display().to_string() }; s.replace('\\', "/") } /// Ask git to compute the blob SHA-1 for the file at `path` within its repository. 
/// Returns None if no repository is found or git invocation fails. fn git_blob_oid_for_path(&mut self, path: &Path) -> Option<String> { let root = self.find_git_root_cached(path)?; // Compute a path relative to the repo root for better portability across platforms. let rel = path.strip_prefix(&root).unwrap_or(path); let output = Command::new("git") .arg("-C") .arg(&root) .arg("hash-object") .arg("--") .arg(rel) .output() .ok()?; if !output.status.success() { return None; } let s = String::from_utf8_lossy(&output.stdout).trim().to_string(); if s.len() == 40 { Some(s) } else { None } } /// Recompute the aggregated unified diff by comparing all of the in-memory snapshots that were /// collected before the first time they were touched by apply_patch during this turn with /// the current repo state. pub fn get_unified_diff(&mut self) -> Result<Option<String>> { let mut aggregated = String::new(); // Compute diffs per tracked internal file in a stable order by external path. let mut baseline_file_names: Vec<String> = self.baseline_file_info.keys().cloned().collect(); // Sort lexicographically by full repo-relative path to match git behavior. baseline_file_names.sort_by_key(|internal| { self.get_path_for_internal(internal) .map(|p| self.relative_to_git_root_str(&p)) .unwrap_or_default() }); for internal in baseline_file_names { aggregated.push_str(self.get_file_diff(&internal).as_str()); if !aggregated.ends_with('\n') { aggregated.push('\n'); } } if aggregated.trim().is_empty() { Ok(None) } else { Ok(Some(aggregated)) } } fn get_file_diff(&mut self, internal_file_name: &str) -> String { let mut aggregated = String::new(); // Snapshot lightweight fields only. 
let (baseline_external_path, baseline_mode, left_oid) = { if let Some(info) = self.baseline_file_info.get(internal_file_name) { (info.path.clone(), info.mode, info.oid.clone()) } else { (PathBuf::new(), FileMode::Regular, ZERO_OID.to_string()) } }; let current_external_path = match self.get_path_for_internal(internal_file_name) { Some(p) => p, None => return aggregated, }; let current_mode = file_mode_for_path(&current_external_path).unwrap_or(FileMode::Regular); let right_bytes = blob_bytes(&current_external_path, current_mode); // Compute displays with &mut self before borrowing any baseline content. let left_display = self.relative_to_git_root_str(&baseline_external_path); let right_display = self.relative_to_git_root_str(&current_external_path); // Compute right oid before borrowing baseline content. let right_oid = if let Some(b) = right_bytes.as_ref() { if current_mode == FileMode::Symlink { format!("{:x}", git_blob_sha1_hex_bytes(b)) } else { self.git_blob_oid_for_path(&current_external_path) .unwrap_or_else(|| format!("{:x}", git_blob_sha1_hex_bytes(b))) } } else { ZERO_OID.to_string() }; // Borrow baseline content only after all &mut self uses are done. let left_present = left_oid.as_str() != ZERO_OID; let left_bytes: Option<&[u8]> = if left_present { self.baseline_file_info .get(internal_file_name) .map(|i| i.content.as_slice()) } else { None }; // Fast path: identical bytes or both missing. 
if left_bytes == right_bytes.as_deref() { return aggregated; } aggregated.push_str(&format!("diff --git a/{left_display} b/{right_display}\n")); let is_add = !left_present && right_bytes.is_some(); let is_delete = left_present && right_bytes.is_none(); if is_add { aggregated.push_str(&format!("new file mode {current_mode}\n")); } else if is_delete { aggregated.push_str(&format!("deleted file mode {baseline_mode}\n")); } else if baseline_mode != current_mode { aggregated.push_str(&format!("old mode {baseline_mode}\n")); aggregated.push_str(&format!("new mode {current_mode}\n")); } let left_text = left_bytes.and_then(|b| std::str::from_utf8(b).ok()); let right_text = right_bytes .as_deref() .and_then(|b| std::str::from_utf8(b).ok()); let can_text_diff = matches!( (left_text, right_text, is_add, is_delete), (Some(_), Some(_), _, _) | (_, Some(_), true, _) | (Some(_), _, _, true) ); if can_text_diff { let l = left_text.unwrap_or(""); let r = right_text.unwrap_or(""); aggregated.push_str(&format!("index {left_oid}..{right_oid}\n")); let old_header = if left_present { format!("a/{left_display}") } else { DEV_NULL.to_string() }; let new_header = if right_bytes.is_some() { format!("b/{right_display}") } else { DEV_NULL.to_string() }; let diff = similar::TextDiff::from_lines(l, r); let unified = diff .unified_diff() .context_radius(3) .header(&old_header, &new_header) .to_string(); aggregated.push_str(&unified); } else { aggregated.push_str(&format!("index {left_oid}..{right_oid}\n")); let old_header = if left_present { format!("a/{left_display}") } else { DEV_NULL.to_string() }; let new_header = if right_bytes.is_some() { format!("b/{right_display}") } else { DEV_NULL.to_string() }; aggregated.push_str(&format!("--- {old_header}\n")); aggregated.push_str(&format!("+++ {new_header}\n")); aggregated.push_str("Binary files differ\n"); } aggregated } } /// Compute the Git SHA-1 blob object ID for the given content (bytes). 
fn git_blob_sha1_hex_bytes(data: &[u8]) -> Output<sha1::Sha1> { // Git blob hash is sha1 of: "blob <len>\0<data>" let header = format!("blob {}\0", data.len()); use sha1::Digest; let mut hasher = sha1::Sha1::new(); hasher.update(header.as_bytes()); hasher.update(data); hasher.finalize() } #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum FileMode { Regular, #[cfg(unix)] Executable, Symlink, } impl FileMode { fn as_str(self) -> &'static str { match self { FileMode::Regular => "100644", #[cfg(unix)] FileMode::Executable => "100755", FileMode::Symlink => "120000", } } } impl std::fmt::Display for FileMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(self.as_str()) } } #[cfg(unix)] fn file_mode_for_path(path: &Path) -> Option<FileMode> { use std::os::unix::fs::PermissionsExt; let meta = fs::symlink_metadata(path).ok()?; let ft = meta.file_type(); if ft.is_symlink() { return Some(FileMode::Symlink); } let mode = meta.permissions().mode(); let is_exec = (mode & 0o111) != 0; Some(if is_exec { FileMode::Executable } else { FileMode::Regular }) } #[cfg(not(unix))] fn file_mode_for_path(_path: &Path) -> Option<FileMode> { // Default to non-executable on non-unix. 
Some(FileMode::Regular) } fn blob_bytes(path: &Path, mode: FileMode) -> Option<Vec<u8>> { if path.exists() { let contents = if mode == FileMode::Symlink { symlink_blob_bytes(path) .ok_or_else(|| anyhow!("failed to read symlink target for {}", path.display())) } else { fs::read(path) .with_context(|| format!("failed to read current file for diff {}", path.display())) }; contents.ok() } else { None } } #[cfg(unix)] fn symlink_blob_bytes(path: &Path) -> Option<Vec<u8>> { use std::os::unix::ffi::OsStrExt; let target = std::fs::read_link(path).ok()?; Some(target.as_os_str().as_bytes().to_vec()) } #[cfg(not(unix))] fn symlink_blob_bytes(_path: &Path) -> Option<Vec<u8>> { None } #[cfg(windows)] fn is_windows_drive_or_unc_root(p: &std::path::Path) -> bool { use std::path::Component; let mut comps = p.components(); matches!( (comps.next(), comps.next(), comps.next()), (Some(Component::Prefix(_)), Some(Component::RootDir), None) ) } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; use tempfile::tempdir; /// Compute the Git SHA-1 blob object ID for the given content (string). /// This delegates to the bytes version to avoid UTF-8 lossy conversions here. 
fn git_blob_sha1_hex(data: &str) -> String { format!("{:x}", git_blob_sha1_hex_bytes(data.as_bytes())) } fn normalize_diff_for_test(input: &str, root: &Path) -> String { let root_str = root.display().to_string().replace('\\', "/"); let replaced = input.replace(&root_str, "<TMP>"); // Split into blocks on lines starting with "diff --git ", sort blocks for determinism, and rejoin let mut blocks: Vec<String> = Vec::new(); let mut current = String::new(); for line in replaced.lines() { if line.starts_with("diff --git ") && !current.is_empty() { blocks.push(current); current = String::new(); } if !current.is_empty() { current.push('\n'); } current.push_str(line); } if !current.is_empty() { blocks.push(current); } blocks.sort(); let mut out = blocks.join("\n"); if !out.ends_with('\n') { out.push('\n'); } out } #[test] fn accumulates_add_and_update() { let mut acc = TurnDiffTracker::new(); let dir = tempdir().unwrap(); let file = dir.path().join("a.txt"); // First patch: add file (baseline should be /dev/null). let add_changes = HashMap::from([( file.clone(), FileChange::Add { content: "foo\n".to_string(), }, )]); acc.on_patch_begin(&add_changes); // Simulate apply: create the file on disk. fs::write(&file, "foo\n").unwrap(); let first = acc.get_unified_diff().unwrap().unwrap(); let first = normalize_diff_for_test(&first, dir.path()); let expected_first = { let mode = file_mode_for_path(&file).unwrap_or(FileMode::Regular); let right_oid = git_blob_sha1_hex("foo\n"); format!( r#"diff --git a/<TMP>/a.txt b/<TMP>/a.txt new file mode {mode} index {ZERO_OID}..{right_oid} --- {DEV_NULL} +++ b/<TMP>/a.txt @@ -0,0 +1 @@ +foo "#, ) }; assert_eq!(first, expected_first); // Second patch: update the file on disk. let update_changes = HashMap::from([( file.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: None, }, )]); acc.on_patch_begin(&update_changes); // Simulate apply: append a new line. 
fs::write(&file, "foo\nbar\n").unwrap(); let combined = acc.get_unified_diff().unwrap().unwrap(); let combined = normalize_diff_for_test(&combined, dir.path()); let expected_combined = { let mode = file_mode_for_path(&file).unwrap_or(FileMode::Regular); let right_oid = git_blob_sha1_hex("foo\nbar\n"); format!( r#"diff --git a/<TMP>/a.txt b/<TMP>/a.txt new file mode {mode} index {ZERO_OID}..{right_oid} --- {DEV_NULL} +++ b/<TMP>/a.txt @@ -0,0 +1,2 @@ +foo +bar "#, ) }; assert_eq!(combined, expected_combined); } #[test] fn accumulates_delete() { let dir = tempdir().unwrap(); let file = dir.path().join("b.txt"); fs::write(&file, "x\n").unwrap(); let mut acc = TurnDiffTracker::new(); let del_changes = HashMap::from([( file.clone(), FileChange::Delete { content: "x\n".to_string(), }, )]); acc.on_patch_begin(&del_changes); // Simulate apply: delete the file from disk. let baseline_mode = file_mode_for_path(&file).unwrap_or(FileMode::Regular); fs::remove_file(&file).unwrap(); let diff = acc.get_unified_diff().unwrap().unwrap(); let diff = normalize_diff_for_test(&diff, dir.path()); let expected = { let left_oid = git_blob_sha1_hex("x\n"); format!( r#"diff --git a/<TMP>/b.txt b/<TMP>/b.txt deleted file mode {baseline_mode} index {left_oid}..{ZERO_OID} --- a/<TMP>/b.txt +++ {DEV_NULL} @@ -1 +0,0 @@ -x "#, ) }; assert_eq!(diff, expected); } #[test] fn accumulates_move_and_update() { let dir = tempdir().unwrap(); let src = dir.path().join("src.txt"); let dest = dir.path().join("dst.txt"); fs::write(&src, "line\n").unwrap(); let mut acc = TurnDiffTracker::new(); let mv_changes = HashMap::from([( src.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: Some(dest.clone()), }, )]); acc.on_patch_begin(&mv_changes); // Simulate apply: move and update content. 
fs::rename(&src, &dest).unwrap(); fs::write(&dest, "line2\n").unwrap(); let out = acc.get_unified_diff().unwrap().unwrap(); let out = normalize_diff_for_test(&out, dir.path()); let expected = { let left_oid = git_blob_sha1_hex("line\n"); let right_oid = git_blob_sha1_hex("line2\n"); format!( r#"diff --git a/<TMP>/src.txt b/<TMP>/dst.txt index {left_oid}..{right_oid} --- a/<TMP>/src.txt +++ b/<TMP>/dst.txt @@ -1 +1 @@ -line +line2 "# ) }; assert_eq!(out, expected); } #[test] fn move_without_1change_yields_no_diff() { let dir = tempdir().unwrap(); let src = dir.path().join("moved.txt"); let dest = dir.path().join("renamed.txt"); fs::write(&src, "same\n").unwrap(); let mut acc = TurnDiffTracker::new(); let mv_changes = HashMap::from([( src.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: Some(dest.clone()), }, )]); acc.on_patch_begin(&mv_changes); // Simulate apply: move only, no content change. fs::rename(&src, &dest).unwrap(); let diff = acc.get_unified_diff().unwrap(); assert_eq!(diff, None); } #[test] fn move_declared_but_file_only_appears_at_dest_is_add() { let dir = tempdir().unwrap(); let src = dir.path().join("src.txt"); let dest = dir.path().join("dest.txt"); let mut acc = TurnDiffTracker::new(); let mv = HashMap::from([( src, FileChange::Update { unified_diff: "".into(), move_path: Some(dest.clone()), }, )]); acc.on_patch_begin(&mv); // No file existed initially; create only dest fs::write(&dest, "hello\n").unwrap(); let diff = acc.get_unified_diff().unwrap().unwrap(); let diff = normalize_diff_for_test(&diff, dir.path()); let expected = { let mode = file_mode_for_path(&dest).unwrap_or(FileMode::Regular); let right_oid = git_blob_sha1_hex("hello\n"); format!( r#"diff --git a/<TMP>/src.txt b/<TMP>/dest.txt new file mode {mode} index {ZERO_OID}..{right_oid} --- {DEV_NULL} +++ b/<TMP>/dest.txt @@ -0,0 +1 @@ +hello "#, ) }; assert_eq!(diff, expected); } #[test] fn update_persists_across_new_baseline_for_new_file() { let dir = 
tempdir().unwrap(); let a = dir.path().join("a.txt"); let b = dir.path().join("b.txt"); fs::write(&a, "foo\n").unwrap(); fs::write(&b, "z\n").unwrap(); let mut acc = TurnDiffTracker::new(); // First: update existing a.txt (baseline snapshot is created for a). let update_a = HashMap::from([( a.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: None, }, )]); acc.on_patch_begin(&update_a); // Simulate apply: modify a.txt on disk. fs::write(&a, "foo\nbar\n").unwrap(); let first = acc.get_unified_diff().unwrap().unwrap(); let first = normalize_diff_for_test(&first, dir.path()); let expected_first = { let left_oid = git_blob_sha1_hex("foo\n"); let right_oid = git_blob_sha1_hex("foo\nbar\n"); format!( r#"diff --git a/<TMP>/a.txt b/<TMP>/a.txt index {left_oid}..{right_oid} --- a/<TMP>/a.txt +++ b/<TMP>/a.txt @@ -1 +1,2 @@ foo +bar "# ) }; assert_eq!(first, expected_first); // Next: introduce a brand-new path b.txt into baseline snapshots via a delete change. let del_b = HashMap::from([( b.clone(), FileChange::Delete { content: "z\n".to_string(), }, )]); acc.on_patch_begin(&del_b); // Simulate apply: delete b.txt. 
let baseline_mode = file_mode_for_path(&b).unwrap_or(FileMode::Regular); fs::remove_file(&b).unwrap(); let combined = acc.get_unified_diff().unwrap().unwrap(); let combined = normalize_diff_for_test(&combined, dir.path()); let expected = { let left_oid_a = git_blob_sha1_hex("foo\n"); let right_oid_a = git_blob_sha1_hex("foo\nbar\n"); let left_oid_b = git_blob_sha1_hex("z\n"); format!( r#"diff --git a/<TMP>/a.txt b/<TMP>/a.txt index {left_oid_a}..{right_oid_a} --- a/<TMP>/a.txt +++ b/<TMP>/a.txt @@ -1 +1,2 @@ foo +bar diff --git a/<TMP>/b.txt b/<TMP>/b.txt deleted file mode {baseline_mode} index {left_oid_b}..{ZERO_OID} --- a/<TMP>/b.txt +++ {DEV_NULL} @@ -1 +0,0 @@ -z "#, ) }; assert_eq!(combined, expected); } #[test] fn binary_files_differ_update() { let dir = tempdir().unwrap(); let file = dir.path().join("bin.dat"); // Initial non-UTF8 bytes let left_bytes: Vec<u8> = vec![0xff, 0xfe, 0xfd, 0x00]; // Updated non-UTF8 bytes let right_bytes: Vec<u8> = vec![0x01, 0x02, 0x03, 0x00]; fs::write(&file, &left_bytes).unwrap(); let mut acc = TurnDiffTracker::new(); let update_changes = HashMap::from([( file.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: None, }, )]); acc.on_patch_begin(&update_changes); // Apply update on disk fs::write(&file, &right_bytes).unwrap(); let diff = acc.get_unified_diff().unwrap().unwrap(); let diff = normalize_diff_for_test(&diff, dir.path()); let expected = { let left_oid = format!("{:x}", git_blob_sha1_hex_bytes(&left_bytes)); let right_oid = format!("{:x}", git_blob_sha1_hex_bytes(&right_bytes)); format!( r#"diff --git a/<TMP>/bin.dat b/<TMP>/bin.dat index {left_oid}..{right_oid} --- a/<TMP>/bin.dat +++ b/<TMP>/bin.dat Binary files differ "# ) }; assert_eq!(diff, expected); } #[test] fn filenames_with_spaces_add_and_update() { let mut acc = TurnDiffTracker::new(); let dir = tempdir().unwrap(); let file = dir.path().join("name with spaces.txt"); // First patch: add file (baseline should be /dev/null). 
let add_changes = HashMap::from([( file.clone(), FileChange::Add { content: "foo\n".to_string(), }, )]); acc.on_patch_begin(&add_changes); // Simulate apply: create the file on disk. fs::write(&file, "foo\n").unwrap(); let first = acc.get_unified_diff().unwrap().unwrap(); let first = normalize_diff_for_test(&first, dir.path()); let expected_first = { let mode = file_mode_for_path(&file).unwrap_or(FileMode::Regular); let right_oid = git_blob_sha1_hex("foo\n"); format!( r#"diff --git a/<TMP>/name with spaces.txt b/<TMP>/name with spaces.txt new file mode {mode} index {ZERO_OID}..{right_oid} --- {DEV_NULL} +++ b/<TMP>/name with spaces.txt @@ -0,0 +1 @@ +foo "#, ) }; assert_eq!(first, expected_first); // Second patch: update the file on disk. let update_changes = HashMap::from([( file.clone(), FileChange::Update { unified_diff: "".to_owned(), move_path: None, }, )]); acc.on_patch_begin(&update_changes); // Simulate apply: append a new line with a space. fs::write(&file, "foo\nbar baz\n").unwrap(); let combined = acc.get_unified_diff().unwrap().unwrap(); let combined = normalize_diff_for_test(&combined, dir.path()); let expected_combined = { let mode = file_mode_for_path(&file).unwrap_or(FileMode::Regular); let right_oid = git_blob_sha1_hex("foo\nbar baz\n"); format!( r#"diff --git a/<TMP>/name with spaces.txt b/<TMP>/name with spaces.txt new file mode {mode} index {ZERO_OID}..{right_oid} --- {DEV_NULL} +++ b/<TMP>/name with spaces.txt @@ -0,0 +1,2 @@ +foo +bar baz "#, ) }; assert_eq!(combined, expected_combined); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/custom_prompts.rs
codex-rs/core/src/custom_prompts.rs
use codex_protocol::custom_prompts::CustomPrompt; use std::collections::HashSet; use std::path::Path; use std::path::PathBuf; use tokio::fs; /// Return the default prompts directory: `$CODEX_HOME/prompts`. /// If `CODEX_HOME` cannot be resolved, returns `None`. pub fn default_prompts_dir() -> Option<PathBuf> { crate::config::find_codex_home() .ok() .map(|home| home.join("prompts")) } /// Discover prompt files in the given directory, returning entries sorted by name. /// Non-files are ignored. If the directory does not exist or cannot be read, returns empty. pub async fn discover_prompts_in(dir: &Path) -> Vec<CustomPrompt> { discover_prompts_in_excluding(dir, &HashSet::new()).await } /// Discover prompt files in the given directory, excluding any with names in `exclude`. /// Returns entries sorted by name. Non-files are ignored. Missing/unreadable dir yields empty. pub async fn discover_prompts_in_excluding( dir: &Path, exclude: &HashSet<String>, ) -> Vec<CustomPrompt> { let mut out: Vec<CustomPrompt> = Vec::new(); let mut entries = match fs::read_dir(dir).await { Ok(entries) => entries, Err(_) => return out, }; while let Ok(Some(entry)) = entries.next_entry().await { let path = entry.path(); let is_file_like = fs::metadata(&path) .await .map(|m| m.is_file()) .unwrap_or(false); if !is_file_like { continue; } // Only include Markdown files with a .md extension. 
let is_md = path .extension() .and_then(|s| s.to_str()) .map(|ext| ext.eq_ignore_ascii_case("md")) .unwrap_or(false); if !is_md { continue; } let Some(name) = path .file_stem() .and_then(|s| s.to_str()) .map(str::to_string) else { continue; }; if exclude.contains(&name) { continue; } let content = match fs::read_to_string(&path).await { Ok(s) => s, Err(_) => continue, }; let (description, argument_hint, body) = parse_frontmatter(&content); out.push(CustomPrompt { name, path, content: body, description, argument_hint, }); } out.sort_by(|a, b| a.name.cmp(&b.name)); out } /// Parse optional YAML-like frontmatter at the beginning of `content`. /// Supported keys: /// - `description`: short description shown in the slash popup /// - `argument-hint` or `argument_hint`: brief hint string shown after the description /// Returns (description, argument_hint, body_without_frontmatter). fn parse_frontmatter(content: &str) -> (Option<String>, Option<String>, String) { let mut segments = content.split_inclusive('\n'); let Some(first_segment) = segments.next() else { return (None, None, String::new()); }; let first_line = first_segment.trim_end_matches(['\r', '\n']); if first_line.trim() != "---" { return (None, None, content.to_string()); } let mut desc: Option<String> = None; let mut hint: Option<String> = None; let mut frontmatter_closed = false; let mut consumed = first_segment.len(); for segment in segments { let line = segment.trim_end_matches(['\r', '\n']); let trimmed = line.trim(); if trimmed == "---" { frontmatter_closed = true; consumed += segment.len(); break; } if trimmed.is_empty() || trimmed.starts_with('#') { consumed += segment.len(); continue; } if let Some((k, v)) = trimmed.split_once(':') { let key = k.trim().to_ascii_lowercase(); let mut val = v.trim().to_string(); if val.len() >= 2 { let bytes = val.as_bytes(); let first = bytes[0]; let last = bytes[bytes.len() - 1]; if (first == b'\"' && last == b'\"') || (first == b'\'' && last == b'\'') { val = 
val[1..val.len().saturating_sub(1)].to_string(); } } match key.as_str() { "description" => desc = Some(val), "argument-hint" | "argument_hint" => hint = Some(val), _ => {} } } consumed += segment.len(); } if !frontmatter_closed { // Unterminated frontmatter: treat input as-is. return (None, None, content.to_string()); } let body = if consumed >= content.len() { String::new() } else { content[consumed..].to_string() }; (desc, hint, body) } #[cfg(test)] mod tests { use super::*; use std::fs; use tempfile::tempdir; #[tokio::test] async fn empty_when_dir_missing() { let tmp = tempdir().expect("create TempDir"); let missing = tmp.path().join("nope"); let found = discover_prompts_in(&missing).await; assert!(found.is_empty()); } #[tokio::test] async fn discovers_and_sorts_files() { let tmp = tempdir().expect("create TempDir"); let dir = tmp.path(); fs::write(dir.join("b.md"), b"b").unwrap(); fs::write(dir.join("a.md"), b"a").unwrap(); fs::create_dir(dir.join("subdir")).unwrap(); let found = discover_prompts_in(dir).await; let names: Vec<String> = found.into_iter().map(|e| e.name).collect(); assert_eq!(names, vec!["a", "b"]); } #[tokio::test] async fn excludes_builtins() { let tmp = tempdir().expect("create TempDir"); let dir = tmp.path(); fs::write(dir.join("init.md"), b"ignored").unwrap(); fs::write(dir.join("foo.md"), b"ok").unwrap(); let mut exclude = HashSet::new(); exclude.insert("init".to_string()); let found = discover_prompts_in_excluding(dir, &exclude).await; let names: Vec<String> = found.into_iter().map(|e| e.name).collect(); assert_eq!(names, vec!["foo"]); } #[tokio::test] async fn skips_non_utf8_files() { let tmp = tempdir().expect("create TempDir"); let dir = tmp.path(); // Valid UTF-8 file fs::write(dir.join("good.md"), b"hello").unwrap(); // Invalid UTF-8 content in .md file (e.g., lone 0xFF byte) fs::write(dir.join("bad.md"), vec![0xFF, 0xFE, b'\n']).unwrap(); let found = discover_prompts_in(dir).await; let names: Vec<String> = found.into_iter().map(|e| 
e.name).collect(); assert_eq!(names, vec!["good"]); } #[tokio::test] #[cfg(unix)] async fn discovers_symlinked_md_files() { let tmp = tempdir().expect("create TempDir"); let dir = tmp.path(); // Create a real file fs::write(dir.join("real.md"), b"real content").unwrap(); // Create a symlink to the real file std::os::unix::fs::symlink(dir.join("real.md"), dir.join("link.md")).unwrap(); let found = discover_prompts_in(dir).await; let names: Vec<String> = found.into_iter().map(|e| e.name).collect(); // Both real and link should be discovered, sorted alphabetically assert_eq!(names, vec!["link", "real"]); } #[tokio::test] async fn parses_frontmatter_and_strips_from_body() { let tmp = tempdir().expect("create TempDir"); let dir = tmp.path(); let file = dir.join("withmeta.md"); let text = "---\nname: ignored\ndescription: \"Quick review command\"\nargument-hint: \"[file] [priority]\"\n---\nActual body with $1 and $ARGUMENTS"; fs::write(&file, text).unwrap(); let found = discover_prompts_in(dir).await; assert_eq!(found.len(), 1); let p = &found[0]; assert_eq!(p.name, "withmeta"); assert_eq!(p.description.as_deref(), Some("Quick review command")); assert_eq!(p.argument_hint.as_deref(), Some("[file] [priority]")); // Body should not include the frontmatter delimiters. assert_eq!(p.content, "Actual body with $1 and $ARGUMENTS"); } #[test] fn parse_frontmatter_preserves_body_newlines() { let content = "---\r\ndescription: \"Line endings\"\r\nargument_hint: \"[arg]\"\r\n---\r\nFirst line\r\nSecond line\r\n"; let (desc, hint, body) = parse_frontmatter(content); assert_eq!(desc.as_deref(), Some("Line endings")); assert_eq!(hint.as_deref(), Some("[arg]")); assert_eq!(body, "First line\r\nSecond line\r\n"); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/apply_patch.rs
codex-rs/core/src/apply_patch.rs
use crate::codex::Session; use crate::codex::TurnContext; use crate::function_tool::FunctionCallError; use crate::protocol::FileChange; use crate::protocol::ReviewDecision; use crate::safety::SafetyCheck; use crate::safety::assess_patch_safety; use codex_apply_patch::ApplyPatchAction; use codex_apply_patch::ApplyPatchFileChange; use std::collections::HashMap; use std::path::PathBuf; pub const CODEX_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch"; pub(crate) enum InternalApplyPatchInvocation { /// The `apply_patch` call was handled programmatically, without any sort /// of sandbox, because the user explicitly approved it. This is the /// result to use with the `shell` function call that contained `apply_patch`. Output(Result<String, FunctionCallError>), /// The `apply_patch` call was approved, either automatically because it /// appears that it should be allowed based on the user's sandbox policy /// *or* because the user explicitly approved it. In either case, we use /// exec with [`CODEX_APPLY_PATCH_ARG1`] to realize the `apply_patch` call, /// but [`ApplyPatchExec::auto_approved`] is used to determine the sandbox /// used with the `exec()`. DelegateToExec(ApplyPatchExec), } #[derive(Debug)] pub(crate) struct ApplyPatchExec { pub(crate) action: ApplyPatchAction, pub(crate) user_explicitly_approved_this_action: bool, } pub(crate) async fn apply_patch( sess: &Session, turn_context: &TurnContext, call_id: &str, action: ApplyPatchAction, ) -> InternalApplyPatchInvocation { match assess_patch_safety( &action, turn_context.approval_policy, &turn_context.sandbox_policy, &turn_context.cwd, ) { SafetyCheck::AutoApprove { user_explicitly_approved, .. } => InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { action, user_explicitly_approved_this_action: user_explicitly_approved, }), SafetyCheck::AskUser => { // Compute a readable summary of path changes to include in the // approval request so the user can make an informed decision. 
// // Note that it might be worth expanding this approval request to // give the user the option to expand the set of writable roots so // that similar patches can be auto-approved in the future during // this session. let rx_approve = sess .request_patch_approval( turn_context, call_id.to_owned(), convert_apply_patch_to_protocol(&action), None, None, ) .await; match rx_approve.await.unwrap_or_default() { ReviewDecision::Approved | ReviewDecision::ApprovedExecpolicyAmendment { .. } | ReviewDecision::ApprovedForSession => { InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { action, user_explicitly_approved_this_action: true, }) } ReviewDecision::Denied | ReviewDecision::Abort => { InternalApplyPatchInvocation::Output(Err(FunctionCallError::RespondToModel( "patch rejected by user".to_string(), ))) } } } SafetyCheck::Reject { reason } => InternalApplyPatchInvocation::Output(Err( FunctionCallError::RespondToModel(format!("patch rejected: {reason}")), )), } } pub(crate) fn convert_apply_patch_to_protocol( action: &ApplyPatchAction, ) -> HashMap<PathBuf, FileChange> { let changes = action.changes(); let mut result = HashMap::with_capacity(changes.len()); for (path, change) in changes { let protocol_change = match change { ApplyPatchFileChange::Add { content } => FileChange::Add { content: content.clone(), }, ApplyPatchFileChange::Delete { content } => FileChange::Delete { content: content.clone(), }, ApplyPatchFileChange::Update { unified_diff, move_path, new_content: _new_content, } => FileChange::Update { unified_diff: unified_diff.clone(), move_path: move_path.clone(), }, }; result.insert(path.clone(), protocol_change); } result } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; use tempfile::tempdir; #[test] fn convert_apply_patch_maps_add_variant() { let tmp = tempdir().expect("tmp"); let p = tmp.path().join("a.txt"); // Create an action with a single Add change let action = ApplyPatchAction::new_add_for_test(&p, 
"hello".to_string()); let got = convert_apply_patch_to_protocol(&action); assert_eq!( got.get(&p), Some(&FileChange::Add { content: "hello".to_string() }) ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/user_instructions.rs
codex-rs/core/src/user_instructions.rs
use serde::Deserialize; use serde::Serialize; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; pub const USER_INSTRUCTIONS_OPEN_TAG_LEGACY: &str = "<user_instructions>"; pub const USER_INSTRUCTIONS_PREFIX: &str = "# AGENTS.md instructions for "; pub const SKILL_INSTRUCTIONS_PREFIX: &str = "<skill"; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename = "user_instructions", rename_all = "snake_case")] pub(crate) struct UserInstructions { pub directory: String, pub text: String, } impl UserInstructions { pub fn is_user_instructions(message: &[ContentItem]) -> bool { if let [ContentItem::InputText { text }] = message { text.starts_with(USER_INSTRUCTIONS_PREFIX) || text.starts_with(USER_INSTRUCTIONS_OPEN_TAG_LEGACY) } else { false } } } impl From<UserInstructions> for ResponseItem { fn from(ui: UserInstructions) -> Self { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: format!( "{USER_INSTRUCTIONS_PREFIX}{directory}\n\n<INSTRUCTIONS>\n{contents}\n</INSTRUCTIONS>", directory = ui.directory, contents = ui.text ), }], } } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename = "skill_instructions", rename_all = "snake_case")] pub(crate) struct SkillInstructions { pub name: String, pub path: String, pub contents: String, } impl SkillInstructions { pub fn is_skill_instructions(message: &[ContentItem]) -> bool { if let [ContentItem::InputText { text }] = message { text.starts_with(SKILL_INSTRUCTIONS_PREFIX) } else { false } } } impl From<SkillInstructions> for ResponseItem { fn from(si: SkillInstructions) -> Self { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: format!( "<skill>\n<name>{}</name>\n<path>{}</path>\n{}\n</skill>", si.name, si.path, si.contents ), }], } } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename = "developer_instructions", rename_all = 
"snake_case")] pub(crate) struct DeveloperInstructions { text: String, } impl DeveloperInstructions { pub fn new<T: Into<String>>(text: T) -> Self { Self { text: text.into() } } pub fn into_text(self) -> String { self.text } } impl From<DeveloperInstructions> for ResponseItem { fn from(di: DeveloperInstructions) -> Self { ResponseItem::Message { id: None, role: "developer".to_string(), content: vec![ContentItem::InputText { text: di.into_text(), }], } } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_user_instructions() { let user_instructions = UserInstructions { directory: "test_directory".to_string(), text: "test_text".to_string(), }; let response_item: ResponseItem = user_instructions.into(); let ResponseItem::Message { role, content, .. } = response_item else { panic!("expected ResponseItem::Message"); }; assert_eq!(role, "user"); let [ContentItem::InputText { text }] = content.as_slice() else { panic!("expected one InputText content item"); }; assert_eq!( text, "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>", ); } #[test] fn test_is_user_instructions() { assert!(UserInstructions::is_user_instructions( &[ContentItem::InputText { text: "# AGENTS.md instructions for test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(), }] )); assert!(UserInstructions::is_user_instructions(&[ ContentItem::InputText { text: "<user_instructions>test_text</user_instructions>".to_string(), } ])); assert!(!UserInstructions::is_user_instructions(&[ ContentItem::InputText { text: "test_text".to_string(), } ])); } #[test] fn test_skill_instructions() { let skill_instructions = SkillInstructions { name: "demo-skill".to_string(), path: "skills/demo/SKILL.md".to_string(), contents: "body".to_string(), }; let response_item: ResponseItem = skill_instructions.into(); let ResponseItem::Message { role, content, .. 
} = response_item else { panic!("expected ResponseItem::Message"); }; assert_eq!(role, "user"); let [ContentItem::InputText { text }] = content.as_slice() else { panic!("expected one InputText content item"); }; assert_eq!( text, "<skill>\n<name>demo-skill</name>\n<path>skills/demo/SKILL.md</path>\nbody\n</skill>", ); } #[test] fn test_is_skill_instructions() { assert!(SkillInstructions::is_skill_instructions(&[ ContentItem::InputText { text: "<skill>\n<name>demo-skill</name>\n<path>skills/demo/SKILL.md</path>\nbody\n</skill>" .to_string(), } ])); assert!(!SkillInstructions::is_skill_instructions(&[ ContentItem::InputText { text: "regular text".to_string(), } ])); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false