repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/shell_snapshot.rs
codex-rs/core/src/shell_snapshot.rs
use std::path::Path; use std::path::PathBuf; use std::time::Duration; use crate::shell::Shell; use crate::shell::ShellType; use crate::shell::get_shell; use anyhow::Context; use anyhow::Result; use anyhow::anyhow; use anyhow::bail; use tokio::fs; use tokio::process::Command; use tokio::time::timeout; use uuid::Uuid; #[derive(Clone, Debug, PartialEq, Eq)] pub struct ShellSnapshot { pub path: PathBuf, } const SNAPSHOT_TIMEOUT: Duration = Duration::from_secs(10); impl ShellSnapshot { pub async fn try_new(codex_home: &Path, shell: &Shell) -> Option<Self> { let extension = match shell.shell_type { ShellType::PowerShell => "ps1", _ => "sh", }; let path = codex_home .join("shell_snapshots") .join(format!("{}.{}", Uuid::new_v4(), extension)); match write_shell_snapshot(shell.shell_type.clone(), &path).await { Ok(path) => { tracing::info!("Shell snapshot successfully created: {}", path.display()); Some(Self { path }) } Err(err) => { tracing::warn!( "Failed to create shell snapshot for {}: {err:?}", shell.name() ); None } } } } impl Drop for ShellSnapshot { fn drop(&mut self) { if let Err(err) = std::fs::remove_file(&self.path) { tracing::warn!( "Failed to delete shell snapshot at {:?}: {err:?}", self.path ); } } } pub async fn write_shell_snapshot(shell_type: ShellType, output_path: &Path) -> Result<PathBuf> { if shell_type == ShellType::PowerShell || shell_type == ShellType::Cmd { bail!("Shell snapshot not supported yet for {shell_type:?}"); } let shell = get_shell(shell_type.clone(), None) .with_context(|| format!("No available shell for {shell_type:?}"))?; let raw_snapshot = capture_snapshot(&shell).await?; let snapshot = strip_snapshot_preamble(&raw_snapshot)?; if let Some(parent) = output_path.parent() { let parent_display = parent.display(); fs::create_dir_all(parent) .await .with_context(|| format!("Failed to create snapshot parent {parent_display}"))?; } let snapshot_path = output_path.display(); fs::write(output_path, snapshot) .await .with_context(|| 
format!("Failed to write snapshot to {snapshot_path}"))?; Ok(output_path.to_path_buf()) } async fn capture_snapshot(shell: &Shell) -> Result<String> { let shell_type = shell.shell_type.clone(); match shell_type { ShellType::Zsh => run_shell_script(shell, zsh_snapshot_script()).await, ShellType::Bash => run_shell_script(shell, bash_snapshot_script()).await, ShellType::Sh => run_shell_script(shell, sh_snapshot_script()).await, ShellType::PowerShell => run_shell_script(shell, powershell_snapshot_script()).await, ShellType::Cmd => bail!("Shell snapshotting is not yet supported for {shell_type:?}"), } } fn strip_snapshot_preamble(snapshot: &str) -> Result<String> { let marker = "# Snapshot file"; let Some(start) = snapshot.find(marker) else { bail!("Snapshot output missing marker {marker}"); }; Ok(snapshot[start..].to_string()) } async fn run_shell_script(shell: &Shell, script: &str) -> Result<String> { run_shell_script_with_timeout(shell, script, SNAPSHOT_TIMEOUT).await } async fn run_shell_script_with_timeout( shell: &Shell, script: &str, snapshot_timeout: Duration, ) -> Result<String> { let args = shell.derive_exec_args(script, true); let shell_name = shell.name(); // Handler is kept as guard to control the drop. The `mut` pattern is required because .args() // returns a ref of handler. let mut handler = Command::new(&args[0]); handler.args(&args[1..]); handler.kill_on_drop(true); let output = timeout(snapshot_timeout, handler.output()) .await .map_err(|_| anyhow!("Snapshot command timed out for {shell_name}"))? 
.with_context(|| format!("Failed to execute {shell_name}"))?; if !output.status.success() { let status = output.status; let stderr = String::from_utf8_lossy(&output.stderr); bail!("Snapshot command exited with status {status}: {stderr}"); } Ok(String::from_utf8_lossy(&output.stdout).into_owned()) } fn zsh_snapshot_script() -> &'static str { r##"print '# Snapshot file' print '# Unset all aliases to avoid conflicts with functions' print 'unalias -a 2>/dev/null || true' print '# Functions' functions print '' setopt_count=$(setopt | wc -l | tr -d ' ') print "# setopts $setopt_count" setopt | sed 's/^/setopt /' print '' alias_count=$(alias -L | wc -l | tr -d ' ') print "# aliases $alias_count" alias -L print '' export_count=$(export -p | wc -l | tr -d ' ') print "# exports $export_count" export -p "## } fn bash_snapshot_script() -> &'static str { r##"echo '# Snapshot file' echo '# Unset all aliases to avoid conflicts with functions' unalias -a 2>/dev/null || true echo '# Functions' declare -f echo '' bash_opts=$(set -o | awk '$2=="on"{print $1}') bash_opt_count=$(printf '%s\n' "$bash_opts" | sed '/^$/d' | wc -l | tr -d ' ') echo "# setopts $bash_opt_count" if [ -n "$bash_opts" ]; then printf 'set -o %s\n' $bash_opts fi echo '' alias_count=$(alias -p | wc -l | tr -d ' ') echo "# aliases $alias_count" alias -p echo '' export_count=$(export -p | wc -l | tr -d ' ') echo "# exports $export_count" export -p "## } fn sh_snapshot_script() -> &'static str { r##"echo '# Snapshot file' echo '# Unset all aliases to avoid conflicts with functions' unalias -a 2>/dev/null || true echo '# Functions' if command -v typeset >/dev/null 2>&1; then typeset -f elif command -v declare >/dev/null 2>&1; then declare -f fi echo '' if set -o >/dev/null 2>&1; then sh_opts=$(set -o | awk '$2=="on"{print $1}') sh_opt_count=$(printf '%s\n' "$sh_opts" | sed '/^$/d' | wc -l | tr -d ' ') echo "# setopts $sh_opt_count" if [ -n "$sh_opts" ]; then printf 'set -o %s\n' $sh_opts fi else echo '# setopts 0' fi 
echo '' if alias >/dev/null 2>&1; then alias_count=$(alias | wc -l | tr -d ' ') echo "# aliases $alias_count" alias echo '' else echo '# aliases 0' fi if export -p >/dev/null 2>&1; then export_count=$(export -p | wc -l | tr -d ' ') echo "# exports $export_count" export -p else export_count=$(env | wc -l | tr -d ' ') echo "# exports $export_count" env | sort | while IFS='=' read -r key value; do escaped=$(printf "%s" "$value" | sed "s/'/'\"'\"'/g") printf "export %s='%s'\n" "$key" "$escaped" done fi "## } fn powershell_snapshot_script() -> &'static str { r##"$ErrorActionPreference = 'Stop' Write-Output '# Snapshot file' Write-Output '# Unset all aliases to avoid conflicts with functions' Write-Output 'Remove-Item Alias:* -ErrorAction SilentlyContinue' Write-Output '# Functions' Get-ChildItem Function: | ForEach-Object { "function {0} {{`n{1}`n}}" -f $_.Name, $_.Definition } Write-Output '' $aliases = Get-Alias Write-Output ("# aliases " + $aliases.Count) $aliases | ForEach-Object { "Set-Alias -Name {0} -Value {1}" -f $_.Name, $_.Definition } Write-Output '' $envVars = Get-ChildItem Env: Write-Output ("# exports " + $envVars.Count) $envVars | ForEach-Object { $escaped = $_.Value -replace "'", "''" "`$env:{0}='{1}'" -f $_.Name, $escaped } "## } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; #[cfg(target_os = "linux")] use std::os::unix::fs::PermissionsExt; #[cfg(target_os = "linux")] use std::process::Command as StdCommand; use tempfile::tempdir; #[cfg(not(target_os = "windows"))] fn assert_posix_snapshot_sections(snapshot: &str) { assert!(snapshot.contains("# Snapshot file")); assert!(snapshot.contains("aliases ")); assert!(snapshot.contains("exports ")); assert!( snapshot.contains("PATH"), "snapshot should capture a PATH export" ); assert!(snapshot.contains("setopts ")); } async fn get_snapshot(shell_type: ShellType) -> Result<String> { let dir = tempdir()?; let path = dir.path().join("snapshot.sh"); write_shell_snapshot(shell_type, 
&path).await?; let content = fs::read_to_string(&path).await?; Ok(content) } #[test] fn strip_snapshot_preamble_removes_leading_output() { let snapshot = "noise\n# Snapshot file\nexport PATH=/bin\n"; let cleaned = strip_snapshot_preamble(snapshot).expect("snapshot marker exists"); assert_eq!(cleaned, "# Snapshot file\nexport PATH=/bin\n"); } #[test] fn strip_snapshot_preamble_requires_marker() { let result = strip_snapshot_preamble("missing header"); assert!(result.is_err()); } #[cfg(unix)] #[tokio::test] async fn try_new_creates_and_deletes_snapshot_file() -> Result<()> { let dir = tempdir()?; let shell = Shell { shell_type: ShellType::Bash, shell_path: PathBuf::from("/bin/bash"), shell_snapshot: None, }; let snapshot = ShellSnapshot::try_new(dir.path(), &shell) .await .expect("snapshot should be created"); let path = snapshot.path.clone(); assert!(path.exists()); drop(snapshot); assert!(!path.exists()); Ok(()) } #[cfg(target_os = "linux")] #[tokio::test] async fn timed_out_snapshot_shell_is_terminated() -> Result<()> { use std::process::Stdio; use tokio::time::Duration as TokioDuration; use tokio::time::Instant; use tokio::time::sleep; let dir = tempdir()?; let shell_path = dir.path().join("hanging-shell.sh"); let pid_path = dir.path().join("pid"); let script = format!( "#!/bin/sh\n\ echo $$ > {}\n\ sleep 30\n", pid_path.display() ); fs::write(&shell_path, script).await?; let mut permissions = std::fs::metadata(&shell_path)?.permissions(); permissions.set_mode(0o755); std::fs::set_permissions(&shell_path, permissions)?; let shell = Shell { shell_type: ShellType::Sh, shell_path, shell_snapshot: None, }; let err = run_shell_script_with_timeout(&shell, "ignored", Duration::from_millis(500)) .await .expect_err("snapshot shell should time out"); assert!( err.to_string().contains("timed out"), "expected timeout error, got {err:?}" ); let pid = fs::read_to_string(&pid_path) .await .expect("snapshot shell writes its pid before timing out") .trim() .parse::<i32>()?; let 
deadline = Instant::now() + TokioDuration::from_secs(1); loop { let kill_status = StdCommand::new("kill") .arg("-0") .arg(pid.to_string()) .stderr(Stdio::null()) .stdout(Stdio::null()) .status()?; if !kill_status.success() { break; } if Instant::now() >= deadline { panic!("timed out snapshot shell is still alive after grace period"); } sleep(TokioDuration::from_millis(50)).await; } Ok(()) } #[cfg(target_os = "macos")] #[tokio::test] async fn macos_zsh_snapshot_includes_sections() -> Result<()> { let snapshot = get_snapshot(ShellType::Zsh).await?; assert_posix_snapshot_sections(&snapshot); Ok(()) } #[cfg(target_os = "linux")] #[tokio::test] async fn linux_bash_snapshot_includes_sections() -> Result<()> { let snapshot = get_snapshot(ShellType::Bash).await?; assert_posix_snapshot_sections(&snapshot); Ok(()) } #[cfg(target_os = "linux")] #[tokio::test] async fn linux_sh_snapshot_includes_sections() -> Result<()> { let snapshot = get_snapshot(ShellType::Sh).await?; assert_posix_snapshot_sections(&snapshot); Ok(()) } #[cfg(target_os = "windows")] #[ignore] #[tokio::test] async fn windows_powershell_snapshot_includes_sections() -> Result<()> { let snapshot = get_snapshot(ShellType::PowerShell).await?; assert!(snapshot.contains("# Snapshot file")); assert!(snapshot.contains("aliases ")); assert!(snapshot.contains("exports ")); Ok(()) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/user_shell_command.rs
codex-rs/core/src/user_shell_command.rs
use std::time::Duration; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use crate::codex::TurnContext; use crate::exec::ExecToolCallOutput; use crate::tools::format_exec_output_str; pub const USER_SHELL_COMMAND_OPEN: &str = "<user_shell_command>"; pub const USER_SHELL_COMMAND_CLOSE: &str = "</user_shell_command>"; pub fn is_user_shell_command_text(text: &str) -> bool { let trimmed = text.trim_start(); let lowered = trimmed.to_ascii_lowercase(); lowered.starts_with(USER_SHELL_COMMAND_OPEN) } fn format_duration_line(duration: Duration) -> String { let duration_seconds = duration.as_secs_f64(); format!("Duration: {duration_seconds:.4} seconds") } fn format_user_shell_command_body( command: &str, exec_output: &ExecToolCallOutput, turn_context: &TurnContext, ) -> String { let mut sections = Vec::new(); sections.push("<command>".to_string()); sections.push(command.to_string()); sections.push("</command>".to_string()); sections.push("<result>".to_string()); sections.push(format!("Exit code: {}", exec_output.exit_code)); sections.push(format_duration_line(exec_output.duration)); sections.push("Output:".to_string()); sections.push(format_exec_output_str( exec_output, turn_context.truncation_policy, )); sections.push("</result>".to_string()); sections.join("\n") } pub fn format_user_shell_command_record( command: &str, exec_output: &ExecToolCallOutput, turn_context: &TurnContext, ) -> String { let body = format_user_shell_command_body(command, exec_output, turn_context); format!("{USER_SHELL_COMMAND_OPEN}\n{body}\n{USER_SHELL_COMMAND_CLOSE}") } pub fn user_shell_command_record_item( command: &str, exec_output: &ExecToolCallOutput, turn_context: &TurnContext, ) -> ResponseItem { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: format_user_shell_command_record(command, exec_output, turn_context), }], } } #[cfg(test)] mod tests { use super::*; use crate::codex::make_session_and_context; 
use crate::exec::StreamOutput; use pretty_assertions::assert_eq; #[test] fn detects_user_shell_command_text_variants() { assert!(is_user_shell_command_text( "<user_shell_command>\necho hi\n</user_shell_command>" )); assert!(!is_user_shell_command_text("echo hi")); } #[tokio::test] async fn formats_basic_record() { let exec_output = ExecToolCallOutput { exit_code: 0, stdout: StreamOutput::new("hi".to_string()), stderr: StreamOutput::new(String::new()), aggregated_output: StreamOutput::new("hi".to_string()), duration: Duration::from_secs(1), timed_out: false, }; let (_, turn_context) = make_session_and_context().await; let item = user_shell_command_record_item("echo hi", &exec_output, &turn_context); let ResponseItem::Message { content, .. } = item else { panic!("expected message"); }; let [ContentItem::InputText { text }] = content.as_slice() else { panic!("expected input text"); }; assert_eq!( text, "<user_shell_command>\n<command>\necho hi\n</command>\n<result>\nExit code: 0\nDuration: 1.0000 seconds\nOutput:\nhi\n</result>\n</user_shell_command>" ); } #[tokio::test] async fn uses_aggregated_output_over_streams() { let exec_output = ExecToolCallOutput { exit_code: 42, stdout: StreamOutput::new("stdout-only".to_string()), stderr: StreamOutput::new("stderr-only".to_string()), aggregated_output: StreamOutput::new("combined output wins".to_string()), duration: Duration::from_millis(120), timed_out: false, }; let (_, turn_context) = make_session_and_context().await; let record = format_user_shell_command_record("false", &exec_output, &turn_context); assert_eq!( record, "<user_shell_command>\n<command>\nfalse\n</command>\n<result>\nExit code: 42\nDuration: 0.1200 seconds\nOutput:\ncombined output wins\n</result>\n</user_shell_command>" ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/compact.rs
codex-rs/core/src/compact.rs
use std::sync::Arc; use crate::ModelProviderInfo; use crate::Prompt; use crate::client_common::ResponseEvent; use crate::codex::Session; use crate::codex::TurnContext; use crate::codex::get_last_assistant_message_from_turn; use crate::error::CodexErr; use crate::error::Result as CodexResult; use crate::features::Feature; use crate::protocol::CompactedItem; use crate::protocol::ContextCompactedEvent; use crate::protocol::EventMsg; use crate::protocol::TaskStartedEvent; use crate::protocol::TurnContextItem; use crate::protocol::WarningEvent; use crate::truncate::TruncationPolicy; use crate::truncate::approx_token_count; use crate::truncate::truncate_text; use crate::util::backoff; use codex_protocol::items::TurnItem; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseInputItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::RolloutItem; use codex_protocol::user_input::UserInput; use futures::prelude::*; use tracing::error; pub const SUMMARIZATION_PROMPT: &str = include_str!("../templates/compact/prompt.md"); pub const SUMMARY_PREFIX: &str = include_str!("../templates/compact/summary_prefix.md"); const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000; pub(crate) fn should_use_remote_compact_task( session: &Session, provider: &ModelProviderInfo, ) -> bool { provider.is_openai() && session.enabled(Feature::RemoteCompaction) } pub(crate) async fn run_inline_auto_compact_task( sess: Arc<Session>, turn_context: Arc<TurnContext>, ) { let prompt = turn_context.compact_prompt().to_string(); let input = vec![UserInput::Text { text: prompt }]; run_compact_task_inner(sess, turn_context, input).await; } pub(crate) async fn run_compact_task( sess: Arc<Session>, turn_context: Arc<TurnContext>, input: Vec<UserInput>, ) { let start_event = EventMsg::TaskStarted(TaskStartedEvent { model_context_window: turn_context.client.get_model_context_window(), }); sess.send_event(&turn_context, start_event).await; 
run_compact_task_inner(sess.clone(), turn_context, input).await; } async fn run_compact_task_inner( sess: Arc<Session>, turn_context: Arc<TurnContext>, input: Vec<UserInput>, ) { let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input); let mut history = sess.clone_history().await; history.record_items( &[initial_input_for_turn.into()], turn_context.truncation_policy, ); let mut truncated_count = 0usize; let max_retries = turn_context.client.get_provider().stream_max_retries(); let mut retries = 0; let rollout_item = RolloutItem::TurnContext(TurnContextItem { cwd: turn_context.cwd.clone(), approval_policy: turn_context.approval_policy, sandbox_policy: turn_context.sandbox_policy.clone(), model: turn_context.client.get_model(), effort: turn_context.client.get_reasoning_effort(), summary: turn_context.client.get_reasoning_summary(), base_instructions: turn_context.base_instructions.clone(), user_instructions: turn_context.user_instructions.clone(), developer_instructions: turn_context.developer_instructions.clone(), final_output_json_schema: turn_context.final_output_json_schema.clone(), truncation_policy: Some(turn_context.truncation_policy.into()), }); sess.persist_rollout_items(&[rollout_item]).await; loop { let turn_input = history.get_history_for_prompt(); let prompt = Prompt { input: turn_input.clone(), ..Default::default() }; let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await; match attempt_result { Ok(()) => { if truncated_count > 0 { sess.notify_background_event( turn_context.as_ref(), format!( "Trimmed {truncated_count} older conversation item(s) before compacting so the prompt fits the model context window." ), ) .await; } break; } Err(CodexErr::Interrupted) => { return; } Err(e @ CodexErr::ContextWindowExceeded) => { if turn_input.len() > 1 { // Trim from the beginning to preserve cache (prefix-based) and keep recent messages intact. 
error!( "Context window exceeded while compacting; removing oldest history item. Error: {e}" ); history.remove_first_item(); truncated_count += 1; retries = 0; continue; } sess.set_total_tokens_full(turn_context.as_ref()).await; let event = EventMsg::Error(e.to_error_event(None)); sess.send_event(&turn_context, event).await; return; } Err(e) => { if retries < max_retries { retries += 1; let delay = backoff(retries); sess.notify_stream_error( turn_context.as_ref(), format!("Reconnecting... {retries}/{max_retries}"), e, ) .await; tokio::time::sleep(delay).await; continue; } else { let event = EventMsg::Error(e.to_error_event(None)); sess.send_event(&turn_context, event).await; return; } } } } let history_snapshot = sess.clone_history().await.get_history(); let summary_suffix = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default(); let summary_text = format!("{SUMMARY_PREFIX}\n{summary_suffix}"); let user_messages = collect_user_messages(&history_snapshot); let initial_context = sess.build_initial_context(turn_context.as_ref()); let mut new_history = build_compacted_history(initial_context, &user_messages, &summary_text); let ghost_snapshots: Vec<ResponseItem> = history_snapshot .iter() .filter(|item| matches!(item, ResponseItem::GhostSnapshot { .. })) .cloned() .collect(); new_history.extend(ghost_snapshots); sess.replace_history(new_history).await; sess.recompute_token_usage(&turn_context).await; let rollout_item = RolloutItem::Compacted(CompactedItem { message: summary_text.clone(), replacement_history: None, }); sess.persist_rollout_items(&[rollout_item]).await; let event = EventMsg::ContextCompacted(ContextCompactedEvent {}); sess.send_event(&turn_context, event).await; let warning = EventMsg::Warning(WarningEvent { message: "Heads up: Long conversations and multiple compactions can cause the model to be less accurate. 
Start a new conversation when possible to keep conversations small and targeted.".to_string(), }); sess.send_event(&turn_context, warning).await; } pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> { let mut pieces = Vec::new(); for item in content { match item { ContentItem::InputText { text } | ContentItem::OutputText { text } => { if !text.is_empty() { pieces.push(text.as_str()); } } ContentItem::InputImage { .. } => {} } } if pieces.is_empty() { None } else { Some(pieces.join("\n")) } } pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> { items .iter() .filter_map(|item| match crate::event_mapping::parse_turn_item(item) { Some(TurnItem::UserMessage(user)) => { if is_summary_message(&user.message()) { None } else { Some(user.message()) } } _ => None, }) .collect() } pub(crate) fn is_summary_message(message: &str) -> bool { message.starts_with(format!("{SUMMARY_PREFIX}\n").as_str()) } pub(crate) fn build_compacted_history( initial_context: Vec<ResponseItem>, user_messages: &[String], summary_text: &str, ) -> Vec<ResponseItem> { build_compacted_history_with_limit( initial_context, user_messages, summary_text, COMPACT_USER_MESSAGE_MAX_TOKENS, ) } fn build_compacted_history_with_limit( mut history: Vec<ResponseItem>, user_messages: &[String], summary_text: &str, max_tokens: usize, ) -> Vec<ResponseItem> { let mut selected_messages: Vec<String> = Vec::new(); if max_tokens > 0 { let mut remaining = max_tokens; for message in user_messages.iter().rev() { if remaining == 0 { break; } let tokens = approx_token_count(message); if tokens <= remaining { selected_messages.push(message.clone()); remaining = remaining.saturating_sub(tokens); } else { let truncated = truncate_text(message, TruncationPolicy::Tokens(remaining)); selected_messages.push(truncated); break; } } selected_messages.reverse(); } for message in &selected_messages { history.push(ResponseItem::Message { id: None, role: "user".to_string(), content: 
vec![ContentItem::InputText { text: message.clone(), }], }); } let summary_text = if summary_text.is_empty() { "(no summary available)".to_string() } else { summary_text.to_string() }; history.push(ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: summary_text }], }); history } async fn drain_to_completed( sess: &Session, turn_context: &TurnContext, prompt: &Prompt, ) -> CodexResult<()> { let mut stream = turn_context.client.clone().stream(prompt).await?; loop { let maybe_event = stream.next().await; let Some(event) = maybe_event else { return Err(CodexErr::Stream( "stream closed before response.completed".into(), None, )); }; match event { Ok(ResponseEvent::OutputItemDone(item)) => { sess.record_into_history(std::slice::from_ref(&item), turn_context) .await; } Ok(ResponseEvent::RateLimits(snapshot)) => { sess.update_rate_limits(turn_context, snapshot).await; } Ok(ResponseEvent::Completed { token_usage, .. }) => { sess.update_token_usage_info(turn_context, token_usage.as_ref()) .await; return Ok(()); } Ok(_) => continue, Err(e) => return Err(e), } } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; #[test] fn content_items_to_text_joins_non_empty_segments() { let items = vec![ ContentItem::InputText { text: "hello".to_string(), }, ContentItem::OutputText { text: String::new(), }, ContentItem::OutputText { text: "world".to_string(), }, ]; let joined = content_items_to_text(&items); assert_eq!(Some("hello\nworld".to_string()), joined); } #[test] fn content_items_to_text_ignores_image_only_content() { let items = vec![ContentItem::InputImage { image_url: "file://image.png".to_string(), }]; let joined = content_items_to_text(&items); assert_eq!(None, joined); } #[test] fn collect_user_messages_extracts_user_text_only() { let items = vec![ ResponseItem::Message { id: Some("assistant".to_string()), role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: 
"ignored".to_string(), }], }, ResponseItem::Message { id: Some("user".to_string()), role: "user".to_string(), content: vec![ContentItem::InputText { text: "first".to_string(), }], }, ResponseItem::Other, ]; let collected = collect_user_messages(&items); assert_eq!(vec!["first".to_string()], collected); } #[test] fn collect_user_messages_filters_session_prefix_entries() { let items = vec![ ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "# AGENTS.md instructions for project\n\n<INSTRUCTIONS>\ndo things\n</INSTRUCTIONS>" .to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "<ENVIRONMENT_CONTEXT>cwd=/tmp</ENVIRONMENT_CONTEXT>".to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "real user message".to_string(), }], }, ]; let collected = collect_user_messages(&items); assert_eq!(vec!["real user message".to_string()], collected); } #[test] fn build_token_limited_compacted_history_truncates_overlong_user_messages() { // Use a small truncation limit so the test remains fast while still validating // that oversized user content is truncated. let max_tokens = 16; let big = "word ".repeat(200); let history = super::build_compacted_history_with_limit( Vec::new(), std::slice::from_ref(&big), "SUMMARY", max_tokens, ); assert_eq!(history.len(), 2); let truncated_message = &history[0]; let summary_message = &history[1]; let truncated_text = match truncated_message { ResponseItem::Message { role, content, .. 
} if role == "user" => { content_items_to_text(content).unwrap_or_default() } other => panic!("unexpected item in history: {other:?}"), }; assert!( truncated_text.contains("tokens truncated"), "expected truncation marker in truncated user message" ); assert!( !truncated_text.contains(&big), "truncated user message should not include the full oversized user text" ); let summary_text = match summary_message { ResponseItem::Message { role, content, .. } if role == "user" => { content_items_to_text(content).unwrap_or_default() } other => panic!("unexpected item in history: {other:?}"), }; assert_eq!(summary_text, "SUMMARY"); } #[test] fn build_token_limited_compacted_history_appends_summary_message() { let initial_context: Vec<ResponseItem> = Vec::new(); let user_messages = vec!["first user message".to_string()]; let summary_text = "summary text"; let history = build_compacted_history(initial_context, &user_messages, summary_text); assert!( !history.is_empty(), "expected compacted history to include summary" ); let last = history.last().expect("history should have a summary entry"); let summary = match last { ResponseItem::Message { role, content, .. } if role == "user" => { content_items_to_text(content).unwrap_or_default() } other => panic!("expected summary message, found {other:?}"), }; assert_eq!(summary, summary_text); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/exec_policy.rs
codex-rs/core/src/exec_policy.rs
use std::io::ErrorKind; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use arc_swap::ArcSwap; use crate::command_safety::is_dangerous_command::requires_initial_appoval; use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigLayerStackOrdering; use codex_execpolicy::AmendError; use codex_execpolicy::Decision; use codex_execpolicy::Error as ExecPolicyRuleError; use codex_execpolicy::Evaluation; use codex_execpolicy::Policy; use codex_execpolicy::PolicyParser; use codex_execpolicy::RuleMatch; use codex_execpolicy::blocking_append_allow_prefix_rule; use codex_protocol::approvals::ExecPolicyAmendment; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; use thiserror::Error; use tokio::fs; use tokio::task::spawn_blocking; use crate::bash::parse_shell_lc_plain_commands; use crate::features::Feature; use crate::features::Features; use crate::sandboxing::SandboxPermissions; use crate::tools::sandboxing::ExecApprovalRequirement; const FORBIDDEN_REASON: &str = "execpolicy forbids this command"; const PROMPT_CONFLICT_REASON: &str = "execpolicy requires approval for this command, but AskForApproval is set to Never"; const PROMPT_REASON: &str = "execpolicy requires approval for this command"; const RULES_DIR_NAME: &str = "rules"; const RULE_EXTENSION: &str = "rules"; const DEFAULT_POLICY_FILE: &str = "default.rules"; fn is_policy_match(rule_match: &RuleMatch) -> bool { match rule_match { RuleMatch::PrefixRuleMatch { .. } => true, RuleMatch::HeuristicsRuleMatch { .. 
} => false, } } #[derive(Debug, Error)] pub enum ExecPolicyError { #[error("failed to read execpolicy files from {dir}: {source}")] ReadDir { dir: PathBuf, source: std::io::Error, }, #[error("failed to read execpolicy file {path}: {source}")] ReadFile { path: PathBuf, source: std::io::Error, }, #[error("failed to parse execpolicy file {path}: {source}")] ParsePolicy { path: String, source: codex_execpolicy::Error, }, } #[derive(Debug, Error)] pub enum ExecPolicyUpdateError { #[error("failed to update execpolicy file {path}: {source}")] AppendRule { path: PathBuf, source: AmendError }, #[error("failed to join blocking execpolicy update task: {source}")] JoinBlockingTask { source: tokio::task::JoinError }, #[error("failed to update in-memory execpolicy: {source}")] AddRule { #[from] source: ExecPolicyRuleError, }, #[error("cannot append execpolicy rule because execpolicy feature is disabled")] FeatureDisabled, } pub(crate) struct ExecPolicyManager { policy: ArcSwap<Policy>, } impl ExecPolicyManager { pub(crate) fn new(policy: Arc<Policy>) -> Self { Self { policy: ArcSwap::from(policy), } } pub(crate) async fn load( features: &Features, config_stack: &ConfigLayerStack, ) -> Result<Self, ExecPolicyError> { let policy = load_exec_policy_for_features(features, config_stack).await?; Ok(Self::new(Arc::new(policy))) } pub(crate) fn current(&self) -> Arc<Policy> { self.policy.load_full() } pub(crate) async fn create_exec_approval_requirement_for_command( &self, features: &Features, command: &[String], approval_policy: AskForApproval, sandbox_policy: &SandboxPolicy, sandbox_permissions: SandboxPermissions, ) -> ExecApprovalRequirement { let exec_policy = self.current(); let commands = parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]); let heuristics_fallback = |cmd: &[String]| { if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) { Decision::Prompt } else { Decision::Allow } }; let evaluation = 
exec_policy.check_multiple(commands.iter(), &heuristics_fallback); match evaluation.decision { Decision::Forbidden => ExecApprovalRequirement::Forbidden { reason: FORBIDDEN_REASON.to_string(), }, Decision::Prompt => { if matches!(approval_policy, AskForApproval::Never) { ExecApprovalRequirement::Forbidden { reason: PROMPT_CONFLICT_REASON.to_string(), } } else { ExecApprovalRequirement::NeedsApproval { reason: derive_prompt_reason(&evaluation), proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { try_derive_execpolicy_amendment_for_prompt_rules( &evaluation.matched_rules, ) } else { None }, } } } Decision::Allow => ExecApprovalRequirement::Skip { // Bypass sandbox if execpolicy allows the command bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| { is_policy_match(rule_match) && rule_match.decision() == Decision::Allow }), proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules) } else { None }, }, } } pub(crate) async fn append_amendment_and_update( &self, codex_home: &Path, amendment: &ExecPolicyAmendment, ) -> Result<(), ExecPolicyUpdateError> { let policy_path = default_policy_path(codex_home); let prefix = amendment.command.clone(); spawn_blocking({ let policy_path = policy_path.clone(); let prefix = prefix.clone(); move || blocking_append_allow_prefix_rule(&policy_path, &prefix) }) .await .map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })? 
.map_err(|source| ExecPolicyUpdateError::AppendRule { path: policy_path, source, })?; let mut updated_policy = self.current().as_ref().clone(); updated_policy.add_prefix_rule(&prefix, Decision::Allow)?; self.policy.store(Arc::new(updated_policy)); Ok(()) } } impl Default for ExecPolicyManager { fn default() -> Self { Self::new(Arc::new(Policy::empty())) } } async fn load_exec_policy_for_features( features: &Features, config_stack: &ConfigLayerStack, ) -> Result<Policy, ExecPolicyError> { if !features.enabled(Feature::ExecPolicy) { Ok(Policy::empty()) } else { load_exec_policy(config_stack).await } } pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy, ExecPolicyError> { // Iterate the layers in increasing order of precedence, adding the *.rules // from each layer, so that higher-precedence layers can override // rules defined in lower-precedence ones. let mut policy_paths = Vec::new(); for layer in config_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst) { if let Some(config_folder) = layer.config_folder() { #[expect(clippy::expect_used)] let policy_dir = config_folder.join(RULES_DIR_NAME).expect("safe join"); let layer_policy_paths = collect_policy_files(&policy_dir).await?; policy_paths.extend(layer_policy_paths); } } let mut parser = PolicyParser::new(); for policy_path in &policy_paths { let contents = fs::read_to_string(policy_path) .await .map_err(|source| ExecPolicyError::ReadFile { path: policy_path.clone(), source, })?; let identifier = policy_path.to_string_lossy().to_string(); parser .parse(&identifier, &contents) .map_err(|source| ExecPolicyError::ParsePolicy { path: identifier, source, })?; } let policy = parser.build(); tracing::debug!("loaded execpolicy from {} files", policy_paths.len()); Ok(policy) } fn default_policy_path(codex_home: &Path) -> PathBuf { codex_home.join(RULES_DIR_NAME).join(DEFAULT_POLICY_FILE) } /// Derive a proposed execpolicy amendment when a command requires user approval /// - If any 
execpolicy rule prompts, return None, because an amendment would not skip that policy requirement. /// - Otherwise return the first heuristics Prompt. /// - Examples: /// - execpolicy: empty. Command: `["python"]`. Heuristics prompt -> `Some(vec!["python"])`. /// - execpolicy: empty. Command: `["bash", "-c", "cd /some/folder && prog1 --option1 arg1 && prog2 --option2 arg2"]`. /// Parsed commands include `cd /some/folder`, `prog1 --option1 arg1`, and `prog2 --option2 arg2`. If heuristics allow `cd` but prompt /// on `prog1`, we return `Some(vec!["prog1", "--option1", "arg1"])`. /// - execpolicy: contains a `prompt for prefix ["prog2"]` rule. For the same command as above, /// we return `None` because an execpolicy prompt still applies even if we amend execpolicy to allow ["prog1", "--option1", "arg1"]. fn try_derive_execpolicy_amendment_for_prompt_rules( matched_rules: &[RuleMatch], ) -> Option<ExecPolicyAmendment> { if matched_rules .iter() .any(|rule_match| is_policy_match(rule_match) && rule_match.decision() == Decision::Prompt) { return None; } matched_rules .iter() .find_map(|rule_match| match rule_match { RuleMatch::HeuristicsRuleMatch { command, decision: Decision::Prompt, } => Some(ExecPolicyAmendment::from(command.clone())), _ => None, }) } /// - Note: we only use this amendment when the command fails to run in sandbox and codex prompts the user to run outside the sandbox /// - The purpose of this amendment is to bypass sandbox for similar commands in the future /// - If any execpolicy rule matches, return None, because we would already be running command outside the sandbox fn try_derive_execpolicy_amendment_for_allow_rules( matched_rules: &[RuleMatch], ) -> Option<ExecPolicyAmendment> { if matched_rules.iter().any(is_policy_match) { return None; } matched_rules .iter() .find_map(|rule_match| match rule_match { RuleMatch::HeuristicsRuleMatch { command, decision: Decision::Allow, } => Some(ExecPolicyAmendment::from(command.clone())), _ => None, }) } /// 
Only return PROMPT_REASON when an execpolicy rule drove the prompt decision. fn derive_prompt_reason(evaluation: &Evaluation) -> Option<String> { evaluation.matched_rules.iter().find_map(|rule_match| { if is_policy_match(rule_match) && rule_match.decision() == Decision::Prompt { Some(PROMPT_REASON.to_string()) } else { None } }) } async fn collect_policy_files(dir: impl AsRef<Path>) -> Result<Vec<PathBuf>, ExecPolicyError> { let dir = dir.as_ref(); let mut read_dir = match fs::read_dir(dir).await { Ok(read_dir) => read_dir, Err(err) if err.kind() == ErrorKind::NotFound => return Ok(Vec::new()), Err(source) => { return Err(ExecPolicyError::ReadDir { dir: dir.to_path_buf(), source, }); } }; let mut policy_paths = Vec::new(); while let Some(entry) = read_dir .next_entry() .await .map_err(|source| ExecPolicyError::ReadDir { dir: dir.to_path_buf(), source, })? { let path = entry.path(); let file_type = entry .file_type() .await .map_err(|source| ExecPolicyError::ReadDir { dir: dir.to_path_buf(), source, })?; if path .extension() .and_then(|ext| ext.to_str()) .is_some_and(|ext| ext == RULE_EXTENSION) && file_type.is_file() { policy_paths.push(path); } } policy_paths.sort(); tracing::debug!( "loaded {} .rules files in {}", policy_paths.len(), dir.display() ); Ok(policy_paths) } #[cfg(test)] mod tests { use super::*; use crate::config_loader::ConfigLayerEntry; use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigRequirements; use crate::features::Feature; use crate::features::Features; use codex_app_server_protocol::ConfigLayerSource; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use std::fs; use std::path::Path; use std::sync::Arc; use tempfile::tempdir; use toml::Value as TomlValue; fn config_stack_for_dot_codex_folder(dot_codex_folder: &Path) -> ConfigLayerStack { let dot_codex_folder = 
AbsolutePathBuf::from_absolute_path(dot_codex_folder) .expect("absolute dot_codex_folder"); let layer = ConfigLayerEntry::new( ConfigLayerSource::Project { dot_codex_folder }, TomlValue::Table(Default::default()), ); ConfigLayerStack::new(vec![layer], ConfigRequirements::default()).expect("ConfigLayerStack") } #[tokio::test] async fn returns_empty_policy_when_feature_disabled() { let mut features = Features::with_defaults(); features.disable(Feature::ExecPolicy); let temp_dir = tempdir().expect("create temp dir"); let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); let manager = ExecPolicyManager::load(&features, &config_stack) .await .expect("manager result"); let policy = manager.current(); let commands = [vec!["rm".to_string()]]; assert_eq!( Evaluation { decision: Decision::Allow, matched_rules: vec![RuleMatch::HeuristicsRuleMatch { command: vec!["rm".to_string()], decision: Decision::Allow }], }, policy.check_multiple(commands.iter(), &|_| Decision::Allow) ); assert!(!temp_dir.path().join(RULES_DIR_NAME).exists()); } #[tokio::test] async fn collect_policy_files_returns_empty_when_dir_missing() { let temp_dir = tempdir().expect("create temp dir"); let policy_dir = temp_dir.path().join(RULES_DIR_NAME); let files = collect_policy_files(&policy_dir) .await .expect("collect policy files"); assert!(files.is_empty()); } #[tokio::test] async fn loads_policies_from_policy_subdirectory() { let temp_dir = tempdir().expect("create temp dir"); let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); let policy_dir = temp_dir.path().join(RULES_DIR_NAME); fs::create_dir_all(&policy_dir).expect("create policy dir"); fs::write( policy_dir.join("deny.rules"), r#"prefix_rule(pattern=["rm"], decision="forbidden")"#, ) .expect("write policy file"); let policy = load_exec_policy(&config_stack) .await .expect("policy result"); let command = [vec!["rm".to_string()]]; assert_eq!( Evaluation { decision: Decision::Forbidden, matched_rules: 
vec![RuleMatch::PrefixRuleMatch { matched_prefix: vec!["rm".to_string()], decision: Decision::Forbidden }], }, policy.check_multiple(command.iter(), &|_| Decision::Allow) ); } #[tokio::test] async fn ignores_policies_outside_policy_dir() { let temp_dir = tempdir().expect("create temp dir"); let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); fs::write( temp_dir.path().join("root.rules"), r#"prefix_rule(pattern=["ls"], decision="prompt")"#, ) .expect("write policy file"); let policy = load_exec_policy(&config_stack) .await .expect("policy result"); let command = [vec!["ls".to_string()]]; assert_eq!( Evaluation { decision: Decision::Allow, matched_rules: vec![RuleMatch::HeuristicsRuleMatch { command: vec!["ls".to_string()], decision: Decision::Allow }], }, policy.check_multiple(command.iter(), &|_| Decision::Allow) ); } #[tokio::test] async fn loads_policies_from_multiple_config_layers() -> anyhow::Result<()> { let user_dir = tempdir()?; let project_dir = tempdir()?; let user_policy_dir = user_dir.path().join(RULES_DIR_NAME); fs::create_dir_all(&user_policy_dir)?; fs::write( user_policy_dir.join("user.rules"), r#"prefix_rule(pattern=["rm"], decision="forbidden")"#, )?; let project_policy_dir = project_dir.path().join(RULES_DIR_NAME); fs::create_dir_all(&project_policy_dir)?; fs::write( project_policy_dir.join("project.rules"), r#"prefix_rule(pattern=["ls"], decision="prompt")"#, )?; let user_config_toml = AbsolutePathBuf::from_absolute_path(user_dir.path().join("config.toml"))?; let project_dot_codex_folder = AbsolutePathBuf::from_absolute_path(project_dir.path())?; let layers = vec![ ConfigLayerEntry::new( ConfigLayerSource::User { file: user_config_toml, }, TomlValue::Table(Default::default()), ), ConfigLayerEntry::new( ConfigLayerSource::Project { dot_codex_folder: project_dot_codex_folder, }, TomlValue::Table(Default::default()), ), ]; let config_stack = ConfigLayerStack::new(layers, ConfigRequirements::default())?; let policy = 
load_exec_policy(&config_stack).await?; assert_eq!( Evaluation { decision: Decision::Forbidden, matched_rules: vec![RuleMatch::PrefixRuleMatch { matched_prefix: vec!["rm".to_string()], decision: Decision::Forbidden }], }, policy.check_multiple([vec!["rm".to_string()]].iter(), &|_| Decision::Allow) ); assert_eq!( Evaluation { decision: Decision::Prompt, matched_rules: vec![RuleMatch::PrefixRuleMatch { matched_prefix: vec!["ls".to_string()], decision: Decision::Prompt }], }, policy.check_multiple([vec!["ls".to_string()]].iter(), &|_| Decision::Allow) ); Ok(()) } #[tokio::test] async fn evaluates_bash_lc_inner_commands() { let policy_src = r#" prefix_rule(pattern=["rm"], decision="forbidden") "#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let forbidden_script = vec![ "bash".to_string(), "-lc".to_string(), "rm -rf /tmp".to_string(), ]; let manager = ExecPolicyManager::new(policy); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &forbidden_script, AskForApproval::OnRequest, &SandboxPolicy::DangerFullAccess, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::Forbidden { reason: FORBIDDEN_REASON.to_string() } ); } #[tokio::test] async fn exec_approval_requirement_prefers_execpolicy_match() { let policy_src = r#"prefix_rule(pattern=["rm"], decision="prompt")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; let manager = ExecPolicyManager::new(policy); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::OnRequest, &SandboxPolicy::DangerFullAccess, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: 
Some(PROMPT_REASON.to_string()), proposed_execpolicy_amendment: None, } ); } #[tokio::test] async fn exec_approval_requirement_respects_approval_policy() { let policy_src = r#"prefix_rule(pattern=["rm"], decision="prompt")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; let manager = ExecPolicyManager::new(policy); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::Never, &SandboxPolicy::DangerFullAccess, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::Forbidden { reason: PROMPT_CONFLICT_REASON.to_string() } ); } #[tokio::test] async fn exec_approval_requirement_falls_back_to_heuristics() { let command = vec!["cargo".to_string(), "build".to_string()]; let manager = ExecPolicyManager::default(); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::UnlessTrusted, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(command)) } ); } #[tokio::test] async fn heuristics_apply_when_other_commands_match_policy() { let policy_src = r#"prefix_rule(pattern=["apple"], decision="allow")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec![ "bash".to_string(), "-lc".to_string(), "apple | orange".to_string(), ]; assert_eq!( ExecPolicyManager::new(policy) .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::UnlessTrusted, &SandboxPolicy::DangerFullAccess, SandboxPermissions::UseDefault, ) .await, ExecApprovalRequirement::NeedsApproval { reason: 
None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ "orange".to_string() ])) } ); } #[tokio::test] async fn append_execpolicy_amendment_updates_policy_and_file() { let codex_home = tempdir().expect("create temp dir"); let prefix = vec!["echo".to_string(), "hello".to_string()]; let manager = ExecPolicyManager::default(); manager .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(prefix)) .await .expect("update policy"); let updated_policy = manager.current(); let evaluation = updated_policy.check( &["echo".to_string(), "hello".to_string(), "world".to_string()], &|_| Decision::Allow, ); assert!(matches!( evaluation, Evaluation { decision: Decision::Allow, .. } )); let contents = fs::read_to_string(default_policy_path(codex_home.path())) .expect("policy file should have been created"); assert_eq!( contents, r#"prefix_rule(pattern=["echo", "hello"], decision="allow") "# ); } #[tokio::test] async fn append_execpolicy_amendment_rejects_empty_prefix() { let codex_home = tempdir().expect("create temp dir"); let manager = ExecPolicyManager::default(); let result = manager .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(vec![])) .await; assert!(matches!( result, Err(ExecPolicyUpdateError::AppendRule { source: AmendError::EmptyPrefix, .. 
}) )); } #[tokio::test] async fn proposed_execpolicy_amendment_is_present_for_single_command_without_policy_match() { let command = vec!["cargo".to_string(), "build".to_string()]; let manager = ExecPolicyManager::default(); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::UnlessTrusted, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(command)) } ); } #[tokio::test] async fn proposed_execpolicy_amendment_is_disabled_when_execpolicy_feature_disabled() { let command = vec!["cargo".to_string(), "build".to_string()]; let mut features = Features::with_defaults(); features.disable(Feature::ExecPolicy); let manager = ExecPolicyManager::default(); let requirement = manager .create_exec_approval_requirement_for_command( &features, &command, AskForApproval::UnlessTrusted, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: None, } ); } #[tokio::test] async fn proposed_execpolicy_amendment_is_omitted_when_policy_prompts() { let policy_src = r#"prefix_rule(pattern=["rm"], decision="prompt")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; let manager = ExecPolicyManager::new(policy); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::OnRequest, &SandboxPolicy::DangerFullAccess, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: Some(PROMPT_REASON.to_string()), proposed_execpolicy_amendment: None, } ); } #[tokio::test] async fn 
proposed_execpolicy_amendment_is_present_for_multi_command_scripts() { let command = vec![ "bash".to_string(), "-lc".to_string(), "cargo build && echo ok".to_string(), ]; let manager = ExecPolicyManager::default(); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::UnlessTrusted, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ "cargo".to_string(), "build".to_string() ])), } ); } #[tokio::test] async fn proposed_execpolicy_amendment_uses_first_no_match_in_multi_command_scripts() { let policy_src = r#"prefix_rule(pattern=["cat"], decision="allow")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec![ "bash".to_string(), "-lc".to_string(), "cat && apple".to_string(), ]; assert_eq!( ExecPolicyManager::new(policy) .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::UnlessTrusted, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ "apple".to_string() ])), } ); } #[tokio::test] async fn proposed_execpolicy_amendment_is_present_when_heuristics_allow() { let command = vec!["echo".to_string(), "safe".to_string()]; let manager = ExecPolicyManager::default(); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::OnRequest, &SandboxPolicy::ReadOnly, SandboxPermissions::UseDefault, ) .await; assert_eq!( requirement, ExecApprovalRequirement::Skip { bypass_sandbox: false, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(command)), } ); } #[tokio::test] async fn 
proposed_execpolicy_amendment_is_suppressed_when_policy_matches_allow() { let policy_src = r#"prefix_rule(pattern=["echo"], decision="allow")"#; let mut parser = PolicyParser::new(); parser .parse("test.rules", policy_src) .expect("parse policy"); let policy = Arc::new(parser.build()); let command = vec!["echo".to_string(), "safe".to_string()]; let manager = ExecPolicyManager::new(policy); let requirement = manager .create_exec_approval_requirement_for_command( &Features::with_defaults(), &command, AskForApproval::OnRequest, &SandboxPolicy::ReadOnly,
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/mcp_tool_call.rs
codex-rs/core/src/mcp_tool_call.rs
use std::time::Instant; use tracing::error; use crate::codex::Session; use crate::codex::TurnContext; use crate::protocol::EventMsg; use crate::protocol::McpInvocation; use crate::protocol::McpToolCallBeginEvent; use crate::protocol::McpToolCallEndEvent; use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::ResponseInputItem; /// Handles the specified tool call dispatches the appropriate /// `McpToolCallBegin` and `McpToolCallEnd` events to the `Session`. pub(crate) async fn handle_mcp_tool_call( sess: &Session, turn_context: &TurnContext, call_id: String, server: String, tool_name: String, arguments: String, ) -> ResponseInputItem { // Parse the `arguments` as JSON. An empty string is OK, but invalid JSON // is not. let arguments_value = if arguments.trim().is_empty() { None } else { match serde_json::from_str::<serde_json::Value>(&arguments) { Ok(value) => Some(value), Err(e) => { error!("failed to parse tool call arguments: {e}"); return ResponseInputItem::FunctionCallOutput { call_id: call_id.clone(), output: FunctionCallOutputPayload { content: format!("err: {e}"), success: Some(false), ..Default::default() }, }; } } }; let invocation = McpInvocation { server: server.clone(), tool: tool_name.clone(), arguments: arguments_value.clone(), }; let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id: call_id.clone(), invocation: invocation.clone(), }); notify_mcp_tool_call_event(sess, turn_context, tool_call_begin_event).await; let start = Instant::now(); // Perform the tool call. 
let result = sess .call_tool(&server, &tool_name, arguments_value.clone()) .await .map_err(|e| format!("tool call error: {e:?}")); if let Err(e) = &result { tracing::warn!("MCP tool call error: {e:?}"); } let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.clone(), invocation, duration: start.elapsed(), result: result.clone(), }); notify_mcp_tool_call_event(sess, turn_context, tool_call_end_event.clone()).await; ResponseInputItem::McpToolCallOutput { call_id, result } } async fn notify_mcp_tool_call_event(sess: &Session, turn_context: &TurnContext, event: EventMsg) { sess.send_event(turn_context, event).await; }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/spawn.rs
codex-rs/core/src/spawn.rs
use std::collections::HashMap; use std::path::PathBuf; use std::process::Stdio; use tokio::process::Child; use tokio::process::Command; use tracing::trace; use crate::protocol::SandboxPolicy; /// Experimental environment variable that will be set to some non-empty value /// if both of the following are true: /// /// 1. The process was spawned by Codex as part of a shell tool call. /// 2. SandboxPolicy.has_full_network_access() was false for the tool call. /// /// We may try to have just one environment variable for all sandboxing /// attributes, so this may change in the future. pub const CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR: &str = "CODEX_SANDBOX_NETWORK_DISABLED"; /// Should be set when the process is spawned under a sandbox. Currently, the /// value is "seatbelt" for macOS, but it may change in the future to /// accommodate sandboxing configuration and other sandboxing mechanisms. pub const CODEX_SANDBOX_ENV_VAR: &str = "CODEX_SANDBOX"; #[derive(Debug, Clone, Copy)] pub enum StdioPolicy { RedirectForShellTool, Inherit, } /// Spawns the appropriate child process for the ExecParams and SandboxPolicy, /// ensuring the args and environment variables used to create the `Command` /// (and `Child`) honor the configuration. /// /// For now, we take `SandboxPolicy` as a parameter to spawn_child() because /// we need to determine whether to set the /// `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` environment variable. 
pub(crate) async fn spawn_child_async( program: PathBuf, args: Vec<String>, #[cfg_attr(not(unix), allow(unused_variables))] arg0: Option<&str>, cwd: PathBuf, sandbox_policy: &SandboxPolicy, stdio_policy: StdioPolicy, env: HashMap<String, String>, ) -> std::io::Result<Child> { trace!( "spawn_child_async: {program:?} {args:?} {arg0:?} {cwd:?} {sandbox_policy:?} {stdio_policy:?} {env:?}" ); let mut cmd = Command::new(&program); #[cfg(unix)] cmd.arg0(arg0.map_or_else(|| program.to_string_lossy().to_string(), String::from)); cmd.args(args); cmd.current_dir(cwd); cmd.env_clear(); cmd.envs(env); if !sandbox_policy.has_full_network_access() { cmd.env(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR, "1"); } // If this Codex process dies (including being killed via SIGKILL), we want // any child processes that were spawned as part of a `"shell"` tool call // to also be terminated. #[cfg(unix)] unsafe { #[cfg(target_os = "linux")] let parent_pid = libc::getpid(); cmd.pre_exec(move || { if libc::setpgid(0, 0) == -1 { return Err(std::io::Error::last_os_error()); } // This relies on prctl(2), so it only works on Linux. #[cfg(target_os = "linux")] { // This prctl call effectively requests, "deliver SIGTERM when my // current parent dies." if libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) == -1 { return Err(std::io::Error::last_os_error()); } // Though if there was a race condition and this pre_exec() block is // run _after_ the parent (i.e., the Codex process) has already // exited, then parent will be the closest configured "subreaper" // ancestor process, or PID 1 (init). If the Codex process has exited // already, so should the child process. if libc::getppid() != parent_pid { libc::raise(libc::SIGTERM); } } Ok(()) }); } match stdio_policy { StdioPolicy::RedirectForShellTool => { // Do not create a file descriptor for stdin because otherwise some // commands may hang forever waiting for input. 
For example, ripgrep has // a heuristic where it may try to read from stdin as explained here: // https://github.com/BurntSushi/ripgrep/blob/e2362d4d5185d02fa857bf381e7bd52e66fafc73/crates/core/flags/hiargs.rs#L1101-L1103 cmd.stdin(Stdio::null()); cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); } StdioPolicy::Inherit => { // Inherit stdin, stdout, and stderr from the parent process. cmd.stdin(Stdio::inherit()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()); } } cmd.kill_on_drop(true).spawn() }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/seatbelt.rs
codex-rs/core/src/seatbelt.rs
#![cfg(target_os = "macos")] use std::collections::HashMap; use std::ffi::CStr; use std::path::Path; use std::path::PathBuf; use tokio::process::Child; use crate::protocol::SandboxPolicy; use crate::spawn::CODEX_SANDBOX_ENV_VAR; use crate::spawn::StdioPolicy; use crate::spawn::spawn_child_async; const MACOS_SEATBELT_BASE_POLICY: &str = include_str!("seatbelt_base_policy.sbpl"); const MACOS_SEATBELT_NETWORK_POLICY: &str = include_str!("seatbelt_network_policy.sbpl"); /// When working with `sandbox-exec`, only consider `sandbox-exec` in `/usr/bin` /// to defend against an attacker trying to inject a malicious version on the /// PATH. If /usr/bin/sandbox-exec has been tampered with, then the attacker /// already has root access. pub(crate) const MACOS_PATH_TO_SEATBELT_EXECUTABLE: &str = "/usr/bin/sandbox-exec"; pub async fn spawn_command_under_seatbelt( command: Vec<String>, command_cwd: PathBuf, sandbox_policy: &SandboxPolicy, sandbox_policy_cwd: &Path, stdio_policy: StdioPolicy, mut env: HashMap<String, String>, ) -> std::io::Result<Child> { let args = create_seatbelt_command_args(command, sandbox_policy, sandbox_policy_cwd); let arg0 = None; env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string()); spawn_child_async( PathBuf::from(MACOS_PATH_TO_SEATBELT_EXECUTABLE), args, arg0, command_cwd, sandbox_policy, stdio_policy, env, ) .await } pub(crate) fn create_seatbelt_command_args( command: Vec<String>, sandbox_policy: &SandboxPolicy, sandbox_policy_cwd: &Path, ) -> Vec<String> { let (file_write_policy, file_write_dir_params) = { if sandbox_policy.has_full_disk_write_access() { // Allegedly, this is more permissive than `(allow file-write*)`. 
( r#"(allow file-write* (regex #"^/"))"#.to_string(), Vec::new(), ) } else { let writable_roots = sandbox_policy.get_writable_roots_with_cwd(sandbox_policy_cwd); let mut writable_folder_policies: Vec<String> = Vec::new(); let mut file_write_params = Vec::new(); for (index, wr) in writable_roots.iter().enumerate() { // Canonicalize to avoid mismatches like /var vs /private/var on macOS. let canonical_root = wr .root .as_path() .canonicalize() .unwrap_or_else(|_| wr.root.to_path_buf()); let root_param = format!("WRITABLE_ROOT_{index}"); file_write_params.push((root_param.clone(), canonical_root)); if wr.read_only_subpaths.is_empty() { writable_folder_policies.push(format!("(subpath (param \"{root_param}\"))")); } else { // Add parameters for each read-only subpath and generate // the `(require-not ...)` clauses. let mut require_parts: Vec<String> = Vec::new(); require_parts.push(format!("(subpath (param \"{root_param}\"))")); for (subpath_index, ro) in wr.read_only_subpaths.iter().enumerate() { let canonical_ro = ro .as_path() .canonicalize() .unwrap_or_else(|_| ro.to_path_buf()); let ro_param = format!("WRITABLE_ROOT_{index}_RO_{subpath_index}"); require_parts .push(format!("(require-not (subpath (param \"{ro_param}\")))")); file_write_params.push((ro_param, canonical_ro)); } let policy_component = format!("(require-all {} )", require_parts.join(" ")); writable_folder_policies.push(policy_component); } } if writable_folder_policies.is_empty() { ("".to_string(), Vec::new()) } else { let file_write_policy = format!( "(allow file-write*\n{}\n)", writable_folder_policies.join(" ") ); (file_write_policy, file_write_params) } } }; let file_read_policy = if sandbox_policy.has_full_disk_read_access() { "; allow read-only file operations\n(allow file-read*)" } else { "" }; // TODO(mbolin): apply_patch calls must also honor the SandboxPolicy. 
    // Network access is likewise all-or-nothing via the bundled policy file.
    let network_policy = if sandbox_policy.has_full_network_access() {
        MACOS_SEATBELT_NETWORK_POLICY
    } else {
        ""
    };

    // Assemble the final SBPL program from the base policy plus the computed
    // read/write/network sections.
    let full_policy = format!(
        "{MACOS_SEATBELT_BASE_POLICY}\n{file_read_policy}\n{file_write_policy}\n{network_policy}"
    );

    // Parameters referenced by the policy: writable roots plus macOS-specific
    // per-user directories.
    let dir_params = [file_write_dir_params, macos_dir_params()].concat();

    // argv layout: -p <policy> [-DKEY=PATH ...] -- <command...>
    let mut seatbelt_args: Vec<String> = vec!["-p".to_string(), full_policy];
    let definition_args = dir_params
        .into_iter()
        .map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy()));
    seatbelt_args.extend(definition_args);

    seatbelt_args.push("--".to_string());
    seatbelt_args.extend(command);
    seatbelt_args
}

/// Wraps libc::confstr to return a String.
///
/// Returns `None` when `confstr` reports an error/unknown name (len == 0) or
/// when the resulting bytes are not valid UTF-8.
fn confstr(name: libc::c_int) -> Option<String> {
    let mut buf = vec![0_i8; (libc::PATH_MAX as usize) + 1];
    let len = unsafe { libc::confstr(name, buf.as_mut_ptr(), buf.len()) };
    if len == 0 {
        return None;
    }
    // confstr guarantees NUL-termination when len > 0.
    // NOTE(review): if `len > buf.len()` the value was truncated to fit the
    // buffer and the truncated string is returned as-is — confirm that is
    // acceptable for the directories queried here.
    let cstr = unsafe { CStr::from_ptr(buf.as_ptr()) };
    cstr.to_str().ok().map(ToString::to_string)
}

/// Wraps confstr to return a canonicalized PathBuf.
///
/// Falls back to the un-canonicalized path when canonicalization fails.
fn confstr_path(name: libc::c_int) -> Option<PathBuf> {
    let s = confstr(name)?;
    let path = PathBuf::from(s);
    path.canonicalize().ok().or(Some(path))
}

/// Extra `-D` parameters for macOS per-user directories; currently only the
/// Darwin user cache directory (empty when it cannot be determined).
fn macos_dir_params() -> Vec<(String, PathBuf)> {
    if let Some(p) = confstr_path(libc::_CS_DARWIN_USER_CACHE_DIR) {
        return vec![("DARWIN_USER_CACHE_DIR".to_string(), p)];
    }
    vec![]
}

#[cfg(test)]
mod tests {
    use super::MACOS_SEATBELT_BASE_POLICY;
    use super::create_seatbelt_command_args;
    use super::macos_dir_params;
    use crate::protocol::SandboxPolicy;
    use crate::seatbelt::MACOS_PATH_TO_SEATBELT_EXECUTABLE;
    use pretty_assertions::assert_eq;
    use std::fs;
    use std::path::Path;
    use std::path::PathBuf;
    use std::process::Command;
    use tempfile::TempDir;

    #[test]
    fn create_seatbelt_args_with_read_only_git_and_codex_subpaths() {
        // Create a temporary workspace with two writable roots: one containing
        // top-level .git and .codex directories and one without them.
let tmp = TempDir::new().expect("tempdir"); let PopulatedTmp { vulnerable_root, vulnerable_root_canonical, dot_git_canonical, dot_codex_canonical, empty_root, empty_root_canonical, } = populate_tmpdir(tmp.path()); let cwd = tmp.path().join("cwd"); fs::create_dir_all(&cwd).expect("create cwd"); // Build a policy that only includes the two test roots as writable and // does not automatically include defaults TMPDIR or /tmp. let policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![vulnerable_root, empty_root] .into_iter() .map(|p| p.try_into().unwrap()) .collect(), network_access: false, exclude_tmpdir_env_var: true, exclude_slash_tmp: true, }; // Create the Seatbelt command to wrap a shell command that tries to // write to .codex/config.toml in the vulnerable root. let shell_command: Vec<String> = [ "bash", "-c", "echo 'sandbox_mode = \"danger-full-access\"' > \"$1\"", "bash", dot_codex_canonical .join("config.toml") .to_string_lossy() .as_ref(), ] .iter() .map(std::string::ToString::to_string) .collect(); let args = create_seatbelt_command_args(shell_command.clone(), &policy, &cwd); // Build the expected policy text using a raw string for readability. // Note that the policy includes: // - the base policy, // - read-only access to the filesystem, // - write access to WRITABLE_ROOT_0 (but not its .git or .codex), WRITABLE_ROOT_1, and cwd as WRITABLE_ROOT_2. 
let expected_policy = format!( r#"{MACOS_SEATBELT_BASE_POLICY} ; allow read-only file operations (allow file-read*) (allow file-write* (require-all (subpath (param "WRITABLE_ROOT_0")) (require-not (subpath (param "WRITABLE_ROOT_0_RO_0"))) (require-not (subpath (param "WRITABLE_ROOT_0_RO_1"))) ) (subpath (param "WRITABLE_ROOT_1")) (subpath (param "WRITABLE_ROOT_2")) ) "#, ); let mut expected_args = vec![ "-p".to_string(), expected_policy, format!( "-DWRITABLE_ROOT_0={}", vulnerable_root_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_0_RO_0={}", dot_git_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_0_RO_1={}", dot_codex_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_1={}", empty_root_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_2={}", cwd.canonicalize() .expect("canonicalize cwd") .to_string_lossy() ), ]; expected_args.extend( macos_dir_params() .into_iter() .map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy())), ); expected_args.push("--".to_string()); expected_args.extend(shell_command); assert_eq!(expected_args, args); // Verify that .codex/config.toml cannot be modified under the generated // Seatbelt policy. let config_toml = dot_codex_canonical.join("config.toml"); let output = Command::new(MACOS_PATH_TO_SEATBELT_EXECUTABLE) .args(&args) .current_dir(&cwd) .output() .expect("execute seatbelt command"); assert_eq!( "sandbox_mode = \"read-only\"\n", String::from_utf8_lossy(&fs::read(&config_toml).expect("read config.toml")), "config.toml should contain its original contents because it should not have been modified" ); assert!( !output.status.success(), "command to write {} should fail under seatbelt", &config_toml.display() ); assert_eq!( String::from_utf8_lossy(&output.stderr), format!("bash: {}: Operation not permitted\n", config_toml.display()), ); // Create a similar Seatbelt command that tries to write to a file in // the .git folder, which should also be blocked. 
let pre_commit_hook = dot_git_canonical.join("hooks").join("pre-commit"); let shell_command_git: Vec<String> = [ "bash", "-c", "echo 'pwned!' > \"$1\"", "bash", pre_commit_hook.to_string_lossy().as_ref(), ] .iter() .map(std::string::ToString::to_string) .collect(); let write_hooks_file_args = create_seatbelt_command_args(shell_command_git, &policy, &cwd); let output = Command::new(MACOS_PATH_TO_SEATBELT_EXECUTABLE) .args(&write_hooks_file_args) .current_dir(&cwd) .output() .expect("execute seatbelt command"); assert!( !fs::exists(&pre_commit_hook).expect("exists pre-commit hook"), "{} should not exist because it should not have been created", pre_commit_hook.display() ); assert!( !output.status.success(), "command to write {} should fail under seatbelt", &pre_commit_hook.display() ); assert_eq!( String::from_utf8_lossy(&output.stderr), format!( "bash: {}: Operation not permitted\n", pre_commit_hook.display() ), ); // Verify that writing a file to the folder containing .git and .codex is allowed. 
let allowed_file = vulnerable_root_canonical.join("allowed.txt"); let shell_command_allowed: Vec<String> = [ "bash", "-c", "echo 'this is allowed' > \"$1\"", "bash", allowed_file.to_string_lossy().as_ref(), ] .iter() .map(std::string::ToString::to_string) .collect(); let write_allowed_file_args = create_seatbelt_command_args(shell_command_allowed, &policy, &cwd); let output = Command::new(MACOS_PATH_TO_SEATBELT_EXECUTABLE) .args(&write_allowed_file_args) .current_dir(&cwd) .output() .expect("execute seatbelt command"); assert!( output.status.success(), "command to write {} should succeed under seatbelt", &allowed_file.display() ); assert_eq!( "this is allowed\n", String::from_utf8_lossy(&fs::read(&allowed_file).expect("read allowed.txt")), "{} should contain the written text", allowed_file.display() ); } #[test] fn create_seatbelt_args_for_cwd_as_git_repo() { // Create a temporary workspace with two writable roots: one containing // top-level .git and .codex directories and one without them. let tmp = TempDir::new().expect("tempdir"); let PopulatedTmp { vulnerable_root, vulnerable_root_canonical, dot_git_canonical, dot_codex_canonical, .. } = populate_tmpdir(tmp.path()); // Build a policy that does not specify any writable_roots, but does // use the default ones (cwd and TMPDIR) and verifies the `.git` and // `.codex` checks are done properly for cwd. 
let policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], network_access: false, exclude_tmpdir_env_var: false, exclude_slash_tmp: false, }; let shell_command: Vec<String> = [ "bash", "-c", "echo 'sandbox_mode = \"danger-full-access\"' > \"$1\"", "bash", dot_codex_canonical .join("config.toml") .to_string_lossy() .as_ref(), ] .iter() .map(std::string::ToString::to_string) .collect(); let args = create_seatbelt_command_args(shell_command.clone(), &policy, vulnerable_root.as_path()); let tmpdir_env_var = std::env::var("TMPDIR") .ok() .map(PathBuf::from) .and_then(|p| p.canonicalize().ok()) .map(|p| p.to_string_lossy().to_string()); let tempdir_policy_entry = if tmpdir_env_var.is_some() { r#" (subpath (param "WRITABLE_ROOT_2"))"# } else { "" }; // Build the expected policy text using a raw string for readability. // Note that the policy includes: // - the base policy, // - read-only access to the filesystem, // - write access to WRITABLE_ROOT_0 (but not its .git or .codex), WRITABLE_ROOT_1, and cwd as WRITABLE_ROOT_2. 
let expected_policy = format!( r#"{MACOS_SEATBELT_BASE_POLICY} ; allow read-only file operations (allow file-read*) (allow file-write* (require-all (subpath (param "WRITABLE_ROOT_0")) (require-not (subpath (param "WRITABLE_ROOT_0_RO_0"))) (require-not (subpath (param "WRITABLE_ROOT_0_RO_1"))) ) (subpath (param "WRITABLE_ROOT_1")){tempdir_policy_entry} ) "#, ); let mut expected_args = vec![ "-p".to_string(), expected_policy, format!( "-DWRITABLE_ROOT_0={}", vulnerable_root_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_0_RO_0={}", dot_git_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_0_RO_1={}", dot_codex_canonical.to_string_lossy() ), format!( "-DWRITABLE_ROOT_1={}", PathBuf::from("/tmp") .canonicalize() .expect("canonicalize /tmp") .to_string_lossy() ), ]; if let Some(p) = tmpdir_env_var { expected_args.push(format!("-DWRITABLE_ROOT_2={p}")); } expected_args.extend( macos_dir_params() .into_iter() .map(|(key, value)| format!("-D{key}={value}", value = value.to_string_lossy())), ); expected_args.push("--".to_string()); expected_args.extend(shell_command); assert_eq!(expected_args, args); } struct PopulatedTmp { /// Path containing a .git and .codex subfolder. /// For the purposes of this test, we consider this a "vulnerable" root /// because a bad actor could write to .git/hooks/pre-commit so an /// unsuspecting user would run code as privileged the next time they /// ran `git commit` themselves, or modified .codex/config.toml to /// contain `sandbox_mode = "danger-full-access"` so the agent would /// have full privileges the next time it ran in that repo. vulnerable_root: PathBuf, vulnerable_root_canonical: PathBuf, dot_git_canonical: PathBuf, dot_codex_canonical: PathBuf, /// Path without .git or .codex subfolders. empty_root: PathBuf, /// Canonicalized version of `empty_root`. 
empty_root_canonical: PathBuf, } fn populate_tmpdir(tmp: &Path) -> PopulatedTmp { let vulnerable_root = tmp.join("vulnerable_root"); fs::create_dir_all(&vulnerable_root).expect("create vulnerable_root"); // TODO(mbolin): Should also support the case where `.git` is a file // with a gitdir: ... line. Command::new("git") .arg("init") .arg(".") .current_dir(&vulnerable_root) .output() .expect("git init ."); fs::create_dir_all(vulnerable_root.join(".codex")).expect("create .codex"); fs::write( vulnerable_root.join(".codex").join("config.toml"), "sandbox_mode = \"read-only\"\n", ) .expect("write .codex/config.toml"); let empty_root = tmp.join("empty_root"); fs::create_dir_all(&empty_root).expect("create empty_root"); // Ensure we have canonical paths for -D parameter matching. let vulnerable_root_canonical = vulnerable_root .canonicalize() .expect("canonicalize vulnerable_root"); let dot_git_canonical = vulnerable_root_canonical.join(".git"); let dot_codex_canonical = vulnerable_root_canonical.join(".codex"); let empty_root_canonical = empty_root.canonicalize().expect("canonicalize empty_root"); PopulatedTmp { vulnerable_root, vulnerable_root_canonical, dot_git_canonical, dot_codex_canonical, empty_root, empty_root_canonical, } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/user_notification.rs
codex-rs/core/src/user_notification.rs
use serde::Serialize;
use tracing::error;
use tracing::warn;

/// Dispatches [`UserNotification`]s to an optional user-configured external
/// program. When no program is configured (or the configured argv is empty),
/// `notify` is a no-op.
#[derive(Debug, Default)]
pub(crate) struct UserNotifier {
    // argv of the notifier: program path followed by any fixed arguments.
    notify_command: Option<Vec<String>>,
}

impl UserNotifier {
    /// Deliver `notification` to the configured program, if any.
    pub(crate) fn notify(&self, notification: &UserNotification) {
        if let Some(notify_command) = &self.notify_command
            && !notify_command.is_empty()
        {
            self.invoke_notify(notify_command, notification)
        }
    }

    // Serializes the notification to JSON and appends it as the final
    // argument of the configured command. Serialization or spawn failures
    // are logged and otherwise ignored.
    fn invoke_notify(&self, notify_command: &[String], notification: &UserNotification) {
        let Ok(json) = serde_json::to_string(&notification) else {
            error!("failed to serialise notification payload");
            return;
        };

        let mut command = std::process::Command::new(&notify_command[0]);
        if notify_command.len() > 1 {
            command.args(&notify_command[1..]);
        }
        command.arg(json);

        // Fire-and-forget – we do not wait for completion.
        if let Err(e) = command.spawn() {
            warn!("failed to spawn notifier '{}': {e}", notify_command[0]);
        }
    }

    /// Create a notifier; `notify` is the external program argv (or `None`
    /// to disable notifications).
    pub(crate) fn new(notify: Option<Vec<String>>) -> Self {
        Self {
            notify_command: notify,
        }
    }
}

/// User can configure a program that will receive notifications. Each
/// notification is serialized as JSON and passed as an argument to the
/// program.
#[derive(Debug, Clone, PartialEq, Serialize)]
#[serde(tag = "type", rename_all = "kebab-case")]
pub(crate) enum UserNotification {
    #[serde(rename_all = "kebab-case")]
    AgentTurnComplete {
        thread_id: String,
        turn_id: String,
        cwd: String,
        /// Messages that the user sent to the agent to initiate the turn.
        input_messages: Vec<String>,
        /// The last message sent by the assistant in the turn.
        last_assistant_message: Option<String>,
    },
}

#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::Result;

    // Pins the exact kebab-case JSON wire format produced by the serde
    // attributes above.
    #[test]
    fn test_user_notification() -> Result<()> {
        let notification = UserNotification::AgentTurnComplete {
            thread_id: "b5f6c1c2-1111-2222-3333-444455556666".to_string(),
            turn_id: "12345".to_string(),
            cwd: "/Users/example/project".to_string(),
            input_messages: vec!["Rename `foo` to `bar` and update the callsites.".to_string()],
            last_assistant_message: Some(
                "Rename complete and verified `cargo build` succeeds.".to_string(),
            ),
        };
        let serialized = serde_json::to_string(&notification)?;
        assert_eq!(
            serialized,
            r#"{"type":"agent-turn-complete","thread-id":"b5f6c1c2-1111-2222-3333-444455556666","turn-id":"12345","cwd":"/Users/example/project","input-messages":["Rename `foo` to `bar` and update the callsites."],"last-assistant-message":"Rename complete and verified `cargo build` succeeds."}"#
        );
        Ok(())
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/git_info.rs
codex-rs/core/src/git_info.rs
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;

use crate::util::resolve_path;
use codex_app_server_protocol::GitSha;
use codex_protocol::protocol::GitInfo;
use futures::future::join_all;
use serde::Deserialize;
use serde::Serialize;
use tokio::process::Command;
use tokio::time::Duration as TokioDuration;
use tokio::time::timeout;

/// Return the root of the Git repository containing `base_dir`, or `None` if
/// `base_dir` is not inside a Git repository.
///
/// The check walks up the directory hierarchy looking for a `.git` file or
/// directory (note `.git` can be a file that contains a `gitdir` entry). This
/// approach does **not** require the `git` binary or the `git2` crate and is
/// therefore fairly lightweight.
///
/// Note that this does **not** detect *work‑trees* created with
/// `git worktree add` where the checkout lives outside the main repository
/// directory. If you need Codex to work from such a checkout simply pass the
/// `--allow-no-git-exec` CLI flag that disables the repo requirement.
pub fn get_git_repo_root(base_dir: &Path) -> Option<PathBuf> {
    let mut dir = base_dir.to_path_buf();

    loop {
        if dir.join(".git").exists() {
            return Some(dir);
        }

        // Pop one component (go up one directory). `pop` returns false when
        // we have reached the filesystem root.
        if !dir.pop() {
            break;
        }
    }

    None
}

/// Timeout for git commands to prevent freezing on large repositories
const GIT_COMMAND_TIMEOUT: TokioDuration = TokioDuration::from_secs(5);

/// A base commit that exists on a remote plus the local diff against it.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GitDiffToRemote {
    pub sha: GitSha,
    pub diff: String,
}

/// Collect git repository information from the given working directory using command-line git.
/// Returns None if no git repository is found or if git operations fail.
/// Uses timeouts to prevent freezing on large repositories.
/// All git commands (except the initial repo check) run in parallel for better performance.
pub async fn collect_git_info(cwd: &Path) -> Option<GitInfo> {
    // Check if we're in a git repository first
    let is_git_repo = run_git_command_with_timeout(&["rev-parse", "--git-dir"], cwd)
        .await?
        .status
        .success();

    if !is_git_repo {
        return None;
    }

    // Run all git info collection commands in parallel
    let (commit_result, branch_result, url_result) = tokio::join!(
        run_git_command_with_timeout(&["rev-parse", "HEAD"], cwd),
        run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd),
        run_git_command_with_timeout(&["remote", "get-url", "origin"], cwd)
    );

    // Each field stays None if its command failed, timed out, or produced
    // non-UTF-8 output.
    let mut git_info = GitInfo {
        commit_hash: None,
        branch: None,
        repository_url: None,
    };

    // Process commit hash
    if let Some(output) = commit_result
        && output.status.success()
        && let Ok(hash) = String::from_utf8(output.stdout)
    {
        git_info.commit_hash = Some(hash.trim().to_string());
    }

    // Process branch name
    if let Some(output) = branch_result
        && output.status.success()
        && let Ok(branch) = String::from_utf8(output.stdout)
    {
        let branch = branch.trim();
        // `--abbrev-ref` prints the literal "HEAD" for a detached HEAD;
        // skip it rather than record a fake branch name.
        if branch != "HEAD" {
            git_info.branch = Some(branch.to_string());
        }
    }

    // Process repository URL
    if let Some(output) = url_result
        && output.status.success()
        && let Ok(url) = String::from_utf8(output.stdout)
    {
        git_info.repository_url = Some(url.trim().to_string());
    }

    Some(git_info)
}

/// A minimal commit summary entry used for pickers (subject + timestamp + sha).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CommitLogEntry {
    pub sha: String,
    /// Unix timestamp (seconds since epoch) of the commit time (committer time).
    pub timestamp: i64,
    /// Single-line subject of the commit message.
    pub subject: String,
}

/// Return the last `limit` commits reachable from HEAD for the current branch.
/// Each entry contains the SHA, commit timestamp (seconds), and subject line.
/// Returns an empty vector if not in a git repo or on error/timeout.
pub async fn recent_commits(cwd: &Path, limit: usize) -> Vec<CommitLogEntry> {
    // Ensure we're in a git repo first to avoid noisy errors.
    let Some(out) = run_git_command_with_timeout(&["rev-parse", "--git-dir"], cwd).await else {
        return Vec::new();
    };
    if !out.status.success() {
        return Vec::new();
    }

    // Fields separated by 0x1f (ASCII unit separator) so subjects containing
    // spaces survive splitting.
    let fmt = "%H%x1f%ct%x1f%s"; // <sha> <US> <commit_time> <US> <subject>
    // limit == 0 means "no -n flag", i.e. all commits.
    let limit_arg = (limit > 0).then(|| limit.to_string());
    let mut args: Vec<String> = vec!["log".to_string()];
    if let Some(n) = &limit_arg {
        args.push("-n".to_string());
        args.push(n.clone());
    }
    args.push(format!("--pretty=format:{fmt}"));

    let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
    let Some(log_out) = run_git_command_with_timeout(&arg_refs, cwd).await else {
        return Vec::new();
    };
    if !log_out.status.success() {
        return Vec::new();
    }

    let text = String::from_utf8_lossy(&log_out.stdout);
    let mut entries: Vec<CommitLogEntry> = Vec::new();
    for line in text.lines() {
        // Split each record on the unit separator; malformed lines (missing
        // sha or timestamp) are skipped, unparsable timestamps become 0.
        let mut parts = line.split('\u{001f}');
        let sha = parts.next().unwrap_or("").trim();
        let ts_s = parts.next().unwrap_or("").trim();
        let subject = parts.next().unwrap_or("").trim();
        if sha.is_empty() || ts_s.is_empty() {
            continue;
        }
        let timestamp = ts_s.parse::<i64>().unwrap_or(0);
        entries.push(CommitLogEntry {
            sha: sha.to_string(),
            timestamp,
            subject: subject.to_string(),
        });
    }
    entries
}

/// Returns the closest git sha to HEAD that is on a remote as well as the diff to that sha.
pub async fn git_diff_to_remote(cwd: &Path) -> Option<GitDiffToRemote> {
    // Bail out early when not in a git repository at all.
    get_git_repo_root(cwd)?;
    let remotes = get_git_remotes(cwd).await?;
    let branches = branch_ancestry(cwd).await?;
    let base_sha = find_closest_sha(cwd, &branches, &remotes).await?;
    let diff = diff_against_sha(cwd, &base_sha).await?;
    Some(GitDiffToRemote {
        sha: base_sha,
        diff,
    })
}

/// Run a git command with a timeout to prevent blocking on large repositories.
/// Returns `None` on timeout or if the process could not be run at all; a
/// non-zero exit status is still returned as `Some(output)`.
async fn run_git_command_with_timeout(args: &[&str], cwd: &Path) -> Option<std::process::Output> {
    let result = timeout(
        GIT_COMMAND_TIMEOUT,
        Command::new("git").args(args).current_dir(cwd).output(),
    )
    .await;

    match result {
        Ok(Ok(output)) => Some(output),
        _ => None, // Timeout or error
    }
}

/// List the repository's remotes, with "origin" moved to the front when
/// present so callers that iterate in order prioritize it.
async fn get_git_remotes(cwd: &Path) -> Option<Vec<String>> {
    let output = run_git_command_with_timeout(&["remote"], cwd).await?;
    if !output.status.success() {
        return None;
    }
    let mut remotes: Vec<String> = String::from_utf8(output.stdout)
        .ok()?
        .lines()
        .map(str::to_string)
        .collect();
    if let Some(pos) = remotes.iter().position(|r| r == "origin") {
        let origin = remotes.remove(pos);
        remotes.insert(0, origin);
    }
    Some(remotes)
}

/// Attempt to determine the repository's default branch name.
/// /// Preference order: /// 1) The symbolic ref at `refs/remotes/<remote>/HEAD` for the first remote (origin prioritized) /// 2) `git remote show <remote>` parsed for "HEAD branch: <name>" /// 3) Local fallback to existing `main` or `master` if present async fn get_default_branch(cwd: &Path) -> Option<String> { // Prefer the first remote (with origin prioritized) let remotes = get_git_remotes(cwd).await.unwrap_or_default(); for remote in remotes { // Try symbolic-ref, which returns something like: refs/remotes/origin/main if let Some(symref_output) = run_git_command_with_timeout( &[ "symbolic-ref", "--quiet", &format!("refs/remotes/{remote}/HEAD"), ], cwd, ) .await && symref_output.status.success() && let Ok(sym) = String::from_utf8(symref_output.stdout) { let trimmed = sym.trim(); if let Some((_, name)) = trimmed.rsplit_once('/') { return Some(name.to_string()); } } // Fall back to parsing `git remote show <remote>` output if let Some(show_output) = run_git_command_with_timeout(&["remote", "show", &remote], cwd).await && show_output.status.success() && let Ok(text) = String::from_utf8(show_output.stdout) { for line in text.lines() { let line = line.trim(); if let Some(rest) = line.strip_prefix("HEAD branch:") { let name = rest.trim(); if !name.is_empty() { return Some(name.to_string()); } } } } } // No remote-derived default; try common local defaults if they exist get_default_branch_local(cwd).await } /// Determine the repository's default branch name, if available. /// /// This inspects remote configuration first (including the symbolic `HEAD` /// reference) and falls back to common local defaults such as `main` or /// `master`. Returns `None` when the information cannot be determined, for /// example when the current directory is not inside a Git repository. pub async fn default_branch_name(cwd: &Path) -> Option<String> { get_default_branch(cwd).await } /// Attempt to determine the repository's default branch name from local branches. 
async fn get_default_branch_local(cwd: &Path) -> Option<String> { for candidate in ["main", "master"] { if let Some(verify) = run_git_command_with_timeout( &[ "rev-parse", "--verify", "--quiet", &format!("refs/heads/{candidate}"), ], cwd, ) .await && verify.status.success() { return Some(candidate.to_string()); } } None } /// Build an ancestry of branches starting at the current branch and ending at the /// repository's default branch (if determinable).. async fn branch_ancestry(cwd: &Path) -> Option<Vec<String>> { // Discover current branch (ignore detached HEAD by treating it as None) let current_branch = run_git_command_with_timeout(&["rev-parse", "--abbrev-ref", "HEAD"], cwd) .await .and_then(|o| { if o.status.success() { String::from_utf8(o.stdout).ok() } else { None } }) .map(|s| s.trim().to_string()) .filter(|s| s != "HEAD"); // Discover default branch let default_branch = get_default_branch(cwd).await; let mut ancestry: Vec<String> = Vec::new(); let mut seen: HashSet<String> = HashSet::new(); if let Some(cb) = current_branch.clone() { seen.insert(cb.clone()); ancestry.push(cb); } if let Some(db) = default_branch && !seen.contains(&db) { seen.insert(db.clone()); ancestry.push(db); } // Expand candidates: include any remote branches that already contain HEAD. // This addresses cases where we're on a new local-only branch forked from a // remote branch that isn't the repository default. We prioritize remotes in // the order returned by get_git_remotes (origin first). 
let remotes = get_git_remotes(cwd).await.unwrap_or_default(); for remote in remotes { if let Some(output) = run_git_command_with_timeout( &[ "for-each-ref", "--format=%(refname:short)", "--contains=HEAD", &format!("refs/remotes/{remote}"), ], cwd, ) .await && output.status.success() && let Ok(text) = String::from_utf8(output.stdout) { for line in text.lines() { let short = line.trim(); // Expect format like: "origin/feature"; extract the branch path after "remote/" if let Some(stripped) = short.strip_prefix(&format!("{remote}/")) && !stripped.is_empty() && !seen.contains(stripped) { seen.insert(stripped.to_string()); ancestry.push(stripped.to_string()); } } } } // Ensure we return Some vector, even if empty, to allow caller logic to proceed Some(ancestry) } // Helper for a single branch: return the remote SHA if present on any remote // and the distance (commits ahead of HEAD) for that branch. The first item is // None if the branch is not present on any remote. Returns None if distance // could not be computed due to git errors/timeouts. async fn branch_remote_and_distance( cwd: &Path, branch: &str, remotes: &[String], ) -> Option<(Option<GitSha>, usize)> { // Try to find the first remote ref that exists for this branch (origin prioritized by caller). let mut found_remote_sha: Option<GitSha> = None; let mut found_remote_ref: Option<String> = None; for remote in remotes { let remote_ref = format!("refs/remotes/{remote}/{branch}"); let Some(verify_output) = run_git_command_with_timeout(&["rev-parse", "--verify", "--quiet", &remote_ref], cwd) .await else { // Mirror previous behavior: if the verify call times out/fails at the process level, // treat the entire branch as unusable. return None; }; if !verify_output.status.success() { continue; } let Ok(sha) = String::from_utf8(verify_output.stdout) else { // Mirror previous behavior and skip the entire branch on parse failure. 
return None; }; found_remote_sha = Some(GitSha::new(sha.trim())); found_remote_ref = Some(remote_ref); break; } // Compute distance as the number of commits HEAD is ahead of the branch. // Prefer local branch name if it exists; otherwise fall back to the remote ref (if any). let count_output = if let Some(local_count) = run_git_command_with_timeout(&["rev-list", "--count", &format!("{branch}..HEAD")], cwd) .await { if local_count.status.success() { local_count } else if let Some(remote_ref) = &found_remote_ref { match run_git_command_with_timeout( &["rev-list", "--count", &format!("{remote_ref}..HEAD")], cwd, ) .await { Some(remote_count) => remote_count, None => return None, } } else { return None; } } else if let Some(remote_ref) = &found_remote_ref { match run_git_command_with_timeout( &["rev-list", "--count", &format!("{remote_ref}..HEAD")], cwd, ) .await { Some(remote_count) => remote_count, None => return None, } } else { return None; }; if !count_output.status.success() { return None; } let Ok(distance_str) = String::from_utf8(count_output.stdout) else { return None; }; let Ok(distance) = distance_str.trim().parse::<usize>() else { return None; }; Some((found_remote_sha, distance)) } // Finds the closest sha that exist on any of branches and also exists on any of the remotes. async fn find_closest_sha(cwd: &Path, branches: &[String], remotes: &[String]) -> Option<GitSha> { // A sha and how many commits away from HEAD it is. let mut closest_sha: Option<(GitSha, usize)> = None; for branch in branches { let Some((maybe_remote_sha, distance)) = branch_remote_and_distance(cwd, branch, remotes).await else { continue; }; let Some(remote_sha) = maybe_remote_sha else { // Preserve existing behavior: skip branches that are not present on a remote. 
continue; }; match &closest_sha { None => closest_sha = Some((remote_sha, distance)), Some((_, best_distance)) if distance < *best_distance => { closest_sha = Some((remote_sha, distance)); } _ => {} } } closest_sha.map(|(sha, _)| sha) } async fn diff_against_sha(cwd: &Path, sha: &GitSha) -> Option<String> { let output = run_git_command_with_timeout(&["diff", "--no-textconv", "--no-ext-diff", &sha.0], cwd) .await?; // 0 is success and no diff. // 1 is success but there is a diff. let exit_ok = output.status.code().is_some_and(|c| c == 0 || c == 1); if !exit_ok { return None; } let mut diff = String::from_utf8(output.stdout).ok()?; if let Some(untracked_output) = run_git_command_with_timeout(&["ls-files", "--others", "--exclude-standard"], cwd).await && untracked_output.status.success() { let untracked: Vec<String> = String::from_utf8(untracked_output.stdout) .ok()? .lines() .map(str::to_string) .filter(|s| !s.is_empty()) .collect(); if !untracked.is_empty() { // Use platform-appropriate null device and guard paths with `--`. let null_device: &str = if cfg!(windows) { "NUL" } else { "/dev/null" }; let futures_iter = untracked.into_iter().map(|file| async move { let file_owned = file; let args_vec: Vec<&str> = vec![ "diff", "--no-textconv", "--no-ext-diff", "--binary", "--no-index", // -- ensures that filenames that start with - are not treated as options. "--", null_device, &file_owned, ]; run_git_command_with_timeout(&args_vec, cwd).await }); let results = join_all(futures_iter).await; for extra in results.into_iter().flatten() { if extra.status.code().is_some_and(|c| c == 0 || c == 1) && let Ok(s) = String::from_utf8(extra.stdout) { diff.push_str(&s); } } } } Some(diff) } /// Resolve the path that should be used for trust checks. Similar to /// `[get_git_repo_root]`, but resolves to the root of the main /// repository. Handles worktrees. 
pub fn resolve_root_git_project_for_trust(cwd: &Path) -> Option<PathBuf> { let base = if cwd.is_dir() { cwd } else { cwd.parent()? }; // TODO: we should make this async, but it's primarily used deep in // callstacks of sync code, and should almost always be fast let git_dir_out = std::process::Command::new("git") .args(["rev-parse", "--git-common-dir"]) .current_dir(base) .output() .ok()?; if !git_dir_out.status.success() { return None; } let git_dir_s = String::from_utf8(git_dir_out.stdout) .ok()? .trim() .to_string(); let git_dir_path_raw = resolve_path(base, &PathBuf::from(&git_dir_s)); // Normalize to handle macOS /var vs /private/var and resolve ".." segments. let git_dir_path = std::fs::canonicalize(&git_dir_path_raw).unwrap_or(git_dir_path_raw); git_dir_path.parent().map(Path::to_path_buf) } /// Returns a list of local git branches. /// Includes the default branch at the beginning of the list, if it exists. pub async fn local_git_branches(cwd: &Path) -> Vec<String> { let mut branches: Vec<String> = if let Some(out) = run_git_command_with_timeout(&["branch", "--format=%(refname:short)"], cwd).await && out.status.success() { String::from_utf8_lossy(&out.stdout) .lines() .map(|s| s.trim().to_string()) .filter(|s| !s.is_empty()) .collect() } else { Vec::new() }; branches.sort_unstable(); if let Some(base) = get_default_branch_local(cwd).await && let Some(pos) = branches.iter().position(|name| name == &base) { let base_branch = branches.remove(pos); branches.insert(0, base_branch); } branches } /// Returns the current checked out branch name. 
pub async fn current_branch_name(cwd: &Path) -> Option<String> { let out = run_git_command_with_timeout(&["branch", "--show-current"], cwd).await?; if !out.status.success() { return None; } String::from_utf8(out.stdout) .ok() .map(|s| s.trim().to_string()) .filter(|name| !name.is_empty()) } #[cfg(test)] mod tests { use super::*; use core_test_support::skip_if_sandbox; use std::fs; use std::path::PathBuf; use tempfile::TempDir; // Helper function to create a test git repository async fn create_test_git_repo(temp_dir: &TempDir) -> PathBuf { let repo_path = temp_dir.path().join("repo"); fs::create_dir(&repo_path).expect("Failed to create repo dir"); let envs = vec![ ("GIT_CONFIG_GLOBAL", "/dev/null"), ("GIT_CONFIG_NOSYSTEM", "1"), ]; // Initialize git repo Command::new("git") .envs(envs.clone()) .args(["init"]) .current_dir(&repo_path) .output() .await .expect("Failed to init git repo"); // Configure git user (required for commits) Command::new("git") .envs(envs.clone()) .args(["config", "user.name", "Test User"]) .current_dir(&repo_path) .output() .await .expect("Failed to set git user name"); Command::new("git") .envs(envs.clone()) .args(["config", "user.email", "test@example.com"]) .current_dir(&repo_path) .output() .await .expect("Failed to set git user email"); // Create a test file and commit it let test_file = repo_path.join("test.txt"); fs::write(&test_file, "test content").expect("Failed to write test file"); Command::new("git") .envs(envs.clone()) .args(["add", "."]) .current_dir(&repo_path) .output() .await .expect("Failed to add files"); Command::new("git") .envs(envs.clone()) .args(["commit", "-m", "Initial commit"]) .current_dir(&repo_path) .output() .await .expect("Failed to commit"); repo_path } #[tokio::test] async fn test_recent_commits_non_git_directory_returns_empty() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let entries = recent_commits(temp_dir.path(), 10).await; assert!(entries.is_empty(), "expected no commits outside 
a git repo"); } #[tokio::test] async fn test_recent_commits_orders_and_limits() { skip_if_sandbox!(); use tokio::time::Duration; use tokio::time::sleep; let temp_dir = TempDir::new().expect("Failed to create temp dir"); let repo_path = create_test_git_repo(&temp_dir).await; // Make three distinct commits with small delays to ensure ordering by timestamp. fs::write(repo_path.join("file.txt"), "one").unwrap(); Command::new("git") .args(["add", "file.txt"]) .current_dir(&repo_path) .output() .await .expect("git add"); Command::new("git") .args(["commit", "-m", "first change"]) .current_dir(&repo_path) .output() .await .expect("git commit 1"); sleep(Duration::from_millis(1100)).await; fs::write(repo_path.join("file.txt"), "two").unwrap(); Command::new("git") .args(["add", "file.txt"]) .current_dir(&repo_path) .output() .await .expect("git add 2"); Command::new("git") .args(["commit", "-m", "second change"]) .current_dir(&repo_path) .output() .await .expect("git commit 2"); sleep(Duration::from_millis(1100)).await; fs::write(repo_path.join("file.txt"), "three").unwrap(); Command::new("git") .args(["add", "file.txt"]) .current_dir(&repo_path) .output() .await .expect("git add 3"); Command::new("git") .args(["commit", "-m", "third change"]) .current_dir(&repo_path) .output() .await .expect("git commit 3"); // Request the latest 3 commits; should be our three changes in reverse time order. 
let entries = recent_commits(&repo_path, 3).await; assert_eq!(entries.len(), 3); assert_eq!(entries[0].subject, "third change"); assert_eq!(entries[1].subject, "second change"); assert_eq!(entries[2].subject, "first change"); // Basic sanity on SHA formatting for e in entries { assert!(e.sha.len() >= 7 && e.sha.chars().all(|c| c.is_ascii_hexdigit())); } } async fn create_test_git_repo_with_remote(temp_dir: &TempDir) -> (PathBuf, String) { let repo_path = create_test_git_repo(temp_dir).await; let remote_path = temp_dir.path().join("remote.git"); Command::new("git") .args(["init", "--bare", remote_path.to_str().unwrap()]) .output() .await .expect("Failed to init bare remote"); Command::new("git") .args(["remote", "add", "origin", remote_path.to_str().unwrap()]) .current_dir(&repo_path) .output() .await .expect("Failed to add remote"); let output = Command::new("git") .args(["rev-parse", "--abbrev-ref", "HEAD"]) .current_dir(&repo_path) .output() .await .expect("Failed to get branch"); let branch = String::from_utf8(output.stdout).unwrap().trim().to_string(); Command::new("git") .args(["push", "-u", "origin", &branch]) .current_dir(&repo_path) .output() .await .expect("Failed to push initial commit"); (repo_path, branch) } #[tokio::test] async fn test_collect_git_info_non_git_directory() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let result = collect_git_info(temp_dir.path()).await; assert!(result.is_none()); } #[tokio::test] async fn test_collect_git_info_git_repository() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let repo_path = create_test_git_repo(&temp_dir).await; let git_info = collect_git_info(&repo_path) .await .expect("Should collect git info from repo"); // Should have commit hash assert!(git_info.commit_hash.is_some()); let commit_hash = git_info.commit_hash.unwrap(); assert_eq!(commit_hash.len(), 40); // SHA-1 hash should be 40 characters assert!(commit_hash.chars().all(|c| c.is_ascii_hexdigit())); // 
Should have branch (likely "main" or "master") assert!(git_info.branch.is_some()); let branch = git_info.branch.unwrap(); assert!(branch == "main" || branch == "master"); // Repository URL might be None for local repos without remote // This is acceptable behavior } #[tokio::test] async fn test_collect_git_info_with_remote() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let repo_path = create_test_git_repo(&temp_dir).await; // Add a remote origin Command::new("git") .args([ "remote", "add", "origin", "https://github.com/example/repo.git", ]) .current_dir(&repo_path) .output() .await .expect("Failed to add remote"); let git_info = collect_git_info(&repo_path) .await .expect("Should collect git info from repo"); let remote_url_output = Command::new("git") .args(["remote", "get-url", "origin"]) .current_dir(&repo_path) .output() .await .expect("Failed to read remote url"); // Some dev environments rewrite remotes (e.g., force SSH), so compare against // whatever URL Git reports instead of a fixed placeholder. 
let expected_remote = String::from_utf8(remote_url_output.stdout) .unwrap() .trim() .to_string(); // Should have repository URL assert_eq!(git_info.repository_url, Some(expected_remote)); } #[tokio::test] async fn test_collect_git_info_detached_head() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let repo_path = create_test_git_repo(&temp_dir).await; // Get the current commit hash let output = Command::new("git") .args(["rev-parse", "HEAD"]) .current_dir(&repo_path) .output() .await .expect("Failed to get HEAD"); let commit_hash = String::from_utf8(output.stdout).unwrap().trim().to_string(); // Checkout the commit directly (detached HEAD) Command::new("git") .args(["checkout", &commit_hash]) .current_dir(&repo_path) .output() .await .expect("Failed to checkout commit"); let git_info = collect_git_info(&repo_path) .await .expect("Should collect git info from repo"); // Should have commit hash assert!(git_info.commit_hash.is_some()); // Branch should be None for detached HEAD (since rev-parse --abbrev-ref HEAD returns "HEAD") assert!(git_info.branch.is_none()); } #[tokio::test] async fn test_collect_git_info_with_branch() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let repo_path = create_test_git_repo(&temp_dir).await; // Create and checkout a new branch Command::new("git") .args(["checkout", "-b", "feature-branch"]) .current_dir(&repo_path) .output() .await .expect("Failed to create branch"); let git_info = collect_git_info(&repo_path) .await .expect("Should collect git info from repo"); // Should have the new branch name assert_eq!(git_info.branch, Some("feature-branch".to_string())); } #[tokio::test] async fn test_get_git_working_tree_state_clean_repo() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await; let remote_sha = Command::new("git") .args(["rev-parse", &format!("origin/{branch}")]) .current_dir(&repo_path) .output() 
.await .expect("Failed to rev-parse remote"); let remote_sha = String::from_utf8(remote_sha.stdout) .unwrap() .trim() .to_string(); let state = git_diff_to_remote(&repo_path) .await .expect("Should collect working tree state"); assert_eq!(state.sha, GitSha::new(&remote_sha)); assert!(state.diff.is_empty()); } #[tokio::test] async fn test_get_git_working_tree_state_with_changes() { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let (repo_path, branch) = create_test_git_repo_with_remote(&temp_dir).await; let tracked = repo_path.join("test.txt"); fs::write(&tracked, "modified").unwrap(); fs::write(repo_path.join("untracked.txt"), "new").unwrap(); let remote_sha = Command::new("git") .args(["rev-parse", &format!("origin/{branch}")]) .current_dir(&repo_path)
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/parse_command.rs
codex-rs/core/src/parse_command.rs
use crate::bash::extract_bash_command; use crate::bash::try_parse_shell; use crate::bash::try_parse_word_only_commands_sequence; use crate::powershell::extract_powershell_command; use codex_protocol::parse_command::ParsedCommand; use shlex::split as shlex_split; use shlex::try_join as shlex_try_join; use std::path::PathBuf; pub fn shlex_join(tokens: &[String]) -> String { shlex_try_join(tokens.iter().map(String::as_str)) .unwrap_or_else(|_| "<command included NUL byte>".to_string()) } /// Extracts the shell and script from a command, regardless of platform pub fn extract_shell_command(command: &[String]) -> Option<(&str, &str)> { extract_bash_command(command).or_else(|| extract_powershell_command(command)) } /// DO NOT REVIEW THIS CODE BY HAND /// This parsing code is quite complex and not easy to hand-modify. /// The easiest way to iterate is to add unit tests and have Codex fix the implementation. /// To encourage this, the tests have been put directly below this function rather than at the bottom of the /// /// Parses metadata out of an arbitrary command. /// These commands are model driven and could include just about anything. /// The parsing is slightly lossy due to the ~infinite expressiveness of an arbitrary command. /// The goal of the parsed metadata is to be able to provide the user with a human readable gis /// of what it is doing. pub fn parse_command(command: &[String]) -> Vec<ParsedCommand> { // Parse and then collapse consecutive duplicate commands to avoid redundant summaries. let parsed = parse_command_impl(command); let mut deduped: Vec<ParsedCommand> = Vec::with_capacity(parsed.len()); for cmd in parsed.into_iter() { if deduped.last().is_some_and(|prev| prev == &cmd) { continue; } deduped.push(cmd); } deduped } #[cfg(test)] #[allow(clippy::items_after_test_module)] /// Tests are at the top to encourage using TDD + Codex to fix the implementation. 
mod tests { use super::*; use std::path::PathBuf; use std::string::ToString; fn shlex_split_safe(s: &str) -> Vec<String> { shlex_split(s).unwrap_or_else(|| s.split_whitespace().map(ToString::to_string).collect()) } fn vec_str(args: &[&str]) -> Vec<String> { args.iter().map(ToString::to_string).collect() } fn assert_parsed(args: &[String], expected: Vec<ParsedCommand>) { let out = parse_command(args); assert_eq!(out, expected); } #[test] fn git_status_is_unknown() { assert_parsed( &vec_str(&["git", "status"]), vec![ParsedCommand::Unknown { cmd: "git status".to_string(), }], ); } #[test] fn handles_git_pipe_wc() { let inner = "git status | wc -l"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Unknown { cmd: "git status".to_string(), }], ); } #[test] fn bash_lc_redirect_not_quoted() { let inner = "echo foo > bar"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Unknown { cmd: "echo foo > bar".to_string(), }], ); } #[test] fn handles_complex_bash_command_head() { let inner = "rg --version && node -v && pnpm -v && rg --files | wc -l && rg --files | head -n 40"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ // Expect commands in left-to-right execution order ParsedCommand::Search { cmd: "rg --version".to_string(), query: None, path: None, }, ParsedCommand::Unknown { cmd: "node -v".to_string(), }, ParsedCommand::Unknown { cmd: "pnpm -v".to_string(), }, ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }, ], ); } #[test] fn supports_searching_for_navigate_to_route() -> anyhow::Result<()> { let inner = "rg -n \"navigate-to-route\" -S"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg -n navigate-to-route -S".to_string(), query: Some("navigate-to-route".to_string()), path: None, }], ); Ok(()) } #[test] fn handles_complex_bash_command() { let inner = "rg -n \"BUG|FIXME|TODO|XXX|HACK\" -S | head -n 200"; assert_parsed( &vec_str(&["bash", "-lc", inner]), 
vec![ParsedCommand::Search { cmd: "rg -n 'BUG|FIXME|TODO|XXX|HACK' -S".to_string(), query: Some("BUG|FIXME|TODO|XXX|HACK".to_string()), path: None, }], ); } #[test] fn supports_rg_files_with_path_and_pipe() { let inner = "rg --files webview/src | sed -n"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg --files webview/src".to_string(), query: None, path: Some("webview".to_string()), }], ); } #[test] fn supports_rg_files_then_head() { let inner = "rg --files | head -n 50"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn supports_cat() { let inner = "cat webview/README.md"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "README.md".to_string(), path: PathBuf::from("webview/README.md"), }], ); } #[test] fn zsh_lc_supports_cat() { let inner = "cat README.md"; assert_parsed( &vec_str(&["zsh", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "README.md".to_string(), path: PathBuf::from("README.md"), }], ); } #[test] fn cd_then_cat_is_single_read() { assert_parsed( &shlex_split_safe("cd foo && cat foo.txt"), vec![ParsedCommand::Read { cmd: "cat foo.txt".to_string(), name: "foo.txt".to_string(), path: PathBuf::from("foo/foo.txt"), }], ); } #[test] fn bash_cd_then_bar_is_same_as_bar() { // Ensure a leading `cd` inside bash -lc is dropped when followed by another command. 
assert_parsed( &shlex_split_safe("bash -lc 'cd foo && bar'"), vec![ParsedCommand::Unknown { cmd: "bar".to_string(), }], ); } #[test] fn bash_cd_then_cat_is_read() { assert_parsed( &shlex_split_safe("bash -lc 'cd foo && cat foo.txt'"), vec![ParsedCommand::Read { cmd: "cat foo.txt".to_string(), name: "foo.txt".to_string(), path: PathBuf::from("foo/foo.txt"), }], ); } #[test] fn supports_ls_with_pipe() { let inner = "ls -la | sed -n '1,120p'"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::ListFiles { cmd: "ls -la".to_string(), path: None, }], ); } #[test] fn supports_head_n() { let inner = "head -n 50 Cargo.toml"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn supports_head_file_only() { let inner = "head Cargo.toml"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn supports_cat_sed_n() { let inner = "cat tui/Cargo.toml | sed -n '1,200p'"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("tui/Cargo.toml"), }], ); } #[test] fn supports_tail_n_plus() { let inner = "tail -n +522 README.md"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "README.md".to_string(), path: PathBuf::from("README.md"), }], ); } #[test] fn supports_tail_n_last_lines() { let inner = "tail -n 30 README.md"; let out = parse_command(&vec_str(&["bash", "-lc", inner])); assert_eq!( out, vec![ParsedCommand::Read { cmd: inner.to_string(), name: "README.md".to_string(), path: PathBuf::from("README.md"), }] ); } #[test] fn supports_tail_file_only() { let inner = "tail README.md"; assert_parsed( &vec_str(&["bash", "-lc", inner]), 
vec![ParsedCommand::Read { cmd: inner.to_string(), name: "README.md".to_string(), path: PathBuf::from("README.md"), }], ); } #[test] fn supports_npm_run_build_is_unknown() { assert_parsed( &vec_str(&["npm", "run", "build"]), vec![ParsedCommand::Unknown { cmd: "npm run build".to_string(), }], ); } #[test] fn supports_grep_recursive_current_dir() { assert_parsed( &vec_str(&["grep", "-R", "CODEX_SANDBOX_ENV_VAR", "-n", "."]), vec![ParsedCommand::Search { cmd: "grep -R CODEX_SANDBOX_ENV_VAR -n .".to_string(), query: Some("CODEX_SANDBOX_ENV_VAR".to_string()), path: Some(".".to_string()), }], ); } #[test] fn supports_grep_recursive_specific_file() { assert_parsed( &vec_str(&[ "grep", "-R", "CODEX_SANDBOX_ENV_VAR", "-n", "core/src/spawn.rs", ]), vec![ParsedCommand::Search { cmd: "grep -R CODEX_SANDBOX_ENV_VAR -n core/src/spawn.rs".to_string(), query: Some("CODEX_SANDBOX_ENV_VAR".to_string()), path: Some("spawn.rs".to_string()), }], ); } #[test] fn supports_grep_query_with_slashes_not_shortened() { // Query strings may contain slashes and should not be shortened to the basename. // Previously, grep queries were passed through short_display_path, which is incorrect. 
assert_parsed( &shlex_split_safe("grep -R src/main.rs -n ."), vec![ParsedCommand::Search { cmd: "grep -R src/main.rs -n .".to_string(), query: Some("src/main.rs".to_string()), path: Some(".".to_string()), }], ); } #[test] fn supports_grep_weird_backtick_in_query() { assert_parsed( &shlex_split_safe("grep -R COD`EX_SANDBOX -n"), vec![ParsedCommand::Search { cmd: "grep -R 'COD`EX_SANDBOX' -n".to_string(), query: Some("COD`EX_SANDBOX".to_string()), path: None, }], ); } #[test] fn supports_cd_and_rg_files() { assert_parsed( &shlex_split_safe("cd codex-rs && rg --files"), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn supports_single_string_script_with_cd_and_pipe() { let inner = r#"cd /Users/pakrym/code/codex && rg -n "codex_api" codex-rs -S | head -n 50"#; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg -n codex_api codex-rs -S".to_string(), query: Some("codex_api".to_string()), path: Some("codex-rs".to_string()), }], ); } // ---- is_small_formatting_command unit tests ---- #[test] fn small_formatting_always_true_commands() { for cmd in [ "wc", "tr", "cut", "sort", "uniq", "xargs", "tee", "column", "awk", ] { assert!(is_small_formatting_command(&shlex_split_safe(cmd))); assert!(is_small_formatting_command(&shlex_split_safe(&format!( "{cmd} -x" )))); } } #[test] fn head_behavior() { // No args -> small formatting assert!(is_small_formatting_command(&vec_str(&["head"]))); // Numeric count only -> formatting assert!(is_small_formatting_command(&shlex_split_safe("head -n 40"))); // With explicit file -> not small formatting assert!(!is_small_formatting_command(&shlex_split_safe( "head -n 40 file.txt" ))); // File only (no count) -> not formatting assert!(!is_small_formatting_command(&vec_str(&[ "head", "file.txt" ]))); } #[test] fn tail_behavior() { // No args -> small formatting assert!(is_small_formatting_command(&vec_str(&["tail"]))); // Numeric with plus offset -> 
formatting assert!(is_small_formatting_command(&shlex_split_safe( "tail -n +10" ))); assert!(!is_small_formatting_command(&shlex_split_safe( "tail -n +10 file.txt" ))); // Numeric count -> formatting assert!(is_small_formatting_command(&shlex_split_safe("tail -n 30"))); assert!(!is_small_formatting_command(&shlex_split_safe( "tail -n 30 file.txt" ))); // Byte count -> formatting assert!(is_small_formatting_command(&shlex_split_safe("tail -c 30"))); assert!(is_small_formatting_command(&shlex_split_safe( "tail -c +10" ))); // File only (no count) -> not formatting assert!(!is_small_formatting_command(&vec_str(&[ "tail", "file.txt" ]))); } #[test] fn sed_behavior() { // Plain sed -> small formatting assert!(is_small_formatting_command(&vec_str(&["sed"]))); // sed -n <range> (no file) -> still small formatting assert!(is_small_formatting_command(&vec_str(&["sed", "-n", "10p"]))); // Valid range with file -> not small formatting assert!(!is_small_formatting_command(&shlex_split_safe( "sed -n 10p file.txt" ))); assert!(!is_small_formatting_command(&shlex_split_safe( "sed -n 1,200p file.txt" ))); // Invalid ranges with file -> small formatting assert!(is_small_formatting_command(&shlex_split_safe( "sed -n p file.txt" ))); assert!(is_small_formatting_command(&shlex_split_safe( "sed -n +10p file.txt" ))); } #[test] fn empty_tokens_is_not_small() { let empty: Vec<String> = Vec::new(); assert!(!is_small_formatting_command(&empty)); } #[test] fn supports_nl_then_sed_reading() { let inner = "nl -ba core/src/parse_command.rs | sed -n '1200,1720p'"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "parse_command.rs".to_string(), path: PathBuf::from("core/src/parse_command.rs"), }], ); } #[test] fn supports_sed_n() { let inner = "sed -n '2000,2200p' tui/src/history_cell.rs"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: inner.to_string(), name: "history_cell.rs".to_string(), path: 
PathBuf::from("tui/src/history_cell.rs"), }], ); } #[test] fn filters_out_printf() { let inner = r#"printf "\n===== ansi-escape/Cargo.toml =====\n"; cat -- ansi-escape/Cargo.toml"#; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Read { cmd: "cat -- ansi-escape/Cargo.toml".to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("ansi-escape/Cargo.toml"), }], ); } #[test] fn drops_yes_in_pipelines() { // Inside bash -lc, `yes | rg --files` should focus on the primary command. let inner = "yes | rg --files"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn supports_sed_n_then_nl_as_search() { // Ensure `sed -n '<range>' <file> | nl -ba` is summarized as a search for that file. let args = shlex_split_safe( "sed -n '260,640p' exec/src/event_processor_with_human_output.rs | nl -ba", ); assert_parsed( &args, vec![ParsedCommand::Read { cmd: "sed -n '260,640p' exec/src/event_processor_with_human_output.rs".to_string(), name: "event_processor_with_human_output.rs".to_string(), path: PathBuf::from("exec/src/event_processor_with_human_output.rs"), }], ); } #[test] fn preserves_rg_with_spaces() { assert_parsed( &shlex_split_safe("yes | rg -n 'foo bar' -S"), vec![ParsedCommand::Search { cmd: "rg -n 'foo bar' -S".to_string(), query: Some("foo bar".to_string()), path: None, }], ); } #[test] fn ls_with_glob() { assert_parsed( &shlex_split_safe("ls -I '*.test.js'"), vec![ParsedCommand::ListFiles { cmd: "ls -I '*.test.js'".to_string(), path: None, }], ); } #[test] fn trim_on_semicolon() { assert_parsed( &shlex_split_safe("rg foo ; echo done"), vec![ ParsedCommand::Search { cmd: "rg foo".to_string(), query: Some("foo".to_string()), path: None, }, ParsedCommand::Unknown { cmd: "echo done".to_string(), }, ], ); } #[test] fn split_on_or_connector() { // Ensure we split commands on the logical OR operator as well. 
assert_parsed( &shlex_split_safe("rg foo || echo done"), vec![ ParsedCommand::Search { cmd: "rg foo".to_string(), query: Some("foo".to_string()), path: None, }, ParsedCommand::Unknown { cmd: "echo done".to_string(), }, ], ); } #[test] fn parses_mixed_sequence_with_pipes_semicolons_and_or() { // Provided long command sequence combining sequencing, pipelines, and ORs. let inner = "pwd; ls -la; rg --files -g '!target' | wc -l; rg -n '^\\[workspace\\]' -n Cargo.toml || true; rg -n '^\\[package\\]' -n */Cargo.toml || true; cargo --version; rustc --version; cargo clippy --workspace --all-targets --all-features -q"; let args = vec_str(&["bash", "-lc", inner]); let expected = vec![ ParsedCommand::Unknown { cmd: "pwd".to_string(), }, ParsedCommand::ListFiles { cmd: shlex_join(&shlex_split_safe("ls -la")), path: None, }, ParsedCommand::Search { cmd: shlex_join(&shlex_split_safe("rg --files -g '!target'")), query: None, path: Some("!target".to_string()), }, ParsedCommand::Search { cmd: shlex_join(&shlex_split_safe("rg -n '^\\[workspace\\]' -n Cargo.toml")), query: Some("^\\[workspace\\]".to_string()), path: Some("Cargo.toml".to_string()), }, ParsedCommand::Search { cmd: shlex_join(&shlex_split_safe("rg -n '^\\[package\\]' -n */Cargo.toml")), query: Some("^\\[package\\]".to_string()), path: Some("Cargo.toml".to_string()), }, ParsedCommand::Unknown { cmd: shlex_join(&shlex_split_safe("cargo --version")), }, ParsedCommand::Unknown { cmd: shlex_join(&shlex_split_safe("rustc --version")), }, ParsedCommand::Unknown { cmd: shlex_join(&shlex_split_safe( "cargo clippy --workspace --all-targets --all-features -q", )), }, ]; assert_parsed(&args, expected); } #[test] fn strips_true_in_sequence() { // `true` should be dropped from parsed sequences assert_parsed( &shlex_split_safe("true && rg --files"), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); assert_parsed( &shlex_split_safe("rg --files && true"), vec![ParsedCommand::Search { cmd: "rg 
--files".to_string(), query: None, path: None, }], ); } #[test] fn strips_true_inside_bash_lc() { let inner = "true && rg --files"; assert_parsed( &vec_str(&["bash", "-lc", inner]), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); let inner2 = "rg --files || true"; assert_parsed( &vec_str(&["bash", "-lc", inner2]), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn shorten_path_on_windows() { assert_parsed( &shlex_split_safe(r#"cat "pkg\src\main.rs""#), vec![ParsedCommand::Read { cmd: r#"cat "pkg\\src\\main.rs""#.to_string(), name: "main.rs".to_string(), path: PathBuf::from(r#"pkg\src\main.rs"#), }], ); } #[test] fn head_with_no_space() { assert_parsed( &shlex_split_safe("bash -lc 'head -n50 Cargo.toml'"), vec![ParsedCommand::Read { cmd: "head -n50 Cargo.toml".to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn bash_dash_c_pipeline_parsing() { // Ensure -c is handled similarly to -lc by shell parsing let inner = "rg --files | head -n 1"; assert_parsed( &vec_str(&["bash", "-c", inner]), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn tail_with_no_space() { assert_parsed( &shlex_split_safe("bash -lc 'tail -n+10 README.md'"), vec![ParsedCommand::Read { cmd: "tail -n+10 README.md".to_string(), name: "README.md".to_string(), path: PathBuf::from("README.md"), }], ); } #[test] fn grep_with_query_and_path() { assert_parsed( &shlex_split_safe("grep -R TODO src"), vec![ParsedCommand::Search { cmd: "grep -R TODO src".to_string(), query: Some("TODO".to_string()), path: Some("src".to_string()), }], ); } #[test] fn rg_with_equals_style_flags() { assert_parsed( &shlex_split_safe("rg --colors=never -n foo src"), vec![ParsedCommand::Search { cmd: "rg '--colors=never' -n foo src".to_string(), query: Some("foo".to_string()), path: Some("src".to_string()), }], ); } #[test] fn 
cat_with_double_dash_and_sed_ranges() { // cat -- <file> should be treated as a read of that file assert_parsed( &shlex_split_safe("cat -- ./-strange-file-name"), vec![ParsedCommand::Read { cmd: "cat -- ./-strange-file-name".to_string(), name: "-strange-file-name".to_string(), path: PathBuf::from("./-strange-file-name"), }], ); // sed -n <range> <file> should be treated as a read of <file> assert_parsed( &shlex_split_safe("sed -n '12,20p' Cargo.toml"), vec![ParsedCommand::Read { cmd: "sed -n '12,20p' Cargo.toml".to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn drop_trailing_nl_in_pipeline() { // When an `nl` stage has only flags, it should be dropped from the summary assert_parsed( &shlex_split_safe("rg --files | nl -ba"), vec![ParsedCommand::Search { cmd: "rg --files".to_string(), query: None, path: None, }], ); } #[test] fn ls_with_time_style_and_path() { assert_parsed( &shlex_split_safe("ls --time-style=long-iso ./dist"), vec![ParsedCommand::ListFiles { cmd: "ls '--time-style=long-iso' ./dist".to_string(), // short_display_path drops "dist" and shows "." as the last useful segment path: Some(".".to_string()), }], ); } #[test] fn fd_file_finder_variants() { assert_parsed( &shlex_split_safe("fd -t f src/"), vec![ParsedCommand::Search { cmd: "fd -t f src/".to_string(), query: None, path: Some("src".to_string()), }], ); // fd with query and path should capture both assert_parsed( &shlex_split_safe("fd main src"), vec![ParsedCommand::Search { cmd: "fd main src".to_string(), query: Some("main".to_string()), path: Some("src".to_string()), }], ); } #[test] fn find_basic_name_filter() { assert_parsed( &shlex_split_safe("find . -name '*.rs'"), vec![ParsedCommand::Search { cmd: "find . 
-name '*.rs'".to_string(), query: Some("*.rs".to_string()), path: Some(".".to_string()), }], ); } #[test] fn find_type_only_path() { assert_parsed( &shlex_split_safe("find src -type f"), vec![ParsedCommand::Search { cmd: "find src -type f".to_string(), query: None, path: Some("src".to_string()), }], ); } #[test] fn bin_bash_lc_sed() { assert_parsed( &shlex_split_safe("/bin/bash -lc 'sed -n '1,10p' Cargo.toml'"), vec![ParsedCommand::Read { cmd: "sed -n '1,10p' Cargo.toml".to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn bin_zsh_lc_sed() { assert_parsed( &shlex_split_safe("/bin/zsh -lc 'sed -n '1,10p' Cargo.toml'"), vec![ParsedCommand::Read { cmd: "sed -n '1,10p' Cargo.toml".to_string(), name: "Cargo.toml".to_string(), path: PathBuf::from("Cargo.toml"), }], ); } #[test] fn powershell_command_is_stripped() { assert_parsed( &vec_str(&["powershell", "-Command", "Get-ChildItem"]), vec![ParsedCommand::Unknown { cmd: "Get-ChildItem".to_string(), }], ); } #[test] fn pwsh_with_noprofile_and_c_alias_is_stripped() { assert_parsed( &vec_str(&["pwsh", "-NoProfile", "-c", "Write-Host hi"]), vec![ParsedCommand::Unknown { cmd: "Write-Host hi".to_string(), }], ); } #[test] fn powershell_with_path_is_stripped() { let command = if cfg!(windows) { "C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe" } else { "/usr/local/bin/powershell.exe" }; assert_parsed( &vec_str(&[command, "-NoProfile", "-c", "Write-Host hi"]), vec![ParsedCommand::Unknown { cmd: "Write-Host hi".to_string(), }], ); } } pub fn parse_command_impl(command: &[String]) -> Vec<ParsedCommand> { if let Some(commands) = parse_shell_lc_commands(command) { return commands; } if let Some((_, script)) = extract_powershell_command(command) { return vec![ParsedCommand::Unknown { cmd: script.to_string(), }]; } let normalized = normalize_tokens(command); let parts = if contains_connectors(&normalized) { split_on_connectors(&normalized) } else { vec![normalized] }; // 
Preserve left-to-right execution order for all commands, including bash -c/-lc // so summaries reflect the order they will run. // Map each pipeline segment to its parsed summary, tracking `cd` to compute paths. let mut commands: Vec<ParsedCommand> = Vec::new(); let mut cwd: Option<String> = None; for tokens in &parts { if let Some((head, tail)) = tokens.split_first() && head == "cd" { if let Some(dir) = tail.first() { cwd = Some(match &cwd { Some(base) => join_paths(base, dir), None => dir.clone(), }); } continue; } let parsed = summarize_main_tokens(tokens); let parsed = match parsed { ParsedCommand::Read { cmd, name, path } => { if let Some(base) = &cwd { let full = join_paths(base, &path.to_string_lossy()); ParsedCommand::Read { cmd, name, path: PathBuf::from(full), } } else { ParsedCommand::Read { cmd, name, path } } } other => other, }; commands.push(parsed); } while let Some(next) = simplify_once(&commands) { commands = next; } commands } fn simplify_once(commands: &[ParsedCommand]) -> Option<Vec<ParsedCommand>> { if commands.len() <= 1 { return None; } // echo ... && ...rest => ...rest if let ParsedCommand::Unknown { cmd } = &commands[0] && shlex_split(cmd).is_some_and(|t| t.first().map(String::as_str) == Some("echo")) { return Some(commands[1..].to_vec()); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/shell.rs
codex-rs/core/src/shell.rs
use serde::Deserialize; use serde::Serialize; use std::path::PathBuf; use std::sync::Arc; use crate::shell_snapshot::ShellSnapshot; #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum ShellType { Zsh, Bash, PowerShell, Sh, Cmd, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct Shell { pub(crate) shell_type: ShellType, pub(crate) shell_path: PathBuf, #[serde(skip_serializing, skip_deserializing, default)] pub(crate) shell_snapshot: Option<Arc<ShellSnapshot>>, } impl Shell { pub fn name(&self) -> &'static str { match self.shell_type { ShellType::Zsh => "zsh", ShellType::Bash => "bash", ShellType::PowerShell => "powershell", ShellType::Sh => "sh", ShellType::Cmd => "cmd", } } /// Takes a string of shell and returns the full list of command args to /// use with `exec()` to run the shell command. pub fn derive_exec_args(&self, command: &str, use_login_shell: bool) -> Vec<String> { match self.shell_type { ShellType::Zsh | ShellType::Bash | ShellType::Sh => { let arg = if use_login_shell { "-lc" } else { "-c" }; vec![ self.shell_path.to_string_lossy().to_string(), arg.to_string(), command.to_string(), ] } ShellType::PowerShell => { let mut args = vec![self.shell_path.to_string_lossy().to_string()]; if !use_login_shell { args.push("-NoProfile".to_string()); } args.push("-Command".to_string()); args.push(command.to_string()); args } ShellType::Cmd => { let mut args = vec![self.shell_path.to_string_lossy().to_string()]; args.push("/c".to_string()); args.push(command.to_string()); args } } } } #[cfg(unix)] fn get_user_shell_path() -> Option<PathBuf> { use libc::getpwuid; use libc::getuid; use std::ffi::CStr; unsafe { let uid = getuid(); let pw = getpwuid(uid); if !pw.is_null() { let shell_path = CStr::from_ptr((*pw).pw_shell) .to_string_lossy() .into_owned(); Some(PathBuf::from(shell_path)) } else { None } } } #[cfg(not(unix))] fn get_user_shell_path() -> Option<PathBuf> { None } fn file_exists(path: &PathBuf) -> Option<PathBuf> { 
if std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) { Some(PathBuf::from(path)) } else { None } } fn get_shell_path( shell_type: ShellType, provided_path: Option<&PathBuf>, binary_name: &str, fallback_paths: Vec<&str>, ) -> Option<PathBuf> { // If exact provided path exists, use it if provided_path.and_then(file_exists).is_some() { return provided_path.cloned(); } // Check if the shell we are trying to load is user's default shell // if just use it let default_shell_path = get_user_shell_path(); if let Some(default_shell_path) = default_shell_path && detect_shell_type(&default_shell_path) == Some(shell_type) { return Some(default_shell_path); } if let Ok(path) = which::which(binary_name) { return Some(path); } for path in fallback_paths { //check exists if let Some(path) = file_exists(&PathBuf::from(path)) { return Some(path); } } None } fn get_zsh_shell(path: Option<&PathBuf>) -> Option<Shell> { let shell_path = get_shell_path(ShellType::Zsh, path, "zsh", vec!["/bin/zsh"]); shell_path.map(|shell_path| Shell { shell_type: ShellType::Zsh, shell_path, shell_snapshot: None, }) } fn get_bash_shell(path: Option<&PathBuf>) -> Option<Shell> { let shell_path = get_shell_path(ShellType::Bash, path, "bash", vec!["/bin/bash"]); shell_path.map(|shell_path| Shell { shell_type: ShellType::Bash, shell_path, shell_snapshot: None, }) } fn get_sh_shell(path: Option<&PathBuf>) -> Option<Shell> { let shell_path = get_shell_path(ShellType::Sh, path, "sh", vec!["/bin/sh"]); shell_path.map(|shell_path| Shell { shell_type: ShellType::Sh, shell_path, shell_snapshot: None, }) } fn get_powershell_shell(path: Option<&PathBuf>) -> Option<Shell> { let shell_path = get_shell_path( ShellType::PowerShell, path, "pwsh", vec!["/usr/local/bin/pwsh"], ) .or_else(|| get_shell_path(ShellType::PowerShell, path, "powershell", vec![])); shell_path.map(|shell_path| Shell { shell_type: ShellType::PowerShell, shell_path, shell_snapshot: None, }) } fn get_cmd_shell(path: Option<&PathBuf>) -> 
Option<Shell> { let shell_path = get_shell_path(ShellType::Cmd, path, "cmd", vec![]); shell_path.map(|shell_path| Shell { shell_type: ShellType::Cmd, shell_path, shell_snapshot: None, }) } fn ultimate_fallback_shell() -> Shell { if cfg!(windows) { Shell { shell_type: ShellType::Cmd, shell_path: PathBuf::from("cmd.exe"), shell_snapshot: None, } } else { Shell { shell_type: ShellType::Sh, shell_path: PathBuf::from("/bin/sh"), shell_snapshot: None, } } } pub fn get_shell_by_model_provided_path(shell_path: &PathBuf) -> Shell { detect_shell_type(shell_path) .and_then(|shell_type| get_shell(shell_type, Some(shell_path))) .unwrap_or(ultimate_fallback_shell()) } pub fn get_shell(shell_type: ShellType, path: Option<&PathBuf>) -> Option<Shell> { match shell_type { ShellType::Zsh => get_zsh_shell(path), ShellType::Bash => get_bash_shell(path), ShellType::PowerShell => get_powershell_shell(path), ShellType::Sh => get_sh_shell(path), ShellType::Cmd => get_cmd_shell(path), } } pub fn detect_shell_type(shell_path: &PathBuf) -> Option<ShellType> { match shell_path.as_os_str().to_str() { Some("zsh") => Some(ShellType::Zsh), Some("sh") => Some(ShellType::Sh), Some("cmd") => Some(ShellType::Cmd), Some("bash") => Some(ShellType::Bash), Some("pwsh") => Some(ShellType::PowerShell), Some("powershell") => Some(ShellType::PowerShell), _ => { let shell_name = shell_path.file_stem(); if let Some(shell_name) = shell_name && shell_name != shell_path { detect_shell_type(&PathBuf::from(shell_name)) } else { None } } } } pub fn default_user_shell() -> Shell { default_user_shell_from_path(get_user_shell_path()) } fn default_user_shell_from_path(user_shell_path: Option<PathBuf>) -> Shell { if cfg!(windows) { get_shell(ShellType::PowerShell, None).unwrap_or(ultimate_fallback_shell()) } else { let user_default_shell = user_shell_path .and_then(|shell| detect_shell_type(&shell)) .and_then(|shell_type| get_shell(shell_type, None)); let shell_with_fallback = if cfg!(target_os = "macos") { 
user_default_shell .or_else(|| get_shell(ShellType::Zsh, None)) .or_else(|| get_shell(ShellType::Bash, None)) } else { user_default_shell .or_else(|| get_shell(ShellType::Bash, None)) .or_else(|| get_shell(ShellType::Zsh, None)) }; shell_with_fallback.unwrap_or(ultimate_fallback_shell()) } } #[cfg(test)] mod detect_shell_type_tests { use super::*; #[test] fn test_detect_shell_type() { assert_eq!( detect_shell_type(&PathBuf::from("zsh")), Some(ShellType::Zsh) ); assert_eq!( detect_shell_type(&PathBuf::from("bash")), Some(ShellType::Bash) ); assert_eq!( detect_shell_type(&PathBuf::from("pwsh")), Some(ShellType::PowerShell) ); assert_eq!( detect_shell_type(&PathBuf::from("powershell")), Some(ShellType::PowerShell) ); assert_eq!(detect_shell_type(&PathBuf::from("fish")), None); assert_eq!(detect_shell_type(&PathBuf::from("other")), None); assert_eq!( detect_shell_type(&PathBuf::from("/bin/zsh")), Some(ShellType::Zsh) ); assert_eq!( detect_shell_type(&PathBuf::from("/bin/bash")), Some(ShellType::Bash) ); assert_eq!( detect_shell_type(&PathBuf::from("powershell.exe")), Some(ShellType::PowerShell) ); assert_eq!( detect_shell_type(&PathBuf::from(if cfg!(windows) { "C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe" } else { "/usr/local/bin/pwsh" })), Some(ShellType::PowerShell) ); assert_eq!( detect_shell_type(&PathBuf::from("pwsh.exe")), Some(ShellType::PowerShell) ); assert_eq!( detect_shell_type(&PathBuf::from("/usr/local/bin/pwsh")), Some(ShellType::PowerShell) ); assert_eq!( detect_shell_type(&PathBuf::from("/bin/sh")), Some(ShellType::Sh) ); assert_eq!(detect_shell_type(&PathBuf::from("sh")), Some(ShellType::Sh)); assert_eq!( detect_shell_type(&PathBuf::from("cmd")), Some(ShellType::Cmd) ); assert_eq!( detect_shell_type(&PathBuf::from("cmd.exe")), Some(ShellType::Cmd) ); } } #[cfg(test)] #[cfg(unix)] mod tests { use super::*; use std::path::PathBuf; use std::process::Command; #[test] #[cfg(target_os = "macos")] fn detects_zsh() { let zsh_shell = 
get_shell(ShellType::Zsh, None).unwrap(); let shell_path = zsh_shell.shell_path; assert_eq!(shell_path, PathBuf::from("/bin/zsh")); } #[test] #[cfg(target_os = "macos")] fn fish_fallback_to_zsh() { let zsh_shell = default_user_shell_from_path(Some(PathBuf::from("/bin/fish"))); let shell_path = zsh_shell.shell_path; assert_eq!(shell_path, PathBuf::from("/bin/zsh")); } #[test] fn detects_bash() { let bash_shell = get_shell(ShellType::Bash, None).unwrap(); let shell_path = bash_shell.shell_path; assert!( shell_path == PathBuf::from("/bin/bash") || shell_path == PathBuf::from("/usr/bin/bash") || shell_path == PathBuf::from("/usr/local/bin/bash"), "shell path: {shell_path:?}", ); } #[test] fn detects_sh() { let sh_shell = get_shell(ShellType::Sh, None).unwrap(); let shell_path = sh_shell.shell_path; assert!( shell_path == PathBuf::from("/bin/sh") || shell_path == PathBuf::from("/usr/bin/sh"), "shell path: {shell_path:?}", ); } #[test] fn can_run_on_shell_test() { let cmd = "echo \"Works\""; if cfg!(windows) { assert!(shell_works( get_shell(ShellType::PowerShell, None), "Out-String 'Works'", true, )); assert!(shell_works(get_shell(ShellType::Cmd, None), cmd, true,)); assert!(shell_works(Some(ultimate_fallback_shell()), cmd, true)); } else { assert!(shell_works(Some(ultimate_fallback_shell()), cmd, true)); assert!(shell_works(get_shell(ShellType::Zsh, None), cmd, false)); assert!(shell_works(get_shell(ShellType::Bash, None), cmd, true)); assert!(shell_works(get_shell(ShellType::Sh, None), cmd, true)); } } fn shell_works(shell: Option<Shell>, command: &str, required: bool) -> bool { if let Some(shell) = shell { let args = shell.derive_exec_args(command, false); let output = Command::new(args[0].clone()) .args(&args[1..]) .output() .unwrap(); assert!(output.status.success()); assert!(String::from_utf8_lossy(&output.stdout).contains("Works")); true } else { !required } } #[test] fn derive_exec_args() { let test_bash_shell = Shell { shell_type: ShellType::Bash, shell_path: 
PathBuf::from("/bin/bash"), shell_snapshot: None, }; assert_eq!( test_bash_shell.derive_exec_args("echo hello", false), vec!["/bin/bash", "-c", "echo hello"] ); assert_eq!( test_bash_shell.derive_exec_args("echo hello", true), vec!["/bin/bash", "-lc", "echo hello"] ); let test_zsh_shell = Shell { shell_type: ShellType::Zsh, shell_path: PathBuf::from("/bin/zsh"), shell_snapshot: None, }; assert_eq!( test_zsh_shell.derive_exec_args("echo hello", false), vec!["/bin/zsh", "-c", "echo hello"] ); assert_eq!( test_zsh_shell.derive_exec_args("echo hello", true), vec!["/bin/zsh", "-lc", "echo hello"] ); let test_powershell_shell = Shell { shell_type: ShellType::PowerShell, shell_path: PathBuf::from("pwsh.exe"), shell_snapshot: None, }; assert_eq!( test_powershell_shell.derive_exec_args("echo hello", false), vec!["pwsh.exe", "-NoProfile", "-Command", "echo hello"] ); assert_eq!( test_powershell_shell.derive_exec_args("echo hello", true), vec!["pwsh.exe", "-Command", "echo hello"] ); } #[tokio::test] async fn test_current_shell_detects_zsh() { let shell = Command::new("sh") .arg("-c") .arg("echo $SHELL") .output() .unwrap(); let shell_path = String::from_utf8_lossy(&shell.stdout).trim().to_string(); if shell_path.ends_with("/zsh") { assert_eq!( default_user_shell(), Shell { shell_type: ShellType::Zsh, shell_path: PathBuf::from(shell_path), shell_snapshot: None, } ); } } #[tokio::test] async fn detects_powershell_as_default() { if !cfg!(windows) { return; } let powershell_shell = default_user_shell(); let shell_path = powershell_shell.shell_path; assert!(shell_path.ends_with("pwsh.exe") || shell_path.ends_with("powershell.exe")); } #[test] fn finds_poweshell() { if !cfg!(windows) { return; } let powershell_shell = get_shell(ShellType::PowerShell, None).unwrap(); let shell_path = powershell_shell.shell_path; assert!(shell_path.ends_with("pwsh.exe") || shell_path.ends_with("powershell.exe")); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/landlock.rs
codex-rs/core/src/landlock.rs
use crate::protocol::SandboxPolicy; use crate::spawn::StdioPolicy; use crate::spawn::spawn_child_async; use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; use tokio::process::Child; /// Spawn a shell tool command under the Linux Landlock+seccomp sandbox helper /// (codex-linux-sandbox). /// /// Unlike macOS Seatbelt where we directly embed the policy text, the Linux /// helper accepts a list of `--sandbox-permission`/`-s` flags mirroring the /// public CLI. We convert the internal [`SandboxPolicy`] representation into /// the equivalent CLI options. pub async fn spawn_command_under_linux_sandbox<P>( codex_linux_sandbox_exe: P, command: Vec<String>, command_cwd: PathBuf, sandbox_policy: &SandboxPolicy, sandbox_policy_cwd: &Path, stdio_policy: StdioPolicy, env: HashMap<String, String>, ) -> std::io::Result<Child> where P: AsRef<Path>, { let args = create_linux_sandbox_command_args(command, sandbox_policy, sandbox_policy_cwd); let arg0 = Some("codex-linux-sandbox"); spawn_child_async( codex_linux_sandbox_exe.as_ref().to_path_buf(), args, arg0, command_cwd, sandbox_policy, stdio_policy, env, ) .await } /// Converts the sandbox policy into the CLI invocation for `codex-linux-sandbox`. pub(crate) fn create_linux_sandbox_command_args( command: Vec<String>, sandbox_policy: &SandboxPolicy, sandbox_policy_cwd: &Path, ) -> Vec<String> { #[expect(clippy::expect_used)] let sandbox_policy_cwd = sandbox_policy_cwd .to_str() .expect("cwd must be valid UTF-8") .to_string(); #[expect(clippy::expect_used)] let sandbox_policy_json = serde_json::to_string(sandbox_policy).expect("Failed to serialize SandboxPolicy to JSON"); let mut linux_cmd: Vec<String> = vec![ "--sandbox-policy-cwd".to_string(), sandbox_policy_cwd, "--sandbox-policy".to_string(), sandbox_policy_json, // Separator so that command arguments starting with `-` are not parsed as // options of the helper itself. "--".to_string(), ]; // Append the original tool command. 
linux_cmd.extend(command); linux_cmd }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/env.rs
codex-rs/core/src/env.rs
//! Functions for environment detection that need to be shared across crates. /// Returns true if the current process is running under Windows Subsystem for Linux. pub fn is_wsl() -> bool { #[cfg(target_os = "linux")] { if std::env::var_os("WSL_DISTRO_NAME").is_some() { return true; } match std::fs::read_to_string("/proc/version") { Ok(version) => version.to_lowercase().contains("microsoft"), Err(_) => false, } } #[cfg(not(target_os = "linux"))] { false } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/default_client.rs
codex-rs/core/src/default_client.rs
use crate::spawn::CODEX_SANDBOX_ENV_VAR; use codex_client::CodexHttpClient; pub use codex_client::CodexRequestBuilder; use reqwest::header::HeaderValue; use std::sync::LazyLock; use std::sync::Mutex; use std::sync::OnceLock; /// Set this to add a suffix to the User-Agent string. /// /// It is not ideal that we're using a global singleton for this. /// This is primarily designed to differentiate MCP clients from each other. /// Because there can only be one MCP server per process, it should be safe for this to be a global static. /// However, future users of this should use this with caution as a result. /// In addition, we want to be confident that this value is used for ALL clients and doing that requires a /// lot of wiring and it's easy to miss code paths by doing so. /// See https://github.com/openai/codex/pull/3388/files for an example of what that would look like. /// Finally, we want to make sure this is set for ALL mcp clients without needing to know a special env var /// or having to set data that they already specified in the mcp initialize request somewhere else. /// /// A space is automatically added between the suffix and the rest of the User-Agent string. /// The full user agent string is returned from the mcp initialize response. /// Parenthesis will be added by Codex. This should only specify what goes inside of the parenthesis. 
pub static USER_AGENT_SUFFIX: LazyLock<Mutex<Option<String>>> = LazyLock::new(|| Mutex::new(None)); pub const DEFAULT_ORIGINATOR: &str = "codex_cli_rs"; pub const CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR: &str = "CODEX_INTERNAL_ORIGINATOR_OVERRIDE"; #[derive(Debug, Clone)] pub struct Originator { pub value: String, pub header_value: HeaderValue, } static ORIGINATOR: OnceLock<Originator> = OnceLock::new(); #[derive(Debug)] pub enum SetOriginatorError { InvalidHeaderValue, AlreadyInitialized, } fn get_originator_value(provided: Option<String>) -> Originator { let value = std::env::var(CODEX_INTERNAL_ORIGINATOR_OVERRIDE_ENV_VAR) .ok() .or(provided) .unwrap_or(DEFAULT_ORIGINATOR.to_string()); match HeaderValue::from_str(&value) { Ok(header_value) => Originator { value, header_value, }, Err(e) => { tracing::error!("Unable to turn originator override {value} into header value: {e}"); Originator { value: DEFAULT_ORIGINATOR.to_string(), header_value: HeaderValue::from_static(DEFAULT_ORIGINATOR), } } } } pub fn set_default_originator(value: String) -> Result<(), SetOriginatorError> { let originator = get_originator_value(Some(value)); ORIGINATOR .set(originator) .map_err(|_| SetOriginatorError::AlreadyInitialized) } pub fn originator() -> &'static Originator { ORIGINATOR.get_or_init(|| get_originator_value(None)) } pub fn get_codex_user_agent() -> String { let build_version = env!("CARGO_PKG_VERSION"); let os_info = os_info::get(); let prefix = format!( "{}/{build_version} ({} {}; {}) {}", originator().value.as_str(), os_info.os_type(), os_info.version(), os_info.architecture().unwrap_or("unknown"), crate::terminal::user_agent() ); let suffix = USER_AGENT_SUFFIX .lock() .ok() .and_then(|guard| guard.clone()); let suffix = suffix .as_deref() .map(str::trim) .filter(|value| !value.is_empty()) .map_or_else(String::new, |value| format!(" ({value})")); let candidate = format!("{prefix}{suffix}"); sanitize_user_agent(candidate, &prefix) } /// Sanitize the user agent string. 
/// /// Invalid characters are replaced with an underscore. /// /// If the user agent fails to parse, it falls back to fallback and then to ORIGINATOR. fn sanitize_user_agent(candidate: String, fallback: &str) -> String { if HeaderValue::from_str(candidate.as_str()).is_ok() { return candidate; } let sanitized: String = candidate .chars() .map(|ch| if matches!(ch, ' '..='~') { ch } else { '_' }) .collect(); if !sanitized.is_empty() && HeaderValue::from_str(sanitized.as_str()).is_ok() { tracing::warn!( "Sanitized Codex user agent because provided suffix contained invalid header characters" ); sanitized } else if HeaderValue::from_str(fallback).is_ok() { tracing::warn!( "Falling back to base Codex user agent because provided suffix could not be sanitized" ); fallback.to_string() } else { tracing::warn!( "Falling back to default Codex originator because base user agent string is invalid" ); originator().value.clone() } } /// Create an HTTP client with default `originator` and `User-Agent` headers set. 
pub fn create_client() -> CodexHttpClient { let inner = build_reqwest_client(); CodexHttpClient::new(inner) } pub fn build_reqwest_client() -> reqwest::Client { use reqwest::header::HeaderMap; let mut headers = HeaderMap::new(); headers.insert("originator", originator().header_value.clone()); let ua = get_codex_user_agent(); let mut builder = reqwest::Client::builder() // Set UA via dedicated helper to avoid header validation pitfalls .user_agent(ua) .default_headers(headers); if is_sandboxed() { builder = builder.no_proxy(); } builder.build().unwrap_or_else(|_| reqwest::Client::new()) } fn is_sandboxed() -> bool { std::env::var(CODEX_SANDBOX_ENV_VAR).as_deref() == Ok("seatbelt") } #[cfg(test)] mod tests { use super::*; use core_test_support::skip_if_no_network; #[test] fn test_get_codex_user_agent() { let user_agent = get_codex_user_agent(); let originator = originator().value.as_str(); let prefix = format!("{originator}/"); assert!(user_agent.starts_with(&prefix)); } #[tokio::test] async fn test_create_client_sets_default_headers() { skip_if_no_network!(); use wiremock::Mock; use wiremock::MockServer; use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; let client = create_client(); // Spin up a local mock server and capture a request. 
let server = MockServer::start().await; Mock::given(method("GET")) .and(path("/")) .respond_with(ResponseTemplate::new(200)) .mount(&server) .await; let resp = client .get(server.uri()) .send() .await .expect("failed to send request"); assert!(resp.status().is_success()); let requests = server .received_requests() .await .expect("failed to fetch received requests"); assert!(!requests.is_empty()); let headers = &requests[0].headers; // originator header is set to the provided value let originator_header = headers .get("originator") .expect("originator header missing"); assert_eq!(originator_header.to_str().unwrap(), originator().value); // User-Agent matches the computed Codex UA for that originator let expected_ua = get_codex_user_agent(); let ua_header = headers .get("user-agent") .expect("user-agent header missing"); assert_eq!(ua_header.to_str().unwrap(), expected_ua); } #[test] fn test_invalid_suffix_is_sanitized() { let prefix = "codex_cli_rs/0.0.0"; let suffix = "bad\rsuffix"; assert_eq!( sanitize_user_agent(format!("{prefix} ({suffix})"), prefix), "codex_cli_rs/0.0.0 (bad_suffix)" ); } #[test] fn test_invalid_suffix_is_sanitized2() { let prefix = "codex_cli_rs/0.0.0"; let suffix = "bad\0suffix"; assert_eq!( sanitize_user_agent(format!("{prefix} ({suffix})"), prefix), "codex_cli_rs/0.0.0 (bad_suffix)" ); } #[test] #[cfg(target_os = "macos")] fn test_macos() { use regex_lite::Regex; let user_agent = get_codex_user_agent(); let originator = regex_lite::escape(originator().value.as_str()); let re = Regex::new(&format!( r"^{originator}/\d+\.\d+\.\d+ \(Mac OS \d+\.\d+\.\d+; (x86_64|arm64)\) (\S+)$" )) .unwrap(); assert!(re.is_match(&user_agent)); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/bash.rs
codex-rs/core/src/bash.rs
use std::path::PathBuf; use tree_sitter::Node; use tree_sitter::Parser; use tree_sitter::Tree; use tree_sitter_bash::LANGUAGE as BASH; use crate::shell::ShellType; use crate::shell::detect_shell_type; /// Parse the provided bash source using tree-sitter-bash, returning a Tree on /// success or None if parsing failed. pub fn try_parse_shell(shell_lc_arg: &str) -> Option<Tree> { let lang = BASH.into(); let mut parser = Parser::new(); #[expect(clippy::expect_used)] parser.set_language(&lang).expect("load bash grammar"); let old_tree: Option<&Tree> = None; parser.parse(shell_lc_arg, old_tree) } /// Parse a script which may contain multiple simple commands joined only by /// the safe logical/pipe/sequencing operators: `&&`, `||`, `;`, `|`. /// /// Returns `Some(Vec<command_words>)` if every command is a plain word‑only /// command and the parse tree does not contain disallowed constructs /// (parentheses, redirections, substitutions, control flow, etc.). Otherwise /// returns `None`. pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<Vec<Vec<String>>> { if tree.root_node().has_error() { return None; } // List of allowed (named) node kinds for a "word only commands sequence". // If we encounter a named node that is not in this list we reject. const ALLOWED_KINDS: &[&str] = &[ // top level containers "program", "list", "pipeline", // commands & words "command", "command_name", "word", "string", "string_content", "raw_string", "number", "concatenation", ]; // Allow only safe punctuation / operator tokens; anything else causes reject. 
const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"]; let root = tree.root_node(); let mut cursor = root.walk(); let mut stack = vec![root]; let mut command_nodes = Vec::new(); while let Some(node) = stack.pop() { let kind = node.kind(); if node.is_named() { if !ALLOWED_KINDS.contains(&kind) { return None; } if kind == "command" { command_nodes.push(node); } } else { // Reject any punctuation / operator tokens that are not explicitly allowed. if kind.chars().any(|c| "&;|".contains(c)) && !ALLOWED_PUNCT_TOKENS.contains(&kind) { return None; } if !(ALLOWED_PUNCT_TOKENS.contains(&kind) || kind.trim().is_empty()) { // If it's a quote token or operator it's allowed above; we also allow whitespace tokens. // Any other punctuation like parentheses, braces, redirects, backticks, etc are rejected. return None; } } for child in node.children(&mut cursor) { stack.push(child); } } // Walk uses a stack (LIFO), so re-sort by position to restore source order. command_nodes.sort_by_key(Node::start_byte); let mut commands = Vec::new(); for node in command_nodes { if let Some(words) = parse_plain_command_from_node(node, src) { commands.push(words); } else { return None; } } Some(commands) } pub fn extract_bash_command(command: &[String]) -> Option<(&str, &str)> { let [shell, flag, script] = command else { return None; }; if !matches!(flag.as_str(), "-lc" | "-c") || !matches!( detect_shell_type(&PathBuf::from(shell)), Some(ShellType::Zsh) | Some(ShellType::Bash) | Some(ShellType::Sh) ) { return None; } Some((shell, script)) } /// Returns the sequence of plain commands within a `bash -lc "..."` or /// `zsh -lc "..."` invocation when the script only contains word-only commands /// joined by safe operators. 
pub fn parse_shell_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> { let (_, script) = extract_bash_command(command)?; let tree = try_parse_shell(script)?; try_parse_word_only_commands_sequence(&tree, script) } fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Vec<String>> { if cmd.kind() != "command" { return None; } let mut words = Vec::new(); let mut cursor = cmd.walk(); for child in cmd.named_children(&mut cursor) { match child.kind() { "command_name" => { let word_node = child.named_child(0)?; if word_node.kind() != "word" { return None; } words.push(word_node.utf8_text(src.as_bytes()).ok()?.to_owned()); } "word" | "number" => { words.push(child.utf8_text(src.as_bytes()).ok()?.to_owned()); } "string" => { if child.child_count() == 3 && child.child(0)?.kind() == "\"" && child.child(1)?.kind() == "string_content" && child.child(2)?.kind() == "\"" { words.push(child.child(1)?.utf8_text(src.as_bytes()).ok()?.to_owned()); } else { return None; } } "raw_string" => { let raw_string = child.utf8_text(src.as_bytes()).ok()?; let stripped = raw_string .strip_prefix('\'') .and_then(|s| s.strip_suffix('\'')); if let Some(s) = stripped { words.push(s.to_owned()); } else { return None; } } "concatenation" => { // Handle concatenated arguments like -g"*.py" let mut concatenated = String::new(); let mut concat_cursor = child.walk(); for part in child.named_children(&mut concat_cursor) { match part.kind() { "word" | "number" => { concatenated .push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str()); } "string" => { if part.child_count() == 3 && part.child(0)?.kind() == "\"" && part.child(1)?.kind() == "string_content" && part.child(2)?.kind() == "\"" { concatenated.push_str( part.child(1)? .utf8_text(src.as_bytes()) .ok()? 
.to_owned() .as_str(), ); } else { return None; } } "raw_string" => { let raw_string = part.utf8_text(src.as_bytes()).ok()?; let stripped = raw_string .strip_prefix('\'') .and_then(|s| s.strip_suffix('\''))?; concatenated.push_str(stripped); } _ => return None, } } if concatenated.is_empty() { return None; } words.push(concatenated); } _ => return None, } } Some(words) } #[cfg(test)] mod tests { use super::*; fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> { let tree = try_parse_shell(src)?; try_parse_word_only_commands_sequence(&tree, src) } #[test] fn accepts_single_simple_command() { let cmds = parse_seq("ls -1").unwrap(); assert_eq!(cmds, vec![vec!["ls".to_string(), "-1".to_string()]]); } #[test] fn accepts_multiple_commands_with_allowed_operators() { let src = "ls && pwd; echo 'hi there' | wc -l"; let cmds = parse_seq(src).unwrap(); let expected: Vec<Vec<String>> = vec![ vec!["ls".to_string()], vec!["pwd".to_string()], vec!["echo".to_string(), "hi there".to_string()], vec!["wc".to_string(), "-l".to_string()], ]; assert_eq!(cmds, expected); } #[test] fn extracts_double_and_single_quoted_strings() { let cmds = parse_seq("echo \"hello world\"").unwrap(); assert_eq!( cmds, vec![vec!["echo".to_string(), "hello world".to_string()]] ); let cmds2 = parse_seq("echo 'hi there'").unwrap(); assert_eq!( cmds2, vec![vec!["echo".to_string(), "hi there".to_string()]] ); } #[test] fn accepts_numbers_as_words() { let cmds = parse_seq("echo 123 456").unwrap(); assert_eq!( cmds, vec![vec![ "echo".to_string(), "123".to_string(), "456".to_string() ]] ); } #[test] fn rejects_parentheses_and_subshells() { assert!(parse_seq("(ls)").is_none()); assert!(parse_seq("ls || (pwd && echo hi)").is_none()); } #[test] fn rejects_redirections_and_unsupported_operators() { assert!(parse_seq("ls > out.txt").is_none()); assert!(parse_seq("echo hi & echo bye").is_none()); } #[test] fn rejects_command_and_process_substitutions_and_expansions() { assert!(parse_seq("echo $(pwd)").is_none()); 
assert!(parse_seq("echo `pwd`").is_none()); assert!(parse_seq("echo $HOME").is_none()); assert!(parse_seq("echo \"hi $USER\"").is_none()); } #[test] fn rejects_variable_assignment_prefix() { assert!(parse_seq("FOO=bar ls").is_none()); } #[test] fn rejects_trailing_operator_parse_error() { assert!(parse_seq("ls &&").is_none()); } #[test] fn parse_zsh_lc_plain_commands() { let command = vec!["zsh".to_string(), "-lc".to_string(), "ls".to_string()]; let parsed = parse_shell_lc_plain_commands(&command).unwrap(); assert_eq!(parsed, vec![vec!["ls".to_string()]]); } #[test] fn accepts_concatenated_flag_and_value() { // Test case: -g"*.py" (flag directly concatenated with quoted value) let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap(); assert_eq!( cmds, vec![vec![ "rg".to_string(), "-n".to_string(), "foo".to_string(), "-g*.py".to_string(), ]] ); } #[test] fn accepts_concatenated_flag_with_single_quotes() { let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap(); assert_eq!( cmds, vec![vec![ "grep".to_string(), "-n".to_string(), "pattern".to_string(), "-g*.txt".to_string(), ]] ); } #[test] fn rejects_concatenation_with_variable_substitution() { // Environment variables in concatenated strings should be rejected assert!(parse_seq("rg -g\"$VAR\" pattern").is_none()); assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none()); } #[test] fn rejects_concatenation_with_command_substitution() { // Command substitution in concatenated strings should be rejected assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none()); assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/client.rs
codex-rs/core/src/client.rs
use std::sync::Arc; use crate::api_bridge::auth_provider_from_auth; use crate::api_bridge::map_api_error; use codex_api::AggregateStreamExt; use codex_api::ChatClient as ApiChatClient; use codex_api::CompactClient as ApiCompactClient; use codex_api::CompactionInput as ApiCompactionInput; use codex_api::Prompt as ApiPrompt; use codex_api::RequestTelemetry; use codex_api::ReqwestTransport; use codex_api::ResponseStream as ApiResponseStream; use codex_api::ResponsesClient as ApiResponsesClient; use codex_api::ResponsesOptions as ApiResponsesOptions; use codex_api::SseTelemetry; use codex_api::TransportError; use codex_api::common::Reasoning; use codex_api::create_text_param_for_request; use codex_api::error::ApiError; use codex_app_server_protocol::AuthMode; use codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; use codex_protocol::models::ResponseItem; use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; use codex_protocol::protocol::SessionSource; use eventsource_stream::Event; use eventsource_stream::EventStreamError; use futures::StreamExt; use http::HeaderMap as ApiHeaderMap; use http::HeaderValue; use http::StatusCode as HttpStatusCode; use reqwest::StatusCode; use serde_json::Value; use std::time::Duration; use tokio::sync::mpsc; use tracing::warn; use crate::AuthManager; use crate::auth::RefreshTokenError; use crate::client_common::Prompt; use crate::client_common::ResponseEvent; use crate::client_common::ResponseStream; use crate::config::Config; use crate::default_client::build_reqwest_client; use crate::error::CodexErr; use crate::error::Result; use crate::features::FEATURES; use crate::flags::CODEX_RS_SSE_FIXTURE; use crate::model_provider_info::ModelProviderInfo; use crate::model_provider_info::WireApi; use crate::models_manager::model_family::ModelFamily; use crate::tools::spec::create_tools_json_for_chat_completions_api; use 
crate::tools::spec::create_tools_json_for_responses_api; #[derive(Debug, Clone)] pub struct ModelClient { config: Arc<Config>, auth_manager: Option<Arc<AuthManager>>, model_family: ModelFamily, otel_manager: OtelManager, provider: ModelProviderInfo, conversation_id: ConversationId, effort: Option<ReasoningEffortConfig>, summary: ReasoningSummaryConfig, session_source: SessionSource, } #[allow(clippy::too_many_arguments)] impl ModelClient { pub fn new( config: Arc<Config>, auth_manager: Option<Arc<AuthManager>>, model_family: ModelFamily, otel_manager: OtelManager, provider: ModelProviderInfo, effort: Option<ReasoningEffortConfig>, summary: ReasoningSummaryConfig, conversation_id: ConversationId, session_source: SessionSource, ) -> Self { Self { config, auth_manager, model_family, otel_manager, provider, conversation_id, effort, summary, session_source, } } pub fn get_model_context_window(&self) -> Option<i64> { let model_family = self.get_model_family(); let effective_context_window_percent = model_family.effective_context_window_percent; model_family .context_window .map(|w| w.saturating_mul(effective_context_window_percent) / 100) } pub fn config(&self) -> Arc<Config> { Arc::clone(&self.config) } pub fn provider(&self) -> &ModelProviderInfo { &self.provider } /// Streams a single model turn using either the Responses or Chat /// Completions wire API, depending on the configured provider. /// /// For Chat providers, the underlying stream is optionally aggregated /// based on the `show_raw_agent_reasoning` flag in the config. 
pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> { match self.provider.wire_api { WireApi::Responses => self.stream_responses_api(prompt).await, WireApi::Chat => { let api_stream = self.stream_chat_completions(prompt).await?; if self.config.show_raw_agent_reasoning { Ok(map_response_stream( api_stream.streaming_mode(), self.otel_manager.clone(), )) } else { Ok(map_response_stream( api_stream.aggregate(), self.otel_manager.clone(), )) } } } } /// Streams a turn via the OpenAI Chat Completions API. /// /// This path is only used when the provider is configured with /// `WireApi::Chat`; it does not support `output_schema` today. async fn stream_chat_completions(&self, prompt: &Prompt) -> Result<ApiResponseStream> { if prompt.output_schema.is_some() { return Err(CodexErr::UnsupportedOperation( "output_schema is not supported for Chat Completions API".to_string(), )); } let auth_manager = self.auth_manager.clone(); let model_family = self.get_model_family(); let instructions = prompt.get_full_instructions(&model_family).into_owned(); let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?; let api_prompt = build_api_prompt(prompt, instructions, tools_json); let conversation_id = self.conversation_id.to_string(); let session_source = self.session_source.clone(); let mut refreshed = false; loop { let auth = auth_manager.as_ref().and_then(|m| m.auth()); let api_provider = self .provider .to_api_provider(auth.as_ref().map(|a| a.mode))?; let api_auth = auth_provider_from_auth(auth.clone(), &self.provider).await?; let transport = ReqwestTransport::new(build_reqwest_client()); let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry(); let client = ApiChatClient::new(transport, api_provider, api_auth) .with_telemetry(Some(request_telemetry), Some(sse_telemetry)); let stream_result = client .stream_prompt( &self.get_model(), &api_prompt, Some(conversation_id.clone()), Some(session_source.clone()), ) .await; match 
stream_result { Ok(stream) => return Ok(stream), Err(ApiError::Transport(TransportError::Http { status, .. })) if status == StatusCode::UNAUTHORIZED => { handle_unauthorized(status, &mut refreshed, &auth_manager, &auth).await?; continue; } Err(err) => return Err(map_api_error(err)), } } } /// Streams a turn via the OpenAI Responses API. /// /// Handles SSE fixtures, reasoning summaries, verbosity, and the /// `text` controls used for output schemas. async fn stream_responses_api(&self, prompt: &Prompt) -> Result<ResponseStream> { if let Some(path) = &*CODEX_RS_SSE_FIXTURE { warn!(path, "Streaming from fixture"); let stream = codex_api::stream_from_fixture(path, self.provider.stream_idle_timeout()) .map_err(map_api_error)?; return Ok(map_response_stream(stream, self.otel_manager.clone())); } let auth_manager = self.auth_manager.clone(); let model_family = self.get_model_family(); let instructions = prompt.get_full_instructions(&model_family).into_owned(); let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?; let reasoning = if model_family.supports_reasoning_summaries { Some(Reasoning { effort: self.effort.or(model_family.default_reasoning_effort), summary: if self.summary == ReasoningSummaryConfig::None { None } else { Some(self.summary) }, }) } else { None }; let include: Vec<String> = if reasoning.is_some() { vec!["reasoning.encrypted_content".to_string()] } else { vec![] }; let verbosity = if model_family.support_verbosity { self.config .model_verbosity .or(model_family.default_verbosity) } else { if self.config.model_verbosity.is_some() { warn!( "model_verbosity is set but ignored as the model does not support verbosity: {}", model_family.family ); } None }; let text = create_text_param_for_request(verbosity, &prompt.output_schema); let api_prompt = build_api_prompt(prompt, instructions.clone(), tools_json); let conversation_id = self.conversation_id.to_string(); let session_source = self.session_source.clone(); let mut refreshed = 
false; loop { let auth = auth_manager.as_ref().and_then(|m| m.auth()); let api_provider = self .provider .to_api_provider(auth.as_ref().map(|a| a.mode))?; let api_auth = auth_provider_from_auth(auth.clone(), &self.provider).await?; let transport = ReqwestTransport::new(build_reqwest_client()); let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry(); let client = ApiResponsesClient::new(transport, api_provider, api_auth) .with_telemetry(Some(request_telemetry), Some(sse_telemetry)); let options = ApiResponsesOptions { reasoning: reasoning.clone(), include: include.clone(), prompt_cache_key: Some(conversation_id.clone()), text: text.clone(), store_override: None, conversation_id: Some(conversation_id.clone()), session_source: Some(session_source.clone()), extra_headers: beta_feature_headers(&self.config), }; let stream_result = client .stream_prompt(&self.get_model(), &api_prompt, options) .await; match stream_result { Ok(stream) => { return Ok(map_response_stream(stream, self.otel_manager.clone())); } Err(ApiError::Transport(TransportError::Http { status, .. })) if status == StatusCode::UNAUTHORIZED => { handle_unauthorized(status, &mut refreshed, &auth_manager, &auth).await?; continue; } Err(err) => return Err(map_api_error(err)), } } } pub fn get_provider(&self) -> ModelProviderInfo { self.provider.clone() } pub fn get_otel_manager(&self) -> OtelManager { self.otel_manager.clone() } pub fn get_session_source(&self) -> SessionSource { self.session_source.clone() } /// Returns the currently configured model slug. pub fn get_model(&self) -> String { self.get_model_family().get_model_slug().to_string() } /// Returns the currently configured model family. pub fn get_model_family(&self) -> ModelFamily { self.model_family.clone() } /// Returns the current reasoning effort setting. pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> { self.effort } /// Returns the current reasoning summary setting. 
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig { self.summary } pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> { self.auth_manager.clone() } /// Compacts the current conversation history using the Compact endpoint. /// /// This is a unary call (no streaming) that returns a new list of /// `ResponseItem`s representing the compacted transcript. pub async fn compact_conversation_history(&self, prompt: &Prompt) -> Result<Vec<ResponseItem>> { if prompt.input.is_empty() { return Ok(Vec::new()); } let auth_manager = self.auth_manager.clone(); let auth = auth_manager.as_ref().and_then(|m| m.auth()); let api_provider = self .provider .to_api_provider(auth.as_ref().map(|a| a.mode))?; let api_auth = auth_provider_from_auth(auth.clone(), &self.provider).await?; let transport = ReqwestTransport::new(build_reqwest_client()); let request_telemetry = self.build_request_telemetry(); let client = ApiCompactClient::new(transport, api_provider, api_auth) .with_telemetry(Some(request_telemetry)); let instructions = prompt .get_full_instructions(&self.get_model_family()) .into_owned(); let payload = ApiCompactionInput { model: &self.get_model(), input: &prompt.input, instructions: &instructions, }; let mut extra_headers = ApiHeaderMap::new(); if let SessionSource::SubAgent(sub) = &self.session_source { let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub { label.clone() } else { serde_json::to_value(sub) .ok() .and_then(|v| v.as_str().map(std::string::ToString::to_string)) .unwrap_or_else(|| "other".to_string()) }; if let Ok(val) = HeaderValue::from_str(&subagent) { extra_headers.insert("x-openai-subagent", val); } } client .compact_input(&payload, extra_headers) .await .map_err(map_api_error) } } impl ModelClient { /// Builds request and SSE telemetry for streaming API calls (Chat/Responses). 
fn build_streaming_telemetry(&self) -> (Arc<dyn RequestTelemetry>, Arc<dyn SseTelemetry>) { let telemetry = Arc::new(ApiTelemetry::new(self.otel_manager.clone())); let request_telemetry: Arc<dyn RequestTelemetry> = telemetry.clone(); let sse_telemetry: Arc<dyn SseTelemetry> = telemetry; (request_telemetry, sse_telemetry) } /// Builds request telemetry for unary API calls (e.g., Compact endpoint). fn build_request_telemetry(&self) -> Arc<dyn RequestTelemetry> { let telemetry = Arc::new(ApiTelemetry::new(self.otel_manager.clone())); let request_telemetry: Arc<dyn RequestTelemetry> = telemetry; request_telemetry } } /// Adapts the core `Prompt` type into the `codex-api` payload shape. fn build_api_prompt(prompt: &Prompt, instructions: String, tools_json: Vec<Value>) -> ApiPrompt { ApiPrompt { instructions, input: prompt.get_formatted_input(), tools: tools_json, parallel_tool_calls: prompt.parallel_tool_calls, output_schema: prompt.output_schema.clone(), } } fn beta_feature_headers(config: &Config) -> ApiHeaderMap { let enabled = FEATURES .iter() .filter_map(|spec| { if spec.stage.beta_menu_description().is_some() && config.features.enabled(spec.id) { Some(spec.key) } else { None } }) .collect::<Vec<_>>(); let value = enabled.join(","); let mut headers = ApiHeaderMap::new(); if !value.is_empty() && let Ok(header_value) = HeaderValue::from_str(value.as_str()) { headers.insert("x-codex-beta-features", header_value); } headers } fn map_response_stream<S>(api_stream: S, otel_manager: OtelManager) -> ResponseStream where S: futures::Stream<Item = std::result::Result<ResponseEvent, ApiError>> + Unpin + Send + 'static, { let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600); tokio::spawn(async move { let mut logged_error = false; let mut api_stream = api_stream; while let Some(event) = api_stream.next().await { match event { Ok(ResponseEvent::Completed { response_id, token_usage, }) => { if let Some(usage) = &token_usage { otel_manager.sse_event_completed( 
usage.input_tokens, usage.output_tokens, Some(usage.cached_input_tokens), Some(usage.reasoning_output_tokens), usage.total_tokens, ); } if tx_event .send(Ok(ResponseEvent::Completed { response_id, token_usage, })) .await .is_err() { return; } } Ok(event) => { if tx_event.send(Ok(event)).await.is_err() { return; } } Err(err) => { let mapped = map_api_error(err); if !logged_error { otel_manager.see_event_completed_failed(&mapped); logged_error = true; } if tx_event.send(Err(mapped)).await.is_err() { return; } } } } }); ResponseStream { rx_event } } /// Handles a 401 response by optionally refreshing ChatGPT tokens once. /// /// When refresh succeeds, the caller should retry the API call; otherwise /// the mapped `CodexErr` is returned to the caller. async fn handle_unauthorized( status: StatusCode, refreshed: &mut bool, auth_manager: &Option<Arc<AuthManager>>, auth: &Option<crate::auth::CodexAuth>, ) -> Result<()> { if *refreshed { return Err(map_unauthorized_status(status)); } if let Some(manager) = auth_manager.as_ref() && let Some(auth) = auth.as_ref() && auth.mode == AuthMode::ChatGPT { match manager.refresh_token().await { Ok(_) => { *refreshed = true; Ok(()) } Err(RefreshTokenError::Permanent(failed)) => Err(CodexErr::RefreshTokenFailed(failed)), Err(RefreshTokenError::Transient(other)) => Err(CodexErr::Io(other)), } } else { Err(map_unauthorized_status(status)) } } fn map_unauthorized_status(status: StatusCode) -> CodexErr { map_api_error(ApiError::Transport(TransportError::Http { status, headers: None, body: None, })) } struct ApiTelemetry { otel_manager: OtelManager, } impl ApiTelemetry { fn new(otel_manager: OtelManager) -> Self { Self { otel_manager } } } impl RequestTelemetry for ApiTelemetry { fn on_request( &self, attempt: u64, status: Option<HttpStatusCode>, error: Option<&TransportError>, duration: Duration, ) { let error_message = error.map(std::string::ToString::to_string); self.otel_manager.record_api_request( attempt, status.map(|s| s.as_u16()), 
error_message.as_deref(), duration, ); } } impl SseTelemetry for ApiTelemetry { fn on_sse_poll( &self, result: &std::result::Result< Option<std::result::Result<Event, EventStreamError<TransportError>>>, tokio::time::error::Elapsed, >, duration: Duration, ) { self.otel_manager.log_sse_event(result, duration); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/flags.rs
codex-rs/core/src/flags.rs
use env_flags::env_flags; env_flags! { /// Fixture path for offline tests (see client.rs). pub CODEX_RS_SSE_FIXTURE: Option<&str> = None; }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/function_tool.rs
codex-rs/core/src/function_tool.rs
use thiserror::Error; #[derive(Debug, Error, PartialEq)] pub enum FunctionCallError { #[error("{0}")] RespondToModel(String), #[error("{0}")] #[allow(dead_code)] // TODO(jif) fix in a follow-up PR Denied(String), #[error("LocalShellCall without call_id or id")] MissingLocalShellCallId, #[error("Fatal error: {0}")] Fatal(String), }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/error.rs
codex-rs/core/src/error.rs
use crate::exec::ExecToolCallOutput;
use crate::token_data::KnownPlan;
use crate::token_data::PlanType;
use crate::truncate::TruncationPolicy;
use crate::truncate::truncate_text;
use chrono::DateTime;
use chrono::Datelike;
use chrono::Local;
use chrono::Utc;
use codex_async_utils::CancelErr;
use codex_protocol::ConversationId;
use codex_protocol::protocol::CodexErrorInfo;
use codex_protocol::protocol::ErrorEvent;
use codex_protocol::protocol::RateLimitSnapshot;
use reqwest::StatusCode;
use serde_json;
use std::io;
use std::time::Duration;
use thiserror::Error;
use tokio::task::JoinError;

/// Crate-wide result alias: every fallible core operation returns `CodexErr`.
pub type Result<T> = std::result::Result<T, CodexErr>;

/// Limit UI error messages to a reasonable size while keeping useful context.
const ERROR_MESSAGE_UI_MAX_BYTES: usize = 2 * 1024; // 2 KiB

/// Failures produced while running a command under the sandbox.
///
/// `Denied` and `Timeout` carry the captured process output (boxed to keep the
/// enum small); the remaining variants wrap platform-specific setup errors.
#[derive(Error, Debug)]
pub enum SandboxErr {
    /// Error from sandbox execution
    #[error(
        "sandbox denied exec error, exit code: {}, stdout: {}, stderr: {}",
        .output.exit_code,
        .output.stdout.text,
        .output.stderr.text
    )]
    Denied { output: Box<ExecToolCallOutput> },

    /// Error from linux seccomp filter setup
    #[cfg(target_os = "linux")]
    #[error("seccomp setup error")]
    SeccompInstall(#[from] seccompiler::Error),

    /// Error from linux seccomp backend
    #[cfg(target_os = "linux")]
    #[error("seccomp backend error")]
    SeccompBackend(#[from] seccompiler::BackendError),

    /// Command timed out
    #[error("command timed out")]
    Timeout { output: Box<ExecToolCallOutput> },

    /// Command was killed by a signal
    #[error("command was killed by a signal")]
    Signal(i32),

    /// Error from linux landlock
    #[error("Landlock was not able to fully enforce all sandbox rules")]
    LandlockRestrict,
}

/// Top-level error type for the core crate.
///
/// The `#[error(...)]` strings double as user-facing text (see
/// `get_error_message_ui` / `to_error_event` below), so treat them as part of
/// the public contract — tests elsewhere in this file assert on them verbatim.
#[derive(Error, Debug)]
pub enum CodexErr {
    #[error("turn aborted. Something went wrong? Hit `/feedback` to report the issue.")]
    TurnAborted,

    /// Returned by ResponsesClient when the SSE stream disconnects or errors out **after** the HTTP
    /// handshake has succeeded but **before** it finished emitting `response.completed`.
    ///
    /// The Session loop treats this as a transient error and will automatically retry the turn.
    ///
    /// Optionally includes the requested delay before retrying the turn.
    #[error("stream disconnected before completion: {0}")]
    Stream(String, Option<Duration>),

    #[error(
        "Codex ran out of room in the model's context window. Start a new conversation or clear earlier history before retrying."
    )]
    ContextWindowExceeded,

    #[error("no conversation with id: {0}")]
    ConversationNotFound(ConversationId),

    #[error("session configured event was not the first event in the stream")]
    SessionConfiguredNotFirstEvent,

    /// Returned by run_command_stream when the spawned child process timed out (10s).
    #[error("timeout waiting for child process to exit")]
    Timeout,

    /// Returned by run_command_stream when the child could not be spawned (its stdout/stderr pipes
    /// could not be captured). Analogous to the previous `CodexError::Spawn` variant.
    #[error("spawn failed: child stdout/stderr not captured")]
    Spawn,

    /// Returned by run_command_stream when the user pressed Ctrl‑C (SIGINT). Session uses this to
    /// surface a polite FunctionCallOutput back to the model instead of crashing the CLI.
    #[error("interrupted (Ctrl-C). Something went wrong? Hit `/feedback` to report the issue.")]
    Interrupted,

    /// Unexpected HTTP status code.
    #[error("{0}")]
    UnexpectedStatus(UnexpectedResponseError),

    /// Invalid request.
    #[error("{0}")]
    InvalidRequest(String),

    /// Invalid image.
    #[error("Image poisoning")]
    InvalidImageRequest(),

    #[error("{0}")]
    UsageLimitReached(UsageLimitReachedError),

    #[error("{0}")]
    ResponseStreamFailed(ResponseStreamFailed),

    #[error("{0}")]
    ConnectionFailed(ConnectionFailedError),

    #[error("Quota exceeded. Check your plan and billing details.")]
    QuotaExceeded,

    #[error(
        "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
    )]
    UsageNotIncluded,

    #[error("We're currently experiencing high demand, which may cause temporary errors.")]
    InternalServerError,

    /// Retry limit exceeded.
    #[error("{0}")]
    RetryLimit(RetryLimitReachedError),

    /// Agent loop died unexpectedly
    #[error("internal error; agent loop died unexpectedly")]
    InternalAgentDied,

    /// Sandbox error
    #[error("sandbox error: {0}")]
    Sandbox(#[from] SandboxErr),

    #[error("codex-linux-sandbox was required but not provided")]
    LandlockSandboxExecutableNotProvided,

    #[error("unsupported operation: {0}")]
    UnsupportedOperation(String),

    #[error("{0}")]
    RefreshTokenFailed(RefreshTokenFailedError),

    #[error("Fatal error: {0}")]
    Fatal(String),

    // -----------------------------------------------------------------
    // Automatic conversions for common external error types
    // -----------------------------------------------------------------
    #[error(transparent)]
    Io(#[from] io::Error),

    #[error(transparent)]
    Json(#[from] serde_json::Error),

    #[cfg(target_os = "linux")]
    #[error(transparent)]
    LandlockRuleset(#[from] landlock::RulesetError),

    #[cfg(target_os = "linux")]
    #[error(transparent)]
    LandlockPathFd(#[from] landlock::PathFdError),

    #[error(transparent)]
    TokioJoin(#[from] JoinError),

    #[error("{0}")]
    EnvVar(EnvVarError),
}

// A cancelled async operation is surfaced to the user as an aborted turn.
impl From<CancelErr> for CodexErr {
    fn from(_: CancelErr) -> Self {
        CodexErr::TurnAborted
    }
}

/// The HTTP request never completed (connect/transport failure before a
/// response was available). Wraps the underlying `reqwest` error.
#[derive(Debug)]
pub struct ConnectionFailedError {
    pub source: reqwest::Error,
}

impl std::fmt::Display for ConnectionFailedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Connection failed: {}", self.source)
    }
}

/// The response body/stream failed while being read. Carries the `reqwest`
/// error plus, when available, the server-assigned request id for support.
#[derive(Debug)]
pub struct ResponseStreamFailed {
    pub source: reqwest::Error,
    pub request_id: Option<String>,
}

impl std::fmt::Display for ResponseStreamFailed {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Error while reading the server response: {}{}",
            self.source,
            // Append ", request id: <id>" only when an id was captured.
            self.request_id
                .as_ref()
                .map(|id| format!(", request id: {id}"))
                .unwrap_or_default()
        )
    }
}
/// Failure while refreshing an auth token. `message` is what the user sees
/// (via the derived `Display`); `reason` lets callers branch on the cause.
#[derive(Debug, Clone, PartialEq, Eq, Error)]
#[error("{message}")]
pub struct RefreshTokenFailedError {
    pub reason: RefreshTokenFailedReason,
    pub message: String,
}

impl RefreshTokenFailedError {
    /// Convenience constructor accepting anything convertible to `String`.
    pub fn new(reason: RefreshTokenFailedReason, message: impl Into<String>) -> Self {
        Self {
            reason,
            message: message.into(),
        }
    }
}

/// Why a token refresh failed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RefreshTokenFailedReason {
    Expired,
    Exhausted,
    Revoked,
    Other,
}

/// An HTTP response with a status the client did not expect; wrapped by
/// `CodexErr::UnexpectedStatus`.
#[derive(Debug)]
pub struct UnexpectedResponseError {
    pub status: StatusCode,
    pub body: String,
    pub request_id: Option<String>,
}

const CLOUDFLARE_BLOCKED_MESSAGE: &str = "Access blocked by Cloudflare. This usually happens when connecting from a restricted region";

impl UnexpectedResponseError {
    /// Collapse a Cloudflare 403 block page into a short, actionable message.
    /// Returns `None` for anything that isn't a FORBIDDEN response whose body
    /// mentions both "Cloudflare" and "blocked".
    fn friendly_message(&self) -> Option<String> {
        if self.status != StatusCode::FORBIDDEN {
            return None;
        }
        if !self.body.contains("Cloudflare") || !self.body.contains("blocked") {
            return None;
        }
        let mut message = format!("{CLOUDFLARE_BLOCKED_MESSAGE} (status {})", self.status);
        if let Some(id) = &self.request_id {
            message.push_str(&format!(", request id: {id}"));
        }
        Some(message)
    }
}

impl std::fmt::Display for UnexpectedResponseError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Prefer the simplified Cloudflare message; otherwise dump status/body.
        if let Some(friendly) = self.friendly_message() {
            write!(f, "{friendly}")
        } else {
            write!(
                f,
                "unexpected status {}: {}{}",
                self.status,
                self.body,
                self.request_id
                    .as_ref()
                    .map(|id| format!(", request id: {id}"))
                    .unwrap_or_default()
            )
        }
    }
}

impl std::error::Error for UnexpectedResponseError {}

/// Raised when the retry budget is exhausted; records the last HTTP status
/// observed and, when available, the request id.
#[derive(Debug)]
pub struct RetryLimitReachedError {
    pub status: StatusCode,
    pub request_id: Option<String>,
}

impl std::fmt::Display for RetryLimitReachedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "exceeded retry limit, last status: {}{}",
            self.status,
            self.request_id
                .as_ref()
                .map(|id| format!(", request id: {id}"))
                .unwrap_or_default()
        )
    }
}

/// Usage-limit error whose `Display` output is tailored per ChatGPT plan.
/// `resets_at`, when present, is rendered into a "try again at …" suffix.
#[derive(Debug)]
pub struct UsageLimitReachedError {
    pub(crate) plan_type: Option<PlanType>,
    pub(crate) resets_at: Option<DateTime<Utc>>,
    pub(crate) rate_limits: Option<RateLimitSnapshot>,
}

impl std::fmt::Display for UsageLimitReachedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Each plan gets its own upgrade/escalation hint; unknown plans fall
        // back to the generic message. The exact strings are asserted by the
        // tests below — do not edit casually.
        let message = match self.plan_type.as_ref() {
            Some(PlanType::Known(KnownPlan::Plus)) => format!(
                "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
                retry_suffix_after_or(self.resets_at.as_ref())
            ),
            Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
                format!(
                    "You've hit your usage limit. To get more access now, send a request to your admin{}",
                    retry_suffix_after_or(self.resets_at.as_ref())
                )
            }
            Some(PlanType::Known(KnownPlan::Free)) => {
                "You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
                    .to_string()
            }
            Some(PlanType::Known(KnownPlan::Pro)) => format!(
                "You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits{}",
                retry_suffix_after_or(self.resets_at.as_ref())
            ),
            Some(PlanType::Known(KnownPlan::Enterprise)) | Some(PlanType::Known(KnownPlan::Edu)) => format!(
                "You've hit your usage limit.{}",
                retry_suffix(self.resets_at.as_ref())
            ),
            Some(PlanType::Unknown(_)) | None => format!(
                "You've hit your usage limit.{}",
                retry_suffix(self.resets_at.as_ref())
            ),
        };
        write!(f, "{message}")
    }
}

/// " Try again at <time>." when a reset time is known, else " Try again later."
/// (note the leading space — callers concatenate directly).
fn retry_suffix(resets_at: Option<&DateTime<Utc>>) -> String {
    if let Some(resets_at) = resets_at {
        let formatted = format_retry_timestamp(resets_at);
        format!(" Try again at {formatted}.")
    } else {
        " Try again later.".to_string()
    }
}

/// Variant of `retry_suffix` used after an alternative action ("… or try again …").
fn retry_suffix_after_or(resets_at: Option<&DateTime<Utc>>) -> String {
    if let Some(resets_at) = resets_at {
        let formatted = format_retry_timestamp(resets_at);
        format!(" or try again at {formatted}.")
    } else {
        " or try again later.".to_string()
    }
}

/// Format the reset instant in the user's local timezone: time-only when it
/// falls on the current local day, otherwise "Mon 2nd, 2024 3:05 PM" style.
fn format_retry_timestamp(resets_at: &DateTime<Utc>) -> String {
    let local_reset = resets_at.with_timezone(&Local);
    let local_now = now_for_retry().with_timezone(&Local);
    if local_reset.date_naive() == local_now.date_naive() {
        local_reset.format("%-I:%M %p").to_string()
    } else {
        let suffix = day_suffix(local_reset.day());
        local_reset
            .format(&format!("%b %-d{suffix}, %Y %-I:%M %p"))
            .to_string()
    }
}

/// English ordinal suffix for a day-of-month (1st, 2nd, 3rd, 11th–13th th, …).
fn day_suffix(day: u32) -> &'static str {
    match day {
        // 11–13 are irregular: "11th", not "11st".
        11..=13 => "th",
        _ => match day % 10 {
            1 => "st",
            2 => "nd", // codespell:ignore
            3 => "rd",
            _ => "th",
        },
    }
}

// Test-only clock override so timestamp formatting is deterministic in tests.
#[cfg(test)]
thread_local! {
    static NOW_OVERRIDE: std::cell::RefCell<Option<DateTime<Utc>>> = const { std::cell::RefCell::new(None) };
}

/// Current time, honoring the test override when one is installed.
fn now_for_retry() -> DateTime<Utc> {
    #[cfg(test)]
    {
        if let Some(now) = NOW_OVERRIDE.with(|cell| *cell.borrow()) {
            return now;
        }
    }
    Utc::now()
}

#[derive(Debug)]
pub struct EnvVarError {
    /// Name of the environment variable that is missing.
    pub var: String,

    /// Optional instructions to help the user get a valid value for the
    /// variable and set it.
    pub instructions: Option<String>,
}

impl std::fmt::Display for EnvVarError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Missing environment variable: `{}`.", self.var)?;
        if let Some(instructions) = &self.instructions {
            write!(f, " {instructions}")?;
        }
        Ok(())
    }
}

impl CodexErr {
    /// Minimal shim so that existing `e.downcast_ref::<CodexErr>()` checks continue to compile
    /// after replacing `anyhow::Error` in the return signature. This mirrors the behavior of
    /// `anyhow::Error::downcast_ref` but works directly on our concrete enum.
    pub fn downcast_ref<T: std::any::Any>(&self) -> Option<&T> {
        (self as &dyn std::any::Any).downcast_ref::<T>()
    }

    /// Translate core error to client-facing protocol error.
    pub fn to_codex_protocol_error(&self) -> CodexErrorInfo {
        match self {
            CodexErr::ContextWindowExceeded => CodexErrorInfo::ContextWindowExceeded,
            CodexErr::UsageLimitReached(_)
            | CodexErr::QuotaExceeded
            | CodexErr::UsageNotIncluded => CodexErrorInfo::UsageLimitExceeded,
            CodexErr::RetryLimit(_) => CodexErrorInfo::ResponseTooManyFailedAttempts {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::ConnectionFailed(_) => CodexErrorInfo::HttpConnectionFailed {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::ResponseStreamFailed(_) => CodexErrorInfo::ResponseStreamConnectionFailed {
                http_status_code: self.http_status_code_value(),
            },
            CodexErr::RefreshTokenFailed(_) => CodexErrorInfo::Unauthorized,
            CodexErr::SessionConfiguredNotFirstEvent
            | CodexErr::InternalServerError
            | CodexErr::InternalAgentDied => CodexErrorInfo::InternalServerError,
            CodexErr::UnsupportedOperation(_) | CodexErr::ConversationNotFound(_) => {
                CodexErrorInfo::BadRequest
            }
            CodexErr::Sandbox(_) => CodexErrorInfo::SandboxError,
            _ => CodexErrorInfo::Other,
        }
    }

    /// Build the protocol-level `ErrorEvent` for this error, optionally
    /// prefixing the human-readable message with `message_prefix`.
    pub fn to_error_event(&self, message_prefix: Option<String>) -> ErrorEvent {
        let error_message = self.to_string();
        let message: String = match message_prefix {
            Some(prefix) => format!("{prefix}: {error_message}"),
            None => error_message,
        };
        ErrorEvent {
            message,
            codex_error_info: Some(self.to_codex_protocol_error()),
        }
    }

    /// HTTP status associated with this error, if any: taken from the wrapped
    /// error struct for retry/unexpected-status cases, or from the underlying
    /// `reqwest` error for connection/stream failures.
    pub fn http_status_code_value(&self) -> Option<u16> {
        let http_status_code = match self {
            CodexErr::RetryLimit(err) => Some(err.status),
            CodexErr::UnexpectedStatus(err) => Some(err.status),
            CodexErr::ConnectionFailed(err) => err.source.status(),
            CodexErr::ResponseStreamFailed(err) => err.source.status(),
            _ => None,
        };
        http_status_code.as_ref().map(StatusCode::as_u16)
    }
}

/// Render a user-facing message for `e`, truncated to
/// `ERROR_MESSAGE_UI_MAX_BYTES`. Sandbox denials prefer the aggregated output,
/// then stderr/stdout, then a generic exit-code message; sandbox timeouts are
/// presented plainly; everything else uses `Display`.
pub fn get_error_message_ui(e: &CodexErr) -> String {
    let message = match e {
        CodexErr::Sandbox(SandboxErr::Denied { output }) => {
            let aggregated = output.aggregated_output.text.trim();
            if !aggregated.is_empty() {
                // Trim only for the emptiness check; return the untrimmed text.
                output.aggregated_output.text.clone()
            } else {
                let stderr = output.stderr.text.trim();
                let stdout = output.stdout.text.trim();
                match (stderr.is_empty(), stdout.is_empty()) {
                    (false, false) => format!("{stderr}\n{stdout}"),
                    (false, true) => output.stderr.text.clone(),
                    (true, false) => output.stdout.text.clone(),
                    (true, true) => format!(
                        "command failed inside sandbox with exit code {}",
                        output.exit_code
                    ),
                }
            }
        }
        // Timeouts are not sandbox errors from a UX perspective; present them plainly
        CodexErr::Sandbox(SandboxErr::Timeout { output }) => {
            format!(
                "error: command timed out after {} ms",
                output.duration.as_millis()
            )
        }
        _ => e.to_string(),
    };
    truncate_text(
        &message,
        TruncationPolicy::Bytes(ERROR_MESSAGE_UI_MAX_BYTES),
    )
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::exec::StreamOutput;
    use chrono::DateTime;
    use chrono::Duration as ChronoDuration;
    use chrono::TimeZone;
    use chrono::Utc;
    use codex_protocol::protocol::RateLimitWindow;
    use pretty_assertions::assert_eq;
    use reqwest::Response;
    use reqwest::ResponseBuilderExt;
    use reqwest::StatusCode;
    use reqwest::Url;

    /// Shared fixture: a snapshot with both windows populated. The message
    /// formatting under test never reads it, but the struct requires it.
    fn rate_limit_snapshot() -> RateLimitSnapshot {
        let primary_reset_at = Utc
            .with_ymd_and_hms(2024, 1, 1, 1, 0, 0)
            .unwrap()
            .timestamp();
        let secondary_reset_at = Utc
            .with_ymd_and_hms(2024, 1, 1, 2, 0, 0)
            .unwrap()
            .timestamp();
        RateLimitSnapshot {
            primary: Some(RateLimitWindow {
                used_percent: 50.0,
                window_minutes: Some(60),
                resets_at: Some(primary_reset_at),
            }),
            secondary: Some(RateLimitWindow {
                used_percent: 30.0,
                window_minutes: Some(120),
                resets_at: Some(secondary_reset_at),
            }),
            credits: None,
            plan_type: None,
        }
    }

    /// Pin `now_for_retry` to `now` for the duration of `f`, then clear it.
    fn with_now_override<T>(now: DateTime<Utc>, f: impl FnOnce() -> T) -> T {
        NOW_OVERRIDE.with(|cell| {
            *cell.borrow_mut() = Some(now);
            let result = f();
            *cell.borrow_mut() = None;
            result
        })
    }

    #[test]
    fn usage_limit_reached_error_formats_plus_plan() {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Plus)),
            resets_at: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again later."
        );
    }

    #[test]
    fn sandbox_denied_uses_aggregated_output_when_stderr_empty() {
        let output = ExecToolCallOutput {
            exit_code: 77,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new("aggregate detail".to_string()),
            duration: Duration::from_millis(10),
            timed_out: false,
        };
        let err = CodexErr::Sandbox(SandboxErr::Denied {
            output: Box::new(output),
        });
        assert_eq!(get_error_message_ui(&err), "aggregate detail");
    }

    #[test]
    fn sandbox_denied_reports_both_streams_when_available() {
        let output = ExecToolCallOutput {
            exit_code: 9,
            stdout: StreamOutput::new("stdout detail".to_string()),
            stderr: StreamOutput::new("stderr detail".to_string()),
            aggregated_output: StreamOutput::new(String::new()),
            duration: Duration::from_millis(10),
            timed_out: false,
        };
        let err = CodexErr::Sandbox(SandboxErr::Denied {
            output: Box::new(output),
        });
        assert_eq!(get_error_message_ui(&err), "stderr detail\nstdout detail");
    }

    #[test]
    fn sandbox_denied_reports_stdout_when_no_stderr() {
        let output = ExecToolCallOutput {
            exit_code: 11,
            stdout: StreamOutput::new("stdout only".to_string()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new(String::new()),
            duration: Duration::from_millis(8),
            timed_out: false,
        };
        let err = CodexErr::Sandbox(SandboxErr::Denied {
            output: Box::new(output),
        });
        assert_eq!(get_error_message_ui(&err), "stdout only");
    }

    #[test]
    fn to_error_event_handles_response_stream_failed() {
        // Build a real reqwest error from a synthetic 429 response.
        let response = http::Response::builder()
            .status(StatusCode::TOO_MANY_REQUESTS)
            .url(Url::parse("http://example.com").unwrap())
            .body("")
            .unwrap();
        let source = Response::from(response).error_for_status_ref().unwrap_err();
        let err = CodexErr::ResponseStreamFailed(ResponseStreamFailed {
            source,
            request_id: Some("req-123".to_string()),
        });
        let event = err.to_error_event(Some("prefix".to_string()));
        assert_eq!(
            event.message,
            "prefix: Error while reading the server response: HTTP status client error (429 Too Many Requests) for url (http://example.com/), request id: req-123"
        );
        assert_eq!(
            event.codex_error_info,
            Some(CodexErrorInfo::ResponseStreamConnectionFailed {
                http_status_code: Some(429)
            })
        );
    }

    #[test]
    fn sandbox_denied_reports_exit_code_when_no_output_available() {
        let output = ExecToolCallOutput {
            exit_code: 13,
            stdout: StreamOutput::new(String::new()),
            stderr: StreamOutput::new(String::new()),
            aggregated_output: StreamOutput::new(String::new()),
            duration: Duration::from_millis(5),
            timed_out: false,
        };
        let err = CodexErr::Sandbox(SandboxErr::Denied {
            output: Box::new(output),
        });
        assert_eq!(
            get_error_message_ui(&err),
            "command failed inside sandbox with exit code 13"
        );
    }

    #[test]
    fn usage_limit_reached_error_formats_free_plan() {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Free)),
            resets_at: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. Upgrade to Plus to continue using Codex (https://openai.com/chatgpt/pricing)."
        );
    }

    #[test]
    fn usage_limit_reached_error_formats_default_when_none() {
        let err = UsageLimitReachedError {
            plan_type: None,
            resets_at: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. Try again later."
        );
    }

    #[test]
    fn usage_limit_reached_error_formats_team_plan() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at = base + ChronoDuration::hours(1);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: Some(PlanType::Known(KnownPlan::Team)),
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!(
                "You've hit your usage limit. To get more access now, send a request to your admin or try again at {expected_time}."
            );
            assert_eq!(err.to_string(), expected);
        });
    }

    #[test]
    fn usage_limit_reached_error_formats_business_plan_without_reset() {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Business)),
            resets_at: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. To get more access now, send a request to your admin or try again later."
        );
    }

    #[test]
    fn usage_limit_reached_error_formats_default_for_other_plans() {
        let err = UsageLimitReachedError {
            plan_type: Some(PlanType::Known(KnownPlan::Enterprise)),
            resets_at: None,
            rate_limits: Some(rate_limit_snapshot()),
        };
        assert_eq!(
            err.to_string(),
            "You've hit your usage limit. Try again later."
        );
    }

    #[test]
    fn usage_limit_reached_error_formats_pro_plan_with_reset() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at = base + ChronoDuration::hours(1);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: Some(PlanType::Known(KnownPlan::Pro)),
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!(
                "You've hit your usage limit. Visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
            );
            assert_eq!(err.to_string(), expected);
        });
    }

    #[test]
    fn usage_limit_reached_includes_minutes_when_available() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at = base + ChronoDuration::minutes(5);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: None,
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
            assert_eq!(err.to_string(), expected);
        });
    }

    #[test]
    fn unexpected_status_cloudflare_html_is_simplified() {
        let err = UnexpectedResponseError {
            status: StatusCode::FORBIDDEN,
            body: "<html><body>Cloudflare error: Sorry, you have been blocked</body></html>"
                .to_string(),
            request_id: Some("ray-id".to_string()),
        };
        let status = StatusCode::FORBIDDEN.to_string();
        assert_eq!(
            err.to_string(),
            format!("{CLOUDFLARE_BLOCKED_MESSAGE} (status {status}), request id: ray-id")
        );
    }

    #[test]
    fn unexpected_status_non_html_is_unchanged() {
        let err = UnexpectedResponseError {
            status: StatusCode::FORBIDDEN,
            body: "plain text error".to_string(),
            request_id: None,
        };
        let status = StatusCode::FORBIDDEN.to_string();
        assert_eq!(
            err.to_string(),
            format!("unexpected status {status}: plain text error")
        );
    }

    #[test]
    fn usage_limit_reached_includes_hours_and_minutes() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at = base + ChronoDuration::hours(3) + ChronoDuration::minutes(32);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: Some(PlanType::Known(KnownPlan::Plus)),
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!(
                "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing), visit https://chatgpt.com/codex/settings/usage to purchase more credits or try again at {expected_time}."
            );
            assert_eq!(err.to_string(), expected);
        });
    }

    #[test]
    fn usage_limit_reached_includes_days_hours_minutes() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at =
            base + ChronoDuration::days(2) + ChronoDuration::hours(3) + ChronoDuration::minutes(5);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: None,
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
            assert_eq!(err.to_string(), expected);
        });
    }

    #[test]
    fn usage_limit_reached_less_than_minute() {
        let base = Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap();
        let resets_at = base + ChronoDuration::seconds(30);
        with_now_override(base, move || {
            let expected_time = format_retry_timestamp(&resets_at);
            let err = UsageLimitReachedError {
                plan_type: None,
                resets_at: Some(resets_at),
                rate_limits: Some(rate_limit_snapshot()),
            };
            let expected = format!("You've hit your usage limit. Try again at {expected_time}.");
            assert_eq!(err.to_string(), expected);
        });
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/text_encoding.rs
codex-rs/core/src/text_encoding.rs
//! Text encoding detection and conversion utilities for shell output. //! //! Windows users frequently run into code pages such as CP1251 or CP866 when invoking commands //! through VS Code. Those bytes show up as invalid UTF-8 and used to be replaced with the standard //! Unicode replacement character. We now lean on `chardetng` and `encoding_rs` so we can //! automatically detect and decode the vast majority of legacy encodings before falling back to //! lossy UTF-8 decoding. use chardetng::EncodingDetector; use encoding_rs::Encoding; use encoding_rs::IBM866; use encoding_rs::WINDOWS_1252; /// Attempts to convert arbitrary bytes to UTF-8 with best-effort encoding detection. pub fn bytes_to_string_smart(bytes: &[u8]) -> String { if bytes.is_empty() { return String::new(); } if let Ok(utf8_str) = std::str::from_utf8(bytes) { return utf8_str.to_owned(); } let encoding = detect_encoding(bytes); decode_bytes(bytes, encoding) } // Windows-1252 reassigns a handful of 0x80-0x9F slots to smart punctuation (curly quotes, dashes, // ™). CP866 uses those *same byte values* for uppercase Cyrillic letters. When chardetng sees shell // snippets that mix these bytes with ASCII it sometimes guesses IBM866, so “smart quotes” render as // Cyrillic garbage (“УФЦ”) in VS Code. However, CP866 uppercase tokens are perfectly valid output // (e.g., `ПРИ test`) so we cannot flip every 0x80-0x9F byte to Windows-1252 either. The compromise // is to only coerce IBM866 to Windows-1252 when (a) the high bytes are exclusively the punctuation // values listed below and (b) we spot adjacent ASCII. This targets the real failure case without // clobbering legitimate Cyrillic text. If another code page has a similar collision, introduce a // dedicated allowlist (like this one) plus unit tests that capture the actual shell output we want // to preserve. Windows-1252 byte values for smart punctuation. 
const WINDOWS_1252_PUNCT_BYTES: [u8; 8] = [ 0x91, // ‘ (left single quotation mark) 0x92, // ’ (right single quotation mark) 0x93, // “ (left double quotation mark) 0x94, // ” (right double quotation mark) 0x95, // • (bullet) 0x96, // – (en dash) 0x97, // — (em dash) 0x99, // ™ (trade mark sign) ]; fn detect_encoding(bytes: &[u8]) -> &'static Encoding { let mut detector = EncodingDetector::new(); detector.feed(bytes, true); let (encoding, _is_confident) = detector.guess_assess(None, true); // chardetng occasionally reports IBM866 for short strings that only contain Windows-1252 “smart // punctuation” bytes (0x80-0x9F) because that range maps to Cyrillic letters in IBM866. When // those bytes show up alongside an ASCII word (typical shell output: `"“`test), we know the // intent was likely CP1252 quotes/dashes. Prefer WINDOWS_1252 in that specific situation so we // render the characters users expect instead of Cyrillic junk. References: // - Windows-1252 reserving 0x80-0x9F for curly quotes/dashes: // https://en.wikipedia.org/wiki/Windows-1252 // - CP866 mapping 0x93/0x94/0x96 to Cyrillic letters, so the same bytes show up as “УФЦ” when // mis-decoded: https://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/PC/CP866.TXT if encoding == IBM866 && looks_like_windows_1252_punctuation(bytes) { return WINDOWS_1252; } encoding } fn decode_bytes(bytes: &[u8], encoding: &'static Encoding) -> String { let (decoded, _, had_errors) = encoding.decode(bytes); if had_errors { return String::from_utf8_lossy(bytes).into_owned(); } decoded.into_owned() } /// Detect whether the byte stream looks like Windows-1252 “smart punctuation” wrapped around /// otherwise-ASCII text. /// /// Context: IBM866 and Windows-1252 share the 0x80-0x9F slot range. In IBM866 these bytes decode to /// Cyrillic letters, whereas Windows-1252 maps them to curly quotes and dashes. 
chardetng can guess /// IBM866 for short snippets that only contain those bytes, which turns shell output such as /// `“test”` into unreadable Cyrillic. To avoid that, we treat inputs comprising a handful of bytes /// from the problematic range plus ASCII letters as CP1252 punctuation. We deliberately do *not* /// cap how many of those punctuation bytes we accept: VS Code frequently prints several quoted /// phrases (e.g., `"foo" – "bar"`), and truncating the count would once again mis-decode those as /// Cyrillic. If we discover additional encodings with overlapping byte ranges, prefer adding /// encoding-specific byte allowlists like `WINDOWS_1252_PUNCT` and tests that exercise real-world /// shell snippets. fn looks_like_windows_1252_punctuation(bytes: &[u8]) -> bool { let mut saw_extended_punctuation = false; let mut saw_ascii_word = false; for &byte in bytes { if byte >= 0xA0 { return false; } if (0x80..=0x9F).contains(&byte) { if !is_windows_1252_punct(byte) { return false; } saw_extended_punctuation = true; } if byte.is_ascii_alphabetic() { saw_ascii_word = true; } } saw_extended_punctuation && saw_ascii_word } fn is_windows_1252_punct(byte: u8) -> bool { WINDOWS_1252_PUNCT_BYTES.contains(&byte) } #[cfg(test)] mod tests { use super::*; use encoding_rs::BIG5; use encoding_rs::EUC_KR; use encoding_rs::GBK; use encoding_rs::ISO_8859_2; use encoding_rs::ISO_8859_3; use encoding_rs::ISO_8859_4; use encoding_rs::ISO_8859_5; use encoding_rs::ISO_8859_6; use encoding_rs::ISO_8859_7; use encoding_rs::ISO_8859_8; use encoding_rs::ISO_8859_10; use encoding_rs::ISO_8859_13; use encoding_rs::SHIFT_JIS; use encoding_rs::WINDOWS_874; use encoding_rs::WINDOWS_1250; use encoding_rs::WINDOWS_1251; use encoding_rs::WINDOWS_1253; use encoding_rs::WINDOWS_1254; use encoding_rs::WINDOWS_1255; use encoding_rs::WINDOWS_1256; use encoding_rs::WINDOWS_1257; use encoding_rs::WINDOWS_1258; use pretty_assertions::assert_eq; #[test] fn test_utf8_passthrough() { // Fast path: when UTF-8 
is valid we should avoid copies and return as-is. let utf8_text = "Hello, мир! 世界"; let bytes = utf8_text.as_bytes(); assert_eq!(bytes_to_string_smart(bytes), utf8_text); } #[test] fn test_cp1251_russian_text() { // Cyrillic text emitted by PowerShell/WSL in CP1251 should decode cleanly. let bytes = b"\xEF\xF0\xE8\xEC\xE5\xF0"; // "пример" encoded with Windows-1251 assert_eq!(bytes_to_string_smart(bytes), "пример"); } #[test] fn test_cp1251_privet_word() { // Regression: CP1251 words like "Привет" must not be mis-identified as Windows-1252. let bytes = b"\xCF\xF0\xE8\xE2\xE5\xF2"; // "Привет" encoded with Windows-1251 assert_eq!(bytes_to_string_smart(bytes), "Привет"); } #[test] fn test_koi8_r_privet_word() { // KOI8-R output should decode to the original Cyrillic as well. let bytes = b"\xF0\xD2\xC9\xD7\xC5\xD4"; // "Привет" encoded with KOI8-R assert_eq!(bytes_to_string_smart(bytes), "Привет"); } #[test] fn test_cp866_russian_text() { // Legacy consoles (cmd.exe) commonly emit CP866 bytes for Cyrillic content. let bytes = b"\xAF\xE0\xA8\xAC\xA5\xE0"; // "пример" encoded with CP866 assert_eq!(bytes_to_string_smart(bytes), "пример"); } #[test] fn test_cp866_uppercase_text() { // Ensure the IBM866 heuristic still returns IBM866 for uppercase-only words. let bytes = b"\x8F\x90\x88"; // "ПРИ" encoded with CP866 uppercase letters assert_eq!(bytes_to_string_smart(bytes), "ПРИ"); } #[test] fn test_cp866_uppercase_followed_by_ascii() { // Regression test: uppercase CP866 tokens next to ASCII text should not be treated as // CP1252. let bytes = b"\x8F\x90\x88 test"; // "ПРИ test" encoded with CP866 uppercase letters followed by ASCII assert_eq!(bytes_to_string_smart(bytes), "ПРИ test"); } #[test] fn test_windows_1252_quotes() { // Smart detection should map Windows-1252 punctuation into proper Unicode. 
let bytes = b"\x93\x94test"; assert_eq!(bytes_to_string_smart(bytes), "\u{201C}\u{201D}test"); } #[test] fn test_windows_1252_multiple_quotes() { // Longer snippets of punctuation (e.g., “foo” – “bar”) should still flip to CP1252. let bytes = b"\x93foo\x94 \x96 \x93bar\x94"; assert_eq!( bytes_to_string_smart(bytes), "\u{201C}foo\u{201D} \u{2013} \u{201C}bar\u{201D}" ); } #[test] fn test_windows_1252_privet_gibberish_is_preserved() { // Windows-1252 cannot encode Cyrillic; if the input literally contains "ПÑ..." we should not "fix" it. let bytes = "Привет".as_bytes(); assert_eq!(bytes_to_string_smart(bytes), "Привет"); } #[test] fn test_iso8859_1_latin_text() { // ISO-8859-1 (code page 28591) is the Latin segment used by LatArCyrHeb. // encoding_rs unifies ISO-8859-1 with Windows-1252, so reuse that constant here. let (encoded, _, had_errors) = WINDOWS_1252.encode("Hello"); assert!(!had_errors, "failed to encode Latin sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Hello"); } #[test] fn test_iso8859_2_central_european_text() { // ISO-8859-2 (code page 28592) covers additional Central European glyphs. let (encoded, _, had_errors) = ISO_8859_2.encode("Příliš žluťoučký kůň"); assert!(!had_errors, "failed to encode ISO-8859-2 sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "Příliš žluťoučký kůň" ); } #[test] fn test_iso8859_3_south_europe_text() { // ISO-8859-3 (code page 28593) adds support for Maltese/Esperanto letters. // chardetng rarely distinguishes ISO-8859-3 from neighboring Latin code pages, so we rely on // an ASCII-only sample to ensure round-tripping still succeeds. let (encoded, _, had_errors) = ISO_8859_3.encode("Esperanto and Maltese"); assert!(!had_errors, "failed to encode ISO-8859-3 sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "Esperanto and Maltese" ); } #[test] fn test_iso8859_4_baltic_text() { // ISO-8859-4 (code page 28594) targets the Baltic/Nordic repertoire. 
let sample = "Šis ir rakstzīmju kodēšanas tests. Dažās valodās, kurās tiek \ izmantotas latīņu valodas burti, lēmuma pieņemšanai mums ir nepieciešams \ vairāk ieguldījuma."; let (encoded, _, had_errors) = ISO_8859_4.encode(sample); assert!(!had_errors, "failed to encode ISO-8859-4 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample); } #[test] fn test_iso8859_5_cyrillic_text() { // ISO-8859-5 (code page 28595) covers the Cyrillic portion. let (encoded, _, had_errors) = ISO_8859_5.encode("Привет"); assert!(!had_errors, "failed to encode Cyrillic sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Привет"); } #[test] fn test_iso8859_6_arabic_text() { // ISO-8859-6 (code page 28596) covers the Arabic glyphs. let (encoded, _, had_errors) = ISO_8859_6.encode("مرحبا"); assert!(!had_errors, "failed to encode Arabic sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "مرحبا"); } #[test] fn test_iso8859_7_greek_text() { // ISO-8859-7 (code page 28597) is used for Greek locales. let (encoded, _, had_errors) = ISO_8859_7.encode("Καλημέρα"); assert!(!had_errors, "failed to encode ISO-8859-7 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Καλημέρα"); } #[test] fn test_iso8859_8_hebrew_text() { // ISO-8859-8 (code page 28598) covers the Hebrew glyphs. let (encoded, _, had_errors) = ISO_8859_8.encode("שלום"); assert!(!had_errors, "failed to encode Hebrew sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "שלום"); } #[test] fn test_iso8859_9_turkish_text() { // ISO-8859-9 (code page 28599) mirrors Latin-1 but inserts Turkish letters. // encoding_rs exposes the equivalent Windows-1254 mapping. let (encoded, _, had_errors) = WINDOWS_1254.encode("İstanbul"); assert!(!had_errors, "failed to encode ISO-8859-9 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "İstanbul"); } #[test] fn test_iso8859_10_nordic_text() { // ISO-8859-10 (code page 28600) adds additional Nordic letters. 
let sample = "Þetta er prófun fyrir Ægir og Øystein."; let (encoded, _, had_errors) = ISO_8859_10.encode(sample); assert!(!had_errors, "failed to encode ISO-8859-10 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample); } #[test] fn test_iso8859_11_thai_text() { // ISO-8859-11 (code page 28601) mirrors TIS-620 / Windows-874 for Thai. let sample = "ภาษาไทยสำหรับการทดสอบ ISO-8859-11"; // encoding_rs exposes the equivalent Windows-874 encoding, so use that constant. let (encoded, _, had_errors) = WINDOWS_874.encode(sample); assert!(!had_errors, "failed to encode ISO-8859-11 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), sample); } // ISO-8859-12 was never standardized, and encodings 14–16 cannot be distinguished reliably // without the heuristics we removed (chardetng generally reports neighboring Latin pages), so // we intentionally omit coverage for those slots until the detector can identify them. #[test] fn test_iso8859_13_baltic_text() { // ISO-8859-13 (code page 28603) is common across Baltic languages. 
let (encoded, _, had_errors) = ISO_8859_13.encode("Sveiki"); assert!(!had_errors, "failed to encode ISO-8859-13 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Sveiki"); } #[test] fn test_windows_1250_central_european_text() { let (encoded, _, had_errors) = WINDOWS_1250.encode("Příliš žluťoučký kůň"); assert!(!had_errors, "failed to encode Central European sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "Příliš žluťoučký kůň" ); } #[test] fn test_windows_1251_encoded_text() { let (encoded, _, had_errors) = WINDOWS_1251.encode("Привет из Windows-1251"); assert!(!had_errors, "failed to encode Windows-1251 sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "Привет из Windows-1251" ); } #[test] fn test_windows_1253_greek_text() { let (encoded, _, had_errors) = WINDOWS_1253.encode("Γειά σου"); assert!(!had_errors, "failed to encode Greek sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Γειά σου"); } #[test] fn test_windows_1254_turkish_text() { let (encoded, _, had_errors) = WINDOWS_1254.encode("İstanbul"); assert!(!had_errors, "failed to encode Turkish sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "İstanbul"); } #[test] fn test_windows_1255_hebrew_text() { let (encoded, _, had_errors) = WINDOWS_1255.encode("שלום"); assert!(!had_errors, "failed to encode Windows-1255 Hebrew sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "שלום"); } #[test] fn test_windows_1256_arabic_text() { let (encoded, _, had_errors) = WINDOWS_1256.encode("مرحبا"); assert!(!had_errors, "failed to encode Windows-1256 Arabic sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "مرحبا"); } #[test] fn test_windows_1257_baltic_text() { let (encoded, _, had_errors) = WINDOWS_1257.encode("Pērkons"); assert!(!had_errors, "failed to encode Baltic sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Pērkons"); } #[test] fn test_windows_1258_vietnamese_text() { let (encoded, _, had_errors) = 
WINDOWS_1258.encode("Xin chào"); assert!(!had_errors, "failed to encode Vietnamese sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "Xin chào"); } #[test] fn test_windows_874_thai_text() { let (encoded, _, had_errors) = WINDOWS_874.encode("สวัสดีครับ นี่คือการทดสอบภาษาไทย"); assert!(!had_errors, "failed to encode Thai sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "สวัสดีครับ นี่คือการทดสอบภาษาไทย" ); } #[test] fn test_windows_932_shift_jis_text() { let (encoded, _, had_errors) = SHIFT_JIS.encode("こんにちは"); assert!(!had_errors, "failed to encode Shift-JIS sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "こんにちは"); } #[test] fn test_windows_936_gbk_text() { let (encoded, _, had_errors) = GBK.encode("你好,世界,这是一个测试"); assert!(!had_errors, "failed to encode GBK sample"); assert_eq!( bytes_to_string_smart(encoded.as_ref()), "你好,世界,这是一个测试" ); } #[test] fn test_windows_949_korean_text() { let (encoded, _, had_errors) = EUC_KR.encode("안녕하세요"); assert!(!had_errors, "failed to encode Korean sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "안녕하세요"); } #[test] fn test_windows_950_big5_text() { let (encoded, _, had_errors) = BIG5.encode("繁體"); assert!(!had_errors, "failed to encode Big5 sample"); assert_eq!(bytes_to_string_smart(encoded.as_ref()), "繁體"); } #[test] fn test_latin1_cafe() { // Latin-1 bytes remain common in Western-European locales; decode them directly. let bytes = b"caf\xE9"; // codespell:ignore caf assert_eq!(bytes_to_string_smart(bytes), "café"); } #[test] fn test_preserves_ansi_sequences() { // ANSI escape sequences should survive regardless of the detected encoding. let bytes = b"\x1b[31mred\x1b[0m"; assert_eq!(bytes_to_string_smart(bytes), "\x1b[31mred\x1b[0m"); } #[test] fn test_fallback_to_lossy() { // Completely invalid sequences fall back to the old lossy behavior. 
let invalid_bytes = [0xFF, 0xFE, 0xFD]; let result = bytes_to_string_smart(&invalid_bytes); assert_eq!(result, String::from_utf8_lossy(&invalid_bytes)); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/token_data.rs
codex-rs/core/src/token_data.rs
use base64::Engine; use serde::Deserialize; use serde::Serialize; use thiserror::Error; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Default)] pub struct TokenData { /// Flat info parsed from the JWT in auth.json. #[serde( deserialize_with = "deserialize_id_token", serialize_with = "serialize_id_token" )] pub id_token: IdTokenInfo, /// This is a JWT. pub access_token: String, pub refresh_token: String, pub account_id: Option<String>, } /// Flat subset of useful claims in id_token from auth.json. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct IdTokenInfo { pub email: Option<String>, /// The ChatGPT subscription plan type /// (e.g., "free", "plus", "pro", "business", "enterprise", "edu"). /// (Note: values may vary by backend.) pub(crate) chatgpt_plan_type: Option<PlanType>, /// Organization/workspace identifier associated with the token, if present. pub chatgpt_account_id: Option<String>, pub raw_jwt: String, } impl IdTokenInfo { pub fn get_chatgpt_plan_type(&self) -> Option<String> { self.chatgpt_plan_type.as_ref().map(|t| match t { PlanType::Known(plan) => format!("{plan:?}"), PlanType::Unknown(s) => s.clone(), }) } } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub(crate) enum PlanType { Known(KnownPlan), Unknown(String), } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub(crate) enum KnownPlan { Free, Plus, Pro, Team, Business, Enterprise, Edu, } #[derive(Deserialize)] struct IdClaims { #[serde(default)] email: Option<String>, #[serde(rename = "https://api.openai.com/auth", default)] auth: Option<AuthClaims>, } #[derive(Deserialize)] struct AuthClaims { #[serde(default)] chatgpt_plan_type: Option<PlanType>, #[serde(default)] chatgpt_account_id: Option<String>, } #[derive(Debug, Error)] pub enum IdTokenInfoError { #[error("invalid ID token format")] InvalidFormat, #[error(transparent)] Base64(#[from] base64::DecodeError), 
#[error(transparent)] Json(#[from] serde_json::Error), } pub fn parse_id_token(id_token: &str) -> Result<IdTokenInfo, IdTokenInfoError> { // JWT format: header.payload.signature let mut parts = id_token.split('.'); let (_header_b64, payload_b64, _sig_b64) = match (parts.next(), parts.next(), parts.next()) { (Some(h), Some(p), Some(s)) if !h.is_empty() && !p.is_empty() && !s.is_empty() => (h, p, s), _ => return Err(IdTokenInfoError::InvalidFormat), }; let payload_bytes = base64::engine::general_purpose::URL_SAFE_NO_PAD.decode(payload_b64)?; let claims: IdClaims = serde_json::from_slice(&payload_bytes)?; match claims.auth { Some(auth) => Ok(IdTokenInfo { email: claims.email, raw_jwt: id_token.to_string(), chatgpt_plan_type: auth.chatgpt_plan_type, chatgpt_account_id: auth.chatgpt_account_id, }), None => Ok(IdTokenInfo { email: claims.email, raw_jwt: id_token.to_string(), chatgpt_plan_type: None, chatgpt_account_id: None, }), } } fn deserialize_id_token<'de, D>(deserializer: D) -> Result<IdTokenInfo, D::Error> where D: serde::Deserializer<'de>, { let s = String::deserialize(deserializer)?; parse_id_token(&s).map_err(serde::de::Error::custom) } fn serialize_id_token<S>(id_token: &IdTokenInfo, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { serializer.serialize_str(&id_token.raw_jwt) } #[cfg(test)] mod tests { use super::*; use serde::Serialize; #[test] fn id_token_info_parses_email_and_plan() { #[derive(Serialize)] struct Header { alg: &'static str, typ: &'static str, } let header = Header { alg: "none", typ: "JWT", }; let payload = serde_json::json!({ "email": "user@example.com", "https://api.openai.com/auth": { "chatgpt_plan_type": "pro" } }); fn b64url_no_pad(bytes: &[u8]) -> String { base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes) } let header_b64 = b64url_no_pad(&serde_json::to_vec(&header).unwrap()); let payload_b64 = b64url_no_pad(&serde_json::to_vec(&payload).unwrap()); let signature_b64 = b64url_no_pad(b"sig"); let 
fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); let info = parse_id_token(&fake_jwt).expect("should parse"); assert_eq!(info.email.as_deref(), Some("user@example.com")); assert_eq!(info.get_chatgpt_plan_type().as_deref(), Some("Pro")); } #[test] fn id_token_info_handles_missing_fields() { #[derive(Serialize)] struct Header { alg: &'static str, typ: &'static str, } let header = Header { alg: "none", typ: "JWT", }; let payload = serde_json::json!({ "sub": "123" }); fn b64url_no_pad(bytes: &[u8]) -> String { base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes) } let header_b64 = b64url_no_pad(&serde_json::to_vec(&header).unwrap()); let payload_b64 = b64url_no_pad(&serde_json::to_vec(&payload).unwrap()); let signature_b64 = b64url_no_pad(b"sig"); let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); let info = parse_id_token(&fake_jwt).expect("should parse"); assert!(info.email.is_none()); assert!(info.get_chatgpt_plan_type().is_none()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/stream_events_utils.rs
codex-rs/core/src/stream_events_utils.rs
use std::pin::Pin;
use std::sync::Arc;

use codex_protocol::items::TurnItem;
use tokio_util::sync::CancellationToken;

use crate::codex::Session;
use crate::codex::TurnContext;
use crate::error::CodexErr;
use crate::error::Result;
use crate::function_tool::FunctionCallError;
use crate::parse_turn_item;
use crate::tools::parallel::ToolCallRuntime;
use crate::tools::router::ToolRouter;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use futures::Future;
use tracing::debug;
use tracing::instrument;

/// Boxed future resolving to the model-facing result of an in-flight tool call.
pub(crate) type InFlightFuture<'f> =
    Pin<Box<dyn Future<Output = Result<ResponseInputItem>> + Send + 'f>>;

/// Outcome of processing one completed output item from the model stream.
#[derive(Default)]
pub(crate) struct OutputItemResult {
    // Text of the assistant message, when this item was one.
    pub last_agent_message: Option<String>,
    // True when the model must be called again (e.g. to deliver a tool result).
    pub needs_follow_up: bool,
    // Deferred tool execution, when the item was a tool call.
    pub tool_future: Option<InFlightFuture<'static>>,
}

/// Shared per-turn state threaded through output-item handling.
pub(crate) struct HandleOutputCtx {
    pub sess: Arc<Session>,
    pub turn_context: Arc<TurnContext>,
    pub tool_runtime: ToolCallRuntime,
    pub cancellation_token: CancellationToken,
}

/// Handle a completed output item from the model stream, recording it and
/// queuing any tool execution futures. This records items immediately so
/// history and rollout stay in sync even if the turn is later cancelled.
#[instrument(level = "trace", skip_all)]
pub(crate) async fn handle_output_item_done(
    ctx: &mut HandleOutputCtx,
    item: ResponseItem,
    previously_active_item: Option<TurnItem>,
) -> Result<OutputItemResult> {
    let mut output = OutputItemResult::default();
    match ToolRouter::build_tool_call(ctx.sess.as_ref(), item.clone()).await {
        // The model emitted a tool call; log it, persist the item immediately, and queue the tool execution.
        Ok(Some(call)) => {
            let payload_preview = call.payload.log_payload().into_owned();
            tracing::info!("ToolCall: {} {}", call.tool_name, payload_preview);
            ctx.sess
                .record_conversation_items(&ctx.turn_context, std::slice::from_ref(&item))
                .await;
            // Child token: cancelling the turn cancels the tool call, but not vice versa.
            let cancellation_token = ctx.cancellation_token.child_token();
            // The future is queued rather than awaited, so tool calls can run in parallel.
            let tool_future: InFlightFuture<'static> = Box::pin(
                ctx.tool_runtime
                    .clone()
                    .handle_tool_call(call, cancellation_token),
            );
            output.needs_follow_up = true;
            output.tool_future = Some(tool_future);
        }
        // No tool call: convert messages/reasoning into turn items and mark them as complete.
        Ok(None) => {
            if let Some(turn_item) = handle_non_tool_response_item(&item).await {
                // Only emit "started" if no started event was already sent for this item.
                if previously_active_item.is_none() {
                    ctx.sess
                        .emit_turn_item_started(&ctx.turn_context, &turn_item)
                        .await;
                }
                ctx.sess
                    .emit_turn_item_completed(&ctx.turn_context, turn_item)
                    .await;
            }
            ctx.sess
                .record_conversation_items(&ctx.turn_context, std::slice::from_ref(&item))
                .await;
            let last_agent_message = last_assistant_message_from_item(&item);
            output.last_agent_message = last_agent_message;
        }
        // Guardrail: the model issued a LocalShellCall without an id; surface the error back into history.
        Err(FunctionCallError::MissingLocalShellCallId) => {
            let msg = "LocalShellCall without call_id or id";
            ctx.turn_context
                .client
                .get_otel_manager()
                .log_tool_failed("local_shell", msg);
            tracing::error!(msg);
            // NOTE: call_id is empty here because the model did not supply one.
            let response = ResponseInputItem::FunctionCallOutput {
                call_id: String::new(),
                output: FunctionCallOutputPayload {
                    content: msg.to_string(),
                    ..Default::default()
                },
            };
            // Record the offending item first, then the synthetic error output.
            ctx.sess
                .record_conversation_items(&ctx.turn_context, std::slice::from_ref(&item))
                .await;
            if let Some(response_item) = response_input_to_response_item(&response) {
                ctx.sess
                    .record_conversation_items(
                        &ctx.turn_context,
                        std::slice::from_ref(&response_item),
                    )
                    .await;
            }
            output.needs_follow_up = true;
        }
        // The tool request should be answered directly (or was denied); push that response into the transcript.
        Err(FunctionCallError::RespondToModel(message))
        | Err(FunctionCallError::Denied(message)) => {
            let response = ResponseInputItem::FunctionCallOutput {
                call_id: String::new(),
                output: FunctionCallOutputPayload {
                    content: message,
                    ..Default::default()
                },
            };
            ctx.sess
                .record_conversation_items(&ctx.turn_context, std::slice::from_ref(&item))
                .await;
            if let Some(response_item) = response_input_to_response_item(&response) {
                ctx.sess
                    .record_conversation_items(
                        &ctx.turn_context,
                        std::slice::from_ref(&response_item),
                    )
                    .await;
            }
            output.needs_follow_up = true;
        }
        // A fatal error occurred; surface it back into history.
        Err(FunctionCallError::Fatal(message)) => {
            return Err(CodexErr::Fatal(message));
        }
    }
    Ok(output)
}

/// Map a non-tool stream item to a [`TurnItem`]; returns `None` for item
/// kinds that do not surface in the turn transcript.
pub(crate) async fn handle_non_tool_response_item(item: &ResponseItem) -> Option<TurnItem> {
    debug!(?item, "Output item");
    match item {
        ResponseItem::Message { .. }
        | ResponseItem::Reasoning { .. }
        | ResponseItem::WebSearchCall { .. } => parse_turn_item(item),
        // Tool outputs are produced locally, not by the model stream.
        ResponseItem::FunctionCallOutput { .. } | ResponseItem::CustomToolCallOutput { .. } => {
            debug!("unexpected tool output from stream");
            None
        }
        _ => None,
    }
}

/// Extract the last `OutputText` of an assistant message item, if any.
pub(crate) fn last_assistant_message_from_item(item: &ResponseItem) -> Option<String> {
    if let ResponseItem::Message { role, content, .. } = item
        && role == "assistant"
    {
        // Scan from the end so the most recent text segment wins.
        return content.iter().rev().find_map(|ci| match ci {
            codex_protocol::models::ContentItem::OutputText { text } => Some(text.clone()),
            _ => None,
        });
    }
    None
}

/// Convert a model-bound input item back into a recordable [`ResponseItem`];
/// only tool-output variants have such a representation.
pub(crate) fn response_input_to_response_item(input: &ResponseInputItem) -> Option<ResponseItem> {
    match input {
        ResponseInputItem::FunctionCallOutput { call_id, output } => {
            Some(ResponseItem::FunctionCallOutput {
                call_id: call_id.clone(),
                output: output.clone(),
            })
        }
        ResponseInputItem::CustomToolCallOutput { call_id, output } => {
            Some(ResponseItem::CustomToolCallOutput {
                call_id: call_id.clone(),
                output: output.clone(),
            })
        }
        ResponseInputItem::McpToolCallOutput { call_id, result } => {
            // MCP results are flattened into the function-call output shape.
            let output = match result {
                Ok(call_tool_result) => FunctionCallOutputPayload::from(call_tool_result),
                Err(err) => FunctionCallOutputPayload {
                    content: err.clone(),
                    success: Some(false),
                    ..Default::default()
                },
            };
            Some(ResponseItem::FunctionCallOutput {
                call_id: call_id.clone(),
                output,
            })
        }
        _ => None,
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/util.rs
codex-rs/core/src/util.rs
use std::path::Path; use std::path::PathBuf; use std::time::Duration; use rand::Rng; use tracing::debug; use tracing::error; const INITIAL_DELAY_MS: u64 = 200; const BACKOFF_FACTOR: f64 = 2.0; /// Emit structured feedback metadata as key/value pairs. /// /// This logs a tracing event with `target: "feedback_tags"`. If /// `codex_feedback::CodexFeedback::metadata_layer()` is installed, these fields are captured and /// later attached as tags when feedback is uploaded. /// /// Values are wrapped with [`tracing::field::DebugValue`], so the expression only needs to /// implement [`std::fmt::Debug`]. /// /// Example: /// /// ```rust /// codex_core::feedback_tags!(model = "gpt-5", cached = true); /// codex_core::feedback_tags!(provider = provider_id, request_id = request_id); /// ``` #[macro_export] macro_rules! feedback_tags { ($( $key:ident = $value:expr ),+ $(,)?) => { ::tracing::info!( target: "feedback_tags", $( $key = ::tracing::field::debug(&$value) ),+ ); }; } pub(crate) fn backoff(attempt: u64) -> Duration { let exp = BACKOFF_FACTOR.powi(attempt.saturating_sub(1) as i32); let base = (INITIAL_DELAY_MS as f64 * exp) as u64; let jitter = rand::rng().random_range(0.9..1.1); Duration::from_millis((base as f64 * jitter) as u64) } pub(crate) fn error_or_panic(message: impl std::string::ToString) { if cfg!(debug_assertions) { panic!("{}", message.to_string()); } else { error!("{}", message.to_string()); } } pub(crate) fn try_parse_error_message(text: &str) -> String { debug!("Parsing server error response: {}", text); let json = serde_json::from_str::<serde_json::Value>(text).unwrap_or_default(); if let Some(error) = json.get("error") && let Some(message) = error.get("message") && let Some(message_str) = message.as_str() { return message_str.to_string(); } if text.is_empty() { return "Unknown error".to_string(); } text.to_string() } pub fn resolve_path(base: &Path, path: &PathBuf) -> PathBuf { if path.is_absolute() { path.clone() } else { base.join(path) } } 
#[cfg(test)] mod tests { use super::*; #[test] fn test_try_parse_error_message() { let text = r#"{ "error": { "message": "Your refresh token has already been used to generate a new access token. Please try signing in again.", "type": "invalid_request_error", "param": null, "code": "refresh_token_reused" } }"#; let message = try_parse_error_message(text); assert_eq!( message, "Your refresh token has already been used to generate a new access token. Please try signing in again." ); } #[test] fn test_try_parse_error_message_no_error() { let text = r#"{"message": "test"}"#; let message = try_parse_error_message(text); assert_eq!(message, r#"{"message": "test"}"#); } #[test] fn feedback_tags_macro_compiles() { #[derive(Debug)] struct OnlyDebug; feedback_tags!(model = "gpt-5", cached = true, debug_only = OnlyDebug); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/conversation_manager.rs
codex-rs/core/src/conversation_manager.rs
use crate::AuthManager;
#[cfg(any(test, feature = "test-support"))]
use crate::CodexAuth;
#[cfg(any(test, feature = "test-support"))]
use crate::ModelProviderInfo;
use crate::codex::Codex;
use crate::codex::CodexSpawnOk;
use crate::codex::INITIAL_SUBMIT_ID;
use crate::codex_conversation::CodexConversation;
use crate::config::Config;
use crate::error::CodexErr;
use crate::error::Result as CodexResult;
use crate::models_manager::manager::ModelsManager;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::SessionConfiguredEvent;
use crate::rollout::RolloutRecorder;
use crate::skills::SkillsManager;
use codex_protocol::ConversationId;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
#[cfg(any(test, feature = "test-support"))]
use tempfile::TempDir;
use tokio::sync::RwLock;

/// Represents a newly created Codex conversation, including the first event
/// (which is [`EventMsg::SessionConfigured`]).
pub struct NewConversation {
    pub conversation_id: ConversationId,
    pub conversation: Arc<CodexConversation>,
    pub session_configured: SessionConfiguredEvent,
}

/// [`ConversationManager`] is responsible for creating conversations and
/// maintaining them in memory.
pub struct ConversationManager {
    // Live conversations keyed by id; RwLock because reads dominate.
    conversations: Arc<RwLock<HashMap<ConversationId, Arc<CodexConversation>>>>,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
    skills_manager: Arc<SkillsManager>,
    session_source: SessionSource,
    // Keeps the temp codex home alive for the lifetime of a test manager.
    #[cfg(any(test, feature = "test-support"))]
    _test_codex_home_guard: Option<TempDir>,
}

impl ConversationManager {
    /// Create a manager with no live conversations; managers derived from
    /// `auth_manager` share its codex home for skills and model lookup.
    pub fn new(auth_manager: Arc<AuthManager>, session_source: SessionSource) -> Self {
        let skills_manager = Arc::new(SkillsManager::new(auth_manager.codex_home().to_path_buf()));
        Self {
            conversations: Arc::new(RwLock::new(HashMap::new())),
            auth_manager: auth_manager.clone(),
            session_source,
            models_manager: Arc::new(ModelsManager::new(auth_manager)),
            skills_manager,
            #[cfg(any(test, feature = "test-support"))]
            _test_codex_home_guard: None,
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    /// Construct with a dummy AuthManager containing the provided CodexAuth.
    /// Used for integration tests: should not be used by ordinary business logic.
    pub fn with_models_provider(auth: CodexAuth, provider: ModelProviderInfo) -> Self {
        let temp_dir = tempfile::tempdir().unwrap_or_else(|err| panic!("temp codex home: {err}"));
        let codex_home = temp_dir.path().to_path_buf();
        let mut manager = Self::with_models_provider_and_home(auth, provider, codex_home);
        // Store the TempDir so the directory is not deleted while in use.
        manager._test_codex_home_guard = Some(temp_dir);
        manager
    }

    #[cfg(any(test, feature = "test-support"))]
    /// Construct with a dummy AuthManager containing the provided CodexAuth and codex home.
    /// Used for integration tests: should not be used by ordinary business logic.
    pub fn with_models_provider_and_home(
        auth: CodexAuth,
        provider: ModelProviderInfo,
        codex_home: PathBuf,
    ) -> Self {
        let auth_manager = crate::AuthManager::from_auth_for_testing_with_home(auth, codex_home);
        let skills_manager = Arc::new(SkillsManager::new(auth_manager.codex_home().to_path_buf()));
        Self {
            conversations: Arc::new(RwLock::new(HashMap::new())),
            auth_manager: auth_manager.clone(),
            session_source: SessionSource::Exec,
            models_manager: Arc::new(ModelsManager::with_provider(auth_manager, provider)),
            skills_manager,
            _test_codex_home_guard: None,
        }
    }

    /// The session source every conversation spawned by this manager uses.
    pub fn session_source(&self) -> SessionSource {
        self.session_source.clone()
    }

    /// Shared skills manager handle.
    pub fn skills_manager(&self) -> Arc<SkillsManager> {
        self.skills_manager.clone()
    }

    /// Spawn a brand-new conversation with empty history.
    pub async fn new_conversation(&self, config: Config) -> CodexResult<NewConversation> {
        self.spawn_conversation(
            config,
            self.auth_manager.clone(),
            self.models_manager.clone(),
        )
        .await
    }

    async fn spawn_conversation(
        &self,
        config: Config,
        auth_manager: Arc<AuthManager>,
        models_manager: Arc<ModelsManager>,
    ) -> CodexResult<NewConversation> {
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(
            config,
            auth_manager,
            models_manager,
            self.skills_manager.clone(),
            InitialHistory::New,
            self.session_source.clone(),
        )
        .await?;
        self.finalize_spawn(codex, conversation_id).await
    }

    /// Consume the first event from a freshly spawned Codex, validate it, and
    /// register the conversation in the in-memory map.
    async fn finalize_spawn(
        &self,
        codex: Codex,
        conversation_id: ConversationId,
    ) -> CodexResult<NewConversation> {
        // The first event must be `SessionInitialized`. Validate and forward it
        // to the caller so that they can display it in the conversation
        // history.
        let event = codex.next_event().await?;
        let session_configured = match event {
            Event {
                id,
                msg: EventMsg::SessionConfigured(session_configured),
            } if id == INITIAL_SUBMIT_ID => session_configured,
            _ => {
                return Err(CodexErr::SessionConfiguredNotFirstEvent);
            }
        };
        let conversation = Arc::new(CodexConversation::new(
            codex,
            session_configured.rollout_path.clone(),
        ));
        self.conversations
            .write()
            .await
            .insert(conversation_id, conversation.clone());
        Ok(NewConversation {
            conversation_id,
            conversation,
            session_configured,
        })
    }

    /// Look up a live conversation by id.
    pub async fn get_conversation(
        &self,
        conversation_id: ConversationId,
    ) -> CodexResult<Arc<CodexConversation>> {
        let conversations = self.conversations.read().await;
        conversations
            .get(&conversation_id)
            .cloned()
            .ok_or_else(|| CodexErr::ConversationNotFound(conversation_id))
    }

    /// Resume a conversation from a rollout file on disk.
    pub async fn resume_conversation_from_rollout(
        &self,
        config: Config,
        rollout_path: PathBuf,
        auth_manager: Arc<AuthManager>,
    ) -> CodexResult<NewConversation> {
        let initial_history = RolloutRecorder::get_rollout_history(&rollout_path).await?;
        self.resume_conversation_with_history(config, initial_history, auth_manager)
            .await
    }

    /// Resume a conversation from already-loaded history.
    pub async fn resume_conversation_with_history(
        &self,
        config: Config,
        initial_history: InitialHistory,
        auth_manager: Arc<AuthManager>,
    ) -> CodexResult<NewConversation> {
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(
            config,
            auth_manager,
            self.models_manager.clone(),
            self.skills_manager.clone(),
            initial_history,
            self.session_source.clone(),
        )
        .await?;
        self.finalize_spawn(codex, conversation_id).await
    }

    /// Removes the conversation from the manager's internal map, though the
    /// conversation is stored as `Arc<CodexConversation>`, it is possible that
    /// other references to it exist elsewhere. Returns the conversation if the
    /// conversation was found and removed.
    pub async fn remove_conversation(
        &self,
        conversation_id: &ConversationId,
    ) -> Option<Arc<CodexConversation>> {
        self.conversations.write().await.remove(conversation_id)
    }

    /// Fork an existing conversation by taking messages up to the given position
    /// (not including the message at the given position) and starting a new
    /// conversation with identical configuration (unless overridden by the
    /// caller's `config`). The new conversation will have a fresh id.
    pub async fn fork_conversation(
        &self,
        nth_user_message: usize,
        config: Config,
        path: PathBuf,
    ) -> CodexResult<NewConversation> {
        // Compute the prefix up to the cut point.
        let history = RolloutRecorder::get_rollout_history(&path).await?;
        let history = truncate_before_nth_user_message(history, nth_user_message);

        // Spawn a new conversation with the computed initial history.
        let auth_manager = self.auth_manager.clone();
        let CodexSpawnOk {
            codex,
            conversation_id,
        } = Codex::spawn(
            config,
            auth_manager,
            self.models_manager.clone(),
            self.skills_manager.clone(),
            history,
            self.session_source.clone(),
        )
        .await?;
        self.finalize_spawn(codex, conversation_id).await
    }

    /// Model presets available under `config`.
    pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> {
        self.models_manager.list_models(config).await
    }

    pub fn get_models_manager(&self) -> Arc<ModelsManager> {
        self.models_manager.clone()
    }
}

/// Return a prefix of `items` obtained by cutting strictly before the nth user message
/// (0-based) and all items that follow it.
fn truncate_before_nth_user_message(history: InitialHistory, n: usize) -> InitialHistory {
    // Work directly on rollout items, and cut the vector at the nth user message input.
    let items: Vec<RolloutItem> = history.get_rollout_items();

    // Find indices of user message inputs in rollout order.
    let mut user_positions: Vec<usize> = Vec::new();
    for (idx, item) in items.iter().enumerate() {
        // Only items that map to TurnItem::UserMessage count — this skips
        // session-prefix messages that are not real user input.
        if let RolloutItem::ResponseItem(item @ ResponseItem::Message { .. }) = item
            && matches!(
                crate::event_mapping::parse_turn_item(item),
                Some(TurnItem::UserMessage(_))
            )
        {
            user_positions.push(idx);
        }
    }

    // If fewer than or equal to n user messages exist, treat as empty (out of range).
    if user_positions.len() <= n {
        return InitialHistory::New;
    }

    // Cut strictly before the nth user message (do not keep the nth itself).
    let cut_idx = user_positions[n];
    let rolled: Vec<RolloutItem> = items.into_iter().take(cut_idx).collect();

    if rolled.is_empty() {
        InitialHistory::New
    } else {
        InitialHistory::Forked(rolled)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::codex::make_session_and_context;
    use assert_matches::assert_matches;
    use codex_protocol::models::ContentItem;
    use codex_protocol::models::ReasoningItemReasoningSummary;
    use codex_protocol::models::ResponseItem;
    use pretty_assertions::assert_eq;

    // Helper: a user-authored message response item.
    fn user_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "user".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }

    // Helper: an assistant-authored message response item.
    fn assistant_msg(text: &str) -> ResponseItem {
        ResponseItem::Message {
            id: None,
            role: "assistant".to_string(),
            content: vec![ContentItem::OutputText {
                text: text.to_string(),
            }],
        }
    }

    #[test]
    fn drops_from_last_user_only() {
        let items = [
            user_msg("u1"),
            assistant_msg("a1"),
            assistant_msg("a2"),
            user_msg("u2"),
            assistant_msg("a3"),
            ResponseItem::Reasoning {
                id: "r1".to_string(),
                summary: vec![ReasoningItemReasoningSummary::SummaryText {
                    text: "s".to_string(),
                }],
                content: None,
                encrypted_content: None,
            },
            ResponseItem::FunctionCall {
                id: None,
                name: "tool".to_string(),
                arguments: "{}".to_string(),
                call_id: "c1".to_string(),
            },
            assistant_msg("a4"),
        ];

        // Wrap as InitialHistory::Forked with response items only.
        let initial: Vec<RolloutItem> = items
            .iter()
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();
        // Cutting before user message #1 ("u2") keeps everything before it.
        let truncated = truncate_before_nth_user_message(InitialHistory::Forked(initial), 1);
        let got_items = truncated.get_rollout_items();
        let expected_items = vec![
            RolloutItem::ResponseItem(items[0].clone()),
            RolloutItem::ResponseItem(items[1].clone()),
            RolloutItem::ResponseItem(items[2].clone()),
        ];
        assert_eq!(
            serde_json::to_value(&got_items).unwrap(),
            serde_json::to_value(&expected_items).unwrap()
        );

        // Index 2 is out of range (only two user messages): expect empty history.
        let initial2: Vec<RolloutItem> = items
            .iter()
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();
        let truncated2 = truncate_before_nth_user_message(InitialHistory::Forked(initial2), 2);
        assert_matches!(truncated2, InitialHistory::New);
    }

    #[tokio::test]
    async fn ignores_session_prefix_messages_when_truncating() {
        let (session, turn_context) = make_session_and_context().await;
        // The initial context contains non-user prefix items that must not
        // count as user messages when locating the cut point.
        let mut items = session.build_initial_context(&turn_context);
        items.push(user_msg("feature request"));
        items.push(assistant_msg("ack"));
        items.push(user_msg("second question"));
        items.push(assistant_msg("answer"));

        let rollout_items: Vec<RolloutItem> = items
            .iter()
            .cloned()
            .map(RolloutItem::ResponseItem)
            .collect();

        let truncated = truncate_before_nth_user_message(InitialHistory::Forked(rollout_items), 1);
        let got_items = truncated.get_rollout_items();

        let expected: Vec<RolloutItem> = vec![
            RolloutItem::ResponseItem(items[0].clone()),
            RolloutItem::ResponseItem(items[1].clone()),
            RolloutItem::ResponseItem(items[2].clone()),
        ];

        assert_eq!(
            serde_json::to_value(&got_items).unwrap(),
            serde_json::to_value(&expected).unwrap()
        );
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/message_history.rs
codex-rs/core/src/message_history.rs
//! Persistence layer for the global, append-only *message history* file.
//!
//! The history is stored at `~/.codex/history.jsonl` with **one JSON object per
//! line** so that it can be efficiently appended to and parsed with standard
//! JSON-Lines tooling. Each record has the following schema:
//!
//! ````text
//! {"conversation_id":"<uuid>","ts":<unix_seconds>,"text":"<message>"}
//! ````
//!
//! To minimise the chance of interleaved writes when multiple processes are
//! appending concurrently, callers should *prepare the full line* (record +
//! trailing `\n`) and write it with a **single `write(2)` system call** while
//! the file descriptor is opened with the `O_APPEND` flag. POSIX guarantees
//! that writes up to `PIPE_BUF` bytes are atomic in that case.

use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Read;
use std::io::Result;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

use serde::Deserialize;
use serde::Serialize;
use std::time::Duration;
use tokio::fs;
use tokio::io::AsyncReadExt;

use crate::config::Config;
use crate::config::types::HistoryPersistence;
use codex_protocol::ConversationId;

#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;

/// Filename that stores the message history inside `~/.codex`.
const HISTORY_FILENAME: &str = "history.jsonl";

/// When history exceeds the hard cap, trim it down to this fraction of `max_bytes`.
const HISTORY_SOFT_CAP_RATIO: f64 = 0.8;

// Lock-acquisition retry policy: up to MAX_RETRIES tries, sleeping RETRY_SLEEP
// between attempts, before giving up with WouldBlock.
const MAX_RETRIES: usize = 10;
const RETRY_SLEEP: Duration = Duration::from_millis(100);

/// One line of `history.jsonl`.
///
/// NOTE(review): the field is named `session_id` but the module docs show the
/// key as `conversation_id` — the serialized key is `session_id`; confirm the
/// doc example against actual files.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct HistoryEntry {
    // Stringified ConversationId of the session that produced the message.
    pub session_id: String,
    // Seconds since the Unix epoch at append time.
    pub ts: u64,
    pub text: String,
}

/// `<codex_home>/history.jsonl`.
fn history_filepath(config: &Config) -> PathBuf {
    let mut path = config.codex_home.clone();
    path.push(HISTORY_FILENAME);
    path
}

/// Append a `text` entry associated with `conversation_id` to the history file. Uses
/// advisory file locking to ensure that concurrent writes do not interleave,
/// which entails a small amount of blocking I/O internally.
pub(crate) async fn append_entry(
    text: &str,
    conversation_id: &ConversationId,
    config: &Config,
) -> Result<()> {
    match config.history.persistence {
        HistoryPersistence::SaveAll => {
            // Save everything: proceed.
        }
        HistoryPersistence::None => {
            // No history persistence requested.
            return Ok(());
        }
    }

    // TODO: check `text` for sensitive patterns

    // Resolve `~/.codex/history.jsonl` and ensure the parent directory exists.
    let path = history_filepath(config);
    if let Some(parent) = path.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }

    // Compute timestamp (seconds since the Unix epoch).
    let ts = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_err(|e| std::io::Error::other(format!("system clock before Unix epoch: {e}")))?
        .as_secs();

    // Construct the JSON line first so we can write it in a single syscall.
    let entry = HistoryEntry {
        session_id: conversation_id.to_string(),
        ts,
        text: text.to_string(),
    };
    let mut line = serde_json::to_string(&entry)
        .map_err(|e| std::io::Error::other(format!("failed to serialise history entry: {e}")))?;
    line.push('\n');

    // Open the history file for read/write access (append-only on Unix).
    // On Unix we also create the file with 0600 so other users cannot read it.
    let mut options = OpenOptions::new();
    options.read(true).write(true).create(true);
    #[cfg(unix)]
    {
        options.append(true);
        options.mode(0o600);
    }
    let mut history_file = options.open(&path)?;

    // Ensure permissions.
    ensure_owner_only_permissions(&history_file).await?;

    let history_max_bytes = config.history.max_bytes;

    // Perform a blocking write under an advisory write lock using std::fs.
    // spawn_blocking keeps the lock/sleep loop off the async runtime threads.
    tokio::task::spawn_blocking(move || -> Result<()> {
        // Retry a few times to avoid indefinite blocking when contended.
        for _ in 0..MAX_RETRIES {
            match history_file.try_lock() {
                Ok(()) => {
                    // While holding the exclusive lock, write the full line.
                    // We do not open the file with `append(true)` on Windows, so ensure the
                    // cursor is positioned at the end before writing.
                    history_file.seek(SeekFrom::End(0))?;
                    history_file.write_all(line.as_bytes())?;
                    history_file.flush()?;
                    // Trim oldest lines if the file grew past the configured cap.
                    enforce_history_limit(&mut history_file, history_max_bytes)?;
                    return Ok(());
                }
                Err(std::fs::TryLockError::WouldBlock) => {
                    // Another process holds the lock; back off briefly.
                    std::thread::sleep(RETRY_SLEEP);
                }
                Err(e) => return Err(e.into()),
            }
        }
        Err(std::io::Error::new(
            std::io::ErrorKind::WouldBlock,
            "could not acquire exclusive lock on history file after multiple attempts",
        ))
    })
    .await??;
    Ok(())
}

/// Trim the history file to honor `max_bytes`, dropping the oldest lines while holding
/// the write lock so the newest entry is always retained. When the file exceeds the
/// hard cap, it rewrites the remaining tail to a soft cap to avoid trimming again
/// immediately on the next write.
fn enforce_history_limit(file: &mut File, max_bytes: Option<usize>) -> Result<()> { let Some(max_bytes) = max_bytes else { return Ok(()); }; if max_bytes == 0 { return Ok(()); } let max_bytes = match u64::try_from(max_bytes) { Ok(value) => value, Err(_) => return Ok(()), }; let mut current_len = file.metadata()?.len(); if current_len <= max_bytes { return Ok(()); } let mut reader_file = file.try_clone()?; reader_file.seek(SeekFrom::Start(0))?; let mut buf_reader = BufReader::new(reader_file); let mut line_lengths = Vec::new(); let mut line_buf = String::new(); loop { line_buf.clear(); let bytes = buf_reader.read_line(&mut line_buf)?; if bytes == 0 { break; } line_lengths.push(bytes as u64); } if line_lengths.is_empty() { return Ok(()); } let last_index = line_lengths.len() - 1; let trim_target = trim_target_bytes(max_bytes, line_lengths[last_index]); let mut drop_bytes = 0u64; let mut idx = 0usize; while current_len > trim_target && idx < last_index { current_len = current_len.saturating_sub(line_lengths[idx]); drop_bytes += line_lengths[idx]; idx += 1; } if drop_bytes == 0 { return Ok(()); } let mut reader = buf_reader.into_inner(); reader.seek(SeekFrom::Start(drop_bytes))?; let capacity = usize::try_from(current_len).unwrap_or(0); let mut tail = Vec::with_capacity(capacity); reader.read_to_end(&mut tail)?; file.set_len(0)?; file.seek(SeekFrom::Start(0))?; file.write_all(&tail)?; file.flush()?; Ok(()) } fn trim_target_bytes(max_bytes: u64, newest_entry_len: u64) -> u64 { let soft_cap_bytes = ((max_bytes as f64) * HISTORY_SOFT_CAP_RATIO) .floor() .clamp(1.0, max_bytes as f64) as u64; soft_cap_bytes.max(newest_entry_len) } /// Asynchronously fetch the history file's *identifier* (inode on Unix) and /// the current number of entries by counting newline characters. 
pub(crate) async fn history_metadata(config: &Config) -> (u64, usize) { let path = history_filepath(config); history_metadata_for_file(&path).await } /// Given a `log_id` (on Unix this is the file's inode number, /// on Windows this is the file's creation time) and a zero-based /// `offset`, return the corresponding `HistoryEntry` if the identifier matches /// the current history file **and** the requested offset exists. Any I/O or /// parsing errors are logged and result in `None`. /// /// Note this function is not async because it uses a sync advisory file /// locking API. pub(crate) fn lookup(log_id: u64, offset: usize, config: &Config) -> Option<HistoryEntry> { let path = history_filepath(config); lookup_history_entry(&path, log_id, offset) } /// On Unix systems, ensure the file permissions are `0o600` (rw-------). If the /// permissions cannot be changed the error is propagated to the caller. #[cfg(unix)] async fn ensure_owner_only_permissions(file: &File) -> Result<()> { let metadata = file.metadata()?; let current_mode = metadata.permissions().mode() & 0o777; if current_mode != 0o600 { let mut perms = metadata.permissions(); perms.set_mode(0o600); let perms_clone = perms.clone(); let file_clone = file.try_clone()?; tokio::task::spawn_blocking(move || file_clone.set_permissions(perms_clone)).await??; } Ok(()) } #[cfg(windows)] // On Windows, simply succeed. async fn ensure_owner_only_permissions(_file: &File) -> Result<()> { Ok(()) } async fn history_metadata_for_file(path: &Path) -> (u64, usize) { let log_id = match fs::metadata(path).await { Ok(metadata) => history_log_id(&metadata).unwrap_or(0), Err(e) if e.kind() == std::io::ErrorKind::NotFound => return (0, 0), Err(_) => return (0, 0), }; // Open the file. let mut file = match fs::File::open(path).await { Ok(f) => f, Err(_) => return (log_id, 0), }; // Count newline bytes. 
let mut buf = [0u8; 8192]; let mut count = 0usize; loop { match file.read(&mut buf).await { Ok(0) => break, Ok(n) => { count += buf[..n].iter().filter(|&&b| b == b'\n').count(); } Err(_) => return (log_id, 0), } } (log_id, count) } fn lookup_history_entry(path: &Path, log_id: u64, offset: usize) -> Option<HistoryEntry> { use std::io::BufRead; use std::io::BufReader; let file: File = match OpenOptions::new().read(true).open(path) { Ok(f) => f, Err(e) => { tracing::warn!(error = %e, "failed to open history file"); return None; } }; let metadata = match file.metadata() { Ok(m) => m, Err(e) => { tracing::warn!(error = %e, "failed to stat history file"); return None; } }; let current_log_id = history_log_id(&metadata)?; if log_id != 0 && current_log_id != log_id { return None; } // Open & lock file for reading using a shared lock. // Retry a few times to avoid indefinite blocking. for _ in 0..MAX_RETRIES { let lock_result = file.try_lock_shared(); match lock_result { Ok(()) => { let reader = BufReader::new(&file); for (idx, line_res) in reader.lines().enumerate() { let line = match line_res { Ok(l) => l, Err(e) => { tracing::warn!(error = %e, "failed to read line from history file"); return None; } }; if idx == offset { match serde_json::from_str::<HistoryEntry>(&line) { Ok(entry) => return Some(entry), Err(e) => { tracing::warn!(error = %e, "failed to parse history entry"); return None; } } } } // Not found at requested offset. 
return None; } Err(std::fs::TryLockError::WouldBlock) => { std::thread::sleep(RETRY_SLEEP); } Err(e) => { tracing::warn!(error = %e, "failed to acquire shared lock on history file"); return None; } } } None } #[cfg(unix)] fn history_log_id(metadata: &std::fs::Metadata) -> Option<u64> { use std::os::unix::fs::MetadataExt; Some(metadata.ino()) } #[cfg(windows)] fn history_log_id(metadata: &std::fs::Metadata) -> Option<u64> { use std::os::windows::fs::MetadataExt; Some(metadata.creation_time()) } #[cfg(not(any(unix, windows)))] fn history_log_id(_metadata: &std::fs::Metadata) -> Option<u64> { None } #[cfg(test)] mod tests { use super::*; use crate::config::ConfigBuilder; use codex_protocol::ConversationId; use pretty_assertions::assert_eq; use std::fs::File; use std::io::Write; use tempfile::TempDir; #[tokio::test] async fn lookup_reads_history_entries() { let temp_dir = TempDir::new().expect("create temp dir"); let history_path = temp_dir.path().join(HISTORY_FILENAME); let entries = vec![ HistoryEntry { session_id: "first-session".to_string(), ts: 1, text: "first".to_string(), }, HistoryEntry { session_id: "second-session".to_string(), ts: 2, text: "second".to_string(), }, ]; let mut file = File::create(&history_path).expect("create history file"); for entry in &entries { writeln!( file, "{}", serde_json::to_string(entry).expect("serialize history entry") ) .expect("write history entry"); } let (log_id, count) = history_metadata_for_file(&history_path).await; assert_eq!(count, entries.len()); let second_entry = lookup_history_entry(&history_path, log_id, 1).expect("fetch second history entry"); assert_eq!(second_entry, entries[1]); } #[tokio::test] async fn lookup_uses_stable_log_id_after_appends() { let temp_dir = TempDir::new().expect("create temp dir"); let history_path = temp_dir.path().join(HISTORY_FILENAME); let initial = HistoryEntry { session_id: "first-session".to_string(), ts: 1, text: "first".to_string(), }; let appended = HistoryEntry { session_id: 
"second-session".to_string(), ts: 2, text: "second".to_string(), }; let mut file = File::create(&history_path).expect("create history file"); writeln!( file, "{}", serde_json::to_string(&initial).expect("serialize initial entry") ) .expect("write initial entry"); let (log_id, count) = history_metadata_for_file(&history_path).await; assert_eq!(count, 1); let mut append = std::fs::OpenOptions::new() .append(true) .open(&history_path) .expect("open history file for append"); writeln!( append, "{}", serde_json::to_string(&appended).expect("serialize appended entry") ) .expect("append history entry"); let fetched = lookup_history_entry(&history_path, log_id, 1).expect("lookup appended history entry"); assert_eq!(fetched, appended); } #[tokio::test] async fn append_entry_trims_history_when_beyond_max_bytes() { let codex_home = TempDir::new().expect("create temp dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load config"); let conversation_id = ConversationId::new(); let entry_one = "a".repeat(200); let entry_two = "b".repeat(200); let history_path = codex_home.path().join("history.jsonl"); append_entry(&entry_one, &conversation_id, &config) .await .expect("write first entry"); let first_len = std::fs::metadata(&history_path).expect("metadata").len(); let limit_bytes = first_len + 10; config.history.max_bytes = Some(usize::try_from(limit_bytes).expect("limit should fit into usize")); append_entry(&entry_two, &conversation_id, &config) .await .expect("write second entry"); let contents = std::fs::read_to_string(&history_path).expect("read history"); let entries = contents .lines() .map(|line| serde_json::from_str::<HistoryEntry>(line).expect("parse entry")) .collect::<Vec<HistoryEntry>>(); assert_eq!( entries.len(), 1, "only one entry left because entry_one should be evicted" ); assert_eq!(entries[0].text, entry_two); assert!(std::fs::metadata(&history_path).expect("metadata").len() <= limit_bytes); } 
#[tokio::test] async fn append_entry_trims_history_to_soft_cap() { let codex_home = TempDir::new().expect("create temp dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load config"); let conversation_id = ConversationId::new(); let short_entry = "a".repeat(200); let long_entry = "b".repeat(400); let history_path = codex_home.path().join("history.jsonl"); append_entry(&short_entry, &conversation_id, &config) .await .expect("write first entry"); let short_entry_len = std::fs::metadata(&history_path).expect("metadata").len(); append_entry(&long_entry, &conversation_id, &config) .await .expect("write second entry"); let two_entry_len = std::fs::metadata(&history_path).expect("metadata").len(); let long_entry_len = two_entry_len .checked_sub(short_entry_len) .expect("second entry length should be larger than first entry length"); config.history.max_bytes = Some( usize::try_from((2 * long_entry_len) + (short_entry_len / 2)) .expect("max bytes should fit into usize"), ); append_entry(&long_entry, &conversation_id, &config) .await .expect("write third entry"); let contents = std::fs::read_to_string(&history_path).expect("read history"); let entries = contents .lines() .map(|line| serde_json::from_str::<HistoryEntry>(line).expect("parse entry")) .collect::<Vec<HistoryEntry>>(); assert_eq!(entries.len(), 1); assert_eq!(entries[0].text, long_entry); let pruned_len = std::fs::metadata(&history_path).expect("metadata").len(); let max_bytes = config .history .max_bytes .expect("max bytes should be configured") as u64; assert!(pruned_len <= max_bytes); let soft_cap_bytes = ((max_bytes as f64) * HISTORY_SOFT_CAP_RATIO) .floor() .clamp(1.0, max_bytes as f64) as u64; let len_without_first = 2 * long_entry_len; assert!( len_without_first <= max_bytes, "dropping only the first entry would satisfy the hard cap" ); assert!( len_without_first > soft_cap_bytes, "soft cap should require more aggressive trimming than the 
hard cap" ); assert_eq!(pruned_len, long_entry_len); assert!(pruned_len <= soft_cap_bytes.max(long_entry_len)); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/auth.rs
codex-rs/core/src/auth.rs
mod storage; use chrono::Utc; use reqwest::StatusCode; use serde::Deserialize; use serde::Serialize; #[cfg(test)] use serial_test::serial; use std::env; use std::fmt::Debug; use std::io::ErrorKind; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::Duration; use codex_app_server_protocol::AuthMode; use codex_protocol::config_types::ForcedLoginMethod; pub use crate::auth::storage::AuthCredentialsStoreMode; pub use crate::auth::storage::AuthDotJson; use crate::auth::storage::AuthStorageBackend; use crate::auth::storage::create_auth_storage; use crate::config::Config; use crate::error::RefreshTokenFailedError; use crate::error::RefreshTokenFailedReason; use crate::token_data::KnownPlan as InternalKnownPlan; use crate::token_data::PlanType as InternalPlanType; use crate::token_data::TokenData; use crate::token_data::parse_id_token; use crate::util::try_parse_error_message; use codex_client::CodexHttpClient; use codex_protocol::account::PlanType as AccountPlanType; #[cfg(any(test, feature = "test-support"))] use once_cell::sync::Lazy; use serde_json::Value; #[cfg(any(test, feature = "test-support"))] use tempfile::TempDir; use thiserror::Error; #[derive(Debug, Clone)] pub struct CodexAuth { pub mode: AuthMode, pub(crate) api_key: Option<String>, pub(crate) auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>, storage: Arc<dyn AuthStorageBackend>, pub(crate) client: CodexHttpClient, } impl PartialEq for CodexAuth { fn eq(&self, other: &Self) -> bool { self.mode == other.mode } } // TODO(pakrym): use token exp field to check for expiration instead const TOKEN_REFRESH_INTERVAL: i64 = 8; const REFRESH_TOKEN_EXPIRED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token has expired. Please log out and sign in again."; const REFRESH_TOKEN_REUSED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was already used. 
Please log out and sign in again."; const REFRESH_TOKEN_INVALIDATED_MESSAGE: &str = "Your access token could not be refreshed because your refresh token was revoked. Please log out and sign in again."; const REFRESH_TOKEN_UNKNOWN_MESSAGE: &str = "Your access token could not be refreshed. Please log out and sign in again."; const REFRESH_TOKEN_URL: &str = "https://auth.openai.com/oauth/token"; pub const REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR: &str = "CODEX_REFRESH_TOKEN_URL_OVERRIDE"; #[cfg(any(test, feature = "test-support"))] static TEST_AUTH_TEMP_DIRS: Lazy<Mutex<Vec<TempDir>>> = Lazy::new(|| Mutex::new(Vec::new())); #[derive(Debug, Error)] pub enum RefreshTokenError { #[error("{0}")] Permanent(#[from] RefreshTokenFailedError), #[error(transparent)] Transient(#[from] std::io::Error), } impl RefreshTokenError { pub fn failed_reason(&self) -> Option<RefreshTokenFailedReason> { match self { Self::Permanent(error) => Some(error.reason), Self::Transient(_) => None, } } fn other_with_message(message: impl Into<String>) -> Self { Self::Transient(std::io::Error::other(message.into())) } } impl From<RefreshTokenError> for std::io::Error { fn from(err: RefreshTokenError) -> Self { match err { RefreshTokenError::Permanent(failed) => std::io::Error::other(failed), RefreshTokenError::Transient(inner) => inner, } } } impl CodexAuth { pub async fn refresh_token(&self) -> Result<String, RefreshTokenError> { tracing::info!("Refreshing token"); let token_data = self.get_current_token_data().ok_or_else(|| { RefreshTokenError::Transient(std::io::Error::other("Token data is not available.")) })?; let token = token_data.refresh_token; let refresh_response = try_refresh_token(token, &self.client).await?; let updated = update_tokens( &self.storage, refresh_response.id_token, refresh_response.access_token, refresh_response.refresh_token, ) .await .map_err(RefreshTokenError::from)?; if let Ok(mut auth_lock) = self.auth_dot_json.lock() { *auth_lock = Some(updated.clone()); } let access = match 
updated.tokens { Some(t) => t.access_token, None => { return Err(RefreshTokenError::other_with_message( "Token data is not available after refresh.", )); } }; Ok(access) } /// Loads the available auth information from auth storage. pub fn from_auth_storage( codex_home: &Path, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<Option<CodexAuth>> { load_auth(codex_home, false, auth_credentials_store_mode) } pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> { let auth_dot_json: Option<AuthDotJson> = self.get_current_auth_json(); match auth_dot_json { Some(AuthDotJson { tokens: Some(mut tokens), last_refresh: Some(last_refresh), .. }) => { if last_refresh < Utc::now() - chrono::Duration::days(TOKEN_REFRESH_INTERVAL) { let refresh_result = tokio::time::timeout( Duration::from_secs(60), try_refresh_token(tokens.refresh_token.clone(), &self.client), ) .await; let refresh_response = match refresh_result { Ok(Ok(response)) => response, Ok(Err(err)) => return Err(err.into()), Err(_) => { return Err(std::io::Error::new( ErrorKind::TimedOut, "timed out while refreshing OpenAI API key", )); } }; let updated_auth_dot_json = update_tokens( &self.storage, refresh_response.id_token, refresh_response.access_token, refresh_response.refresh_token, ) .await?; tokens = updated_auth_dot_json .tokens .clone() .ok_or(std::io::Error::other( "Token data is not available after refresh.", ))?; #[expect(clippy::unwrap_used)] let mut auth_lock = self.auth_dot_json.lock().unwrap(); *auth_lock = Some(updated_auth_dot_json); } Ok(tokens) } _ => Err(std::io::Error::other("Token data is not available.")), } } pub async fn get_token(&self) -> Result<String, std::io::Error> { match self.mode { AuthMode::ApiKey => Ok(self.api_key.clone().unwrap_or_default()), AuthMode::ChatGPT => { let id_token = self.get_token_data().await?.access_token; Ok(id_token) } } } pub fn get_account_id(&self) -> Option<String> { self.get_current_token_data().and_then(|t| 
t.account_id) } pub fn get_account_email(&self) -> Option<String> { self.get_current_token_data().and_then(|t| t.id_token.email) } /// Account-facing plan classification derived from the current token. /// Returns a high-level `AccountPlanType` (e.g., Free/Plus/Pro/Team/…) /// mapped from the ID token's internal plan value. Prefer this when you /// need to make UI or product decisions based on the user's subscription. pub fn account_plan_type(&self) -> Option<AccountPlanType> { let map_known = |kp: &InternalKnownPlan| match kp { InternalKnownPlan::Free => AccountPlanType::Free, InternalKnownPlan::Plus => AccountPlanType::Plus, InternalKnownPlan::Pro => AccountPlanType::Pro, InternalKnownPlan::Team => AccountPlanType::Team, InternalKnownPlan::Business => AccountPlanType::Business, InternalKnownPlan::Enterprise => AccountPlanType::Enterprise, InternalKnownPlan::Edu => AccountPlanType::Edu, }; self.get_current_token_data() .and_then(|t| t.id_token.chatgpt_plan_type) .map(|pt| match pt { InternalPlanType::Known(k) => map_known(&k), InternalPlanType::Unknown(_) => AccountPlanType::Unknown, }) } fn get_current_auth_json(&self) -> Option<AuthDotJson> { #[expect(clippy::unwrap_used)] self.auth_dot_json.lock().unwrap().clone() } fn get_current_token_data(&self) -> Option<TokenData> { self.get_current_auth_json().and_then(|t| t.tokens) } /// Consider this private to integration tests. 
pub fn create_dummy_chatgpt_auth_for_testing() -> Self { let auth_dot_json = AuthDotJson { openai_api_key: None, tokens: Some(TokenData { id_token: Default::default(), access_token: "Access Token".to_string(), refresh_token: "test".to_string(), account_id: Some("account_id".to_string()), }), last_refresh: Some(Utc::now()), }; let auth_dot_json = Arc::new(Mutex::new(Some(auth_dot_json))); Self { api_key: None, mode: AuthMode::ChatGPT, storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File), auth_dot_json, client: crate::default_client::create_client(), } } fn from_api_key_with_client(api_key: &str, client: CodexHttpClient) -> Self { Self { api_key: Some(api_key.to_owned()), mode: AuthMode::ApiKey, storage: create_auth_storage(PathBuf::new(), AuthCredentialsStoreMode::File), auth_dot_json: Arc::new(Mutex::new(None)), client, } } pub fn from_api_key(api_key: &str) -> Self { Self::from_api_key_with_client(api_key, crate::default_client::create_client()) } } pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY"; pub const CODEX_API_KEY_ENV_VAR: &str = "CODEX_API_KEY"; pub fn read_openai_api_key_from_env() -> Option<String> { env::var(OPENAI_API_KEY_ENV_VAR) .ok() .map(|value| value.trim().to_string()) .filter(|value| !value.is_empty()) } pub fn read_codex_api_key_from_env() -> Option<String> { env::var(CODEX_API_KEY_ENV_VAR) .ok() .map(|value| value.trim().to_string()) .filter(|value| !value.is_empty()) } /// Delete the auth.json file inside `codex_home` if it exists. Returns `Ok(true)` /// if a file was removed, `Ok(false)` if no auth file was present. pub fn logout( codex_home: &Path, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<bool> { let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode); storage.delete() } /// Writes an `auth.json` that contains only the API key. 
pub fn login_with_api_key( codex_home: &Path, api_key: &str, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<()> { let auth_dot_json = AuthDotJson { openai_api_key: Some(api_key.to_string()), tokens: None, last_refresh: None, }; save_auth(codex_home, &auth_dot_json, auth_credentials_store_mode) } /// Persist the provided auth payload using the specified backend. pub fn save_auth( codex_home: &Path, auth: &AuthDotJson, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<()> { let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode); storage.save(auth) } /// Load CLI auth data using the configured credential store backend. /// Returns `None` when no credentials are stored. This function is /// provided only for tests. Production code should not directly load /// from the auth.json storage. It should use the AuthManager abstraction /// instead. pub fn load_auth_dot_json( codex_home: &Path, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<Option<AuthDotJson>> { let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode); storage.load() } pub async fn enforce_login_restrictions(config: &Config) -> std::io::Result<()> { let Some(auth) = load_auth( &config.codex_home, true, config.cli_auth_credentials_store_mode, )? else { return Ok(()); }; if let Some(required_method) = config.forced_login_method { let method_violation = match (required_method, auth.mode) { (ForcedLoginMethod::Api, AuthMode::ApiKey) => None, (ForcedLoginMethod::Chatgpt, AuthMode::ChatGPT) => None, (ForcedLoginMethod::Api, AuthMode::ChatGPT) => Some( "API key login is required, but ChatGPT is currently being used. Logging out." .to_string(), ), (ForcedLoginMethod::Chatgpt, AuthMode::ApiKey) => Some( "ChatGPT login is required, but an API key is currently being used. Logging out." 
.to_string(), ), }; if let Some(message) = method_violation { return logout_with_message( &config.codex_home, message, config.cli_auth_credentials_store_mode, ); } } if let Some(expected_account_id) = config.forced_chatgpt_workspace_id.as_deref() { if auth.mode != AuthMode::ChatGPT { return Ok(()); } let token_data = match auth.get_token_data().await { Ok(data) => data, Err(err) => { return logout_with_message( &config.codex_home, format!( "Failed to load ChatGPT credentials while enforcing workspace restrictions: {err}. Logging out." ), config.cli_auth_credentials_store_mode, ); } }; // workspace is the external identifier for account id. let chatgpt_account_id = token_data.id_token.chatgpt_account_id.as_deref(); if chatgpt_account_id != Some(expected_account_id) { let message = match chatgpt_account_id { Some(actual) => format!( "Login is restricted to workspace {expected_account_id}, but current credentials belong to {actual}. Logging out." ), None => format!( "Login is restricted to workspace {expected_account_id}, but current credentials lack a workspace identifier. Logging out." ), }; return logout_with_message( &config.codex_home, message, config.cli_auth_credentials_store_mode, ); } } Ok(()) } fn logout_with_message( codex_home: &Path, message: String, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<()> { match logout(codex_home, auth_credentials_store_mode) { Ok(_) => Err(std::io::Error::other(message)), Err(err) => Err(std::io::Error::other(format!( "{message}. 
Failed to remove auth.json: {err}" ))), } } fn load_auth( codex_home: &Path, enable_codex_api_key_env: bool, auth_credentials_store_mode: AuthCredentialsStoreMode, ) -> std::io::Result<Option<CodexAuth>> { if enable_codex_api_key_env && let Some(api_key) = read_codex_api_key_from_env() { let client = crate::default_client::create_client(); return Ok(Some(CodexAuth::from_api_key_with_client( api_key.as_str(), client, ))); } let storage = create_auth_storage(codex_home.to_path_buf(), auth_credentials_store_mode); let client = crate::default_client::create_client(); let auth_dot_json = match storage.load()? { Some(auth) => auth, None => return Ok(None), }; let AuthDotJson { openai_api_key: auth_json_api_key, tokens, last_refresh, } = auth_dot_json; // Prefer AuthMode.ApiKey if it's set in the auth.json. if let Some(api_key) = &auth_json_api_key { return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client))); } Ok(Some(CodexAuth { api_key: None, mode: AuthMode::ChatGPT, storage: storage.clone(), auth_dot_json: Arc::new(Mutex::new(Some(AuthDotJson { openai_api_key: None, tokens, last_refresh, }))), client, })) } async fn update_tokens( storage: &Arc<dyn AuthStorageBackend>, id_token: Option<String>, access_token: Option<String>, refresh_token: Option<String>, ) -> std::io::Result<AuthDotJson> { let mut auth_dot_json = storage .load()? 
.ok_or(std::io::Error::other("Token data is not available."))?; let tokens = auth_dot_json.tokens.get_or_insert_with(TokenData::default); if let Some(id_token) = id_token { tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?; } if let Some(access_token) = access_token { tokens.access_token = access_token; } if let Some(refresh_token) = refresh_token { tokens.refresh_token = refresh_token; } auth_dot_json.last_refresh = Some(Utc::now()); storage.save(&auth_dot_json)?; Ok(auth_dot_json) } async fn try_refresh_token( refresh_token: String, client: &CodexHttpClient, ) -> Result<RefreshResponse, RefreshTokenError> { let refresh_request = RefreshRequest { client_id: CLIENT_ID, grant_type: "refresh_token", refresh_token, scope: "openid profile email", }; let endpoint = refresh_token_endpoint(); // Use shared client factory to include standard headers let response = client .post(endpoint.as_str()) .header("Content-Type", "application/json") .json(&refresh_request) .send() .await .map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?; let status = response.status(); if status.is_success() { let refresh_response = response .json::<RefreshResponse>() .await .map_err(|err| RefreshTokenError::Transient(std::io::Error::other(err)))?; Ok(refresh_response) } else { let body = response.text().await.unwrap_or_default(); if status == StatusCode::UNAUTHORIZED { let failed = classify_refresh_token_failure(&body); Err(RefreshTokenError::Permanent(failed)) } else { let message = try_parse_error_message(&body); Err(RefreshTokenError::Transient(std::io::Error::other( format!("Failed to refresh token: {status}: {message}"), ))) } } } fn classify_refresh_token_failure(body: &str) -> RefreshTokenFailedError { let code = extract_refresh_token_error_code(body); let normalized_code = code.as_deref().map(str::to_ascii_lowercase); let reason = match normalized_code.as_deref() { Some("refresh_token_expired") => RefreshTokenFailedReason::Expired, 
Some("refresh_token_reused") => RefreshTokenFailedReason::Exhausted, Some("refresh_token_invalidated") => RefreshTokenFailedReason::Revoked, _ => RefreshTokenFailedReason::Other, }; if reason == RefreshTokenFailedReason::Other { tracing::warn!( backend_code = normalized_code.as_deref(), backend_body = body, "Encountered unknown 401 response while refreshing token" ); } let message = match reason { RefreshTokenFailedReason::Expired => REFRESH_TOKEN_EXPIRED_MESSAGE.to_string(), RefreshTokenFailedReason::Exhausted => REFRESH_TOKEN_REUSED_MESSAGE.to_string(), RefreshTokenFailedReason::Revoked => REFRESH_TOKEN_INVALIDATED_MESSAGE.to_string(), RefreshTokenFailedReason::Other => REFRESH_TOKEN_UNKNOWN_MESSAGE.to_string(), }; RefreshTokenFailedError::new(reason, message) } fn extract_refresh_token_error_code(body: &str) -> Option<String> { if body.trim().is_empty() { return None; } let Value::Object(map) = serde_json::from_str::<Value>(body).ok()? else { return None; }; if let Some(error_value) = map.get("error") { match error_value { Value::Object(obj) => { if let Some(code) = obj.get("code").and_then(Value::as_str) { return Some(code.to_string()); } } Value::String(code) => { return Some(code.to_string()); } _ => {} } } map.get("code").and_then(Value::as_str).map(str::to_string) } #[derive(Serialize)] struct RefreshRequest { client_id: &'static str, grant_type: &'static str, refresh_token: String, scope: &'static str, } #[derive(Deserialize, Clone)] struct RefreshResponse { id_token: Option<String>, access_token: Option<String>, refresh_token: Option<String>, } // Shared constant for token refresh (client id used for oauth token refresh flow) pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann"; fn refresh_token_endpoint() -> String { std::env::var(REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR) .unwrap_or_else(|_| REFRESH_TOKEN_URL.to_string()) } use std::sync::RwLock; /// Internal cached auth state. 
#[derive(Clone, Debug)] struct CachedAuth { auth: Option<CodexAuth>, } #[cfg(test)] mod tests { use super::*; use crate::auth::storage::FileAuthStorage; use crate::auth::storage::get_auth_file; use crate::config::Config; use crate::config::ConfigBuilder; use crate::token_data::IdTokenInfo; use crate::token_data::KnownPlan as InternalKnownPlan; use crate::token_data::PlanType as InternalPlanType; use codex_protocol::account::PlanType as AccountPlanType; use base64::Engine; use codex_protocol::config_types::ForcedLoginMethod; use pretty_assertions::assert_eq; use serde::Serialize; use serde_json::json; use tempfile::tempdir; #[tokio::test] async fn refresh_without_id_token() { let codex_home = tempdir().unwrap(); let fake_jwt = write_auth_file( AuthFileParams { openai_api_key: None, chatgpt_plan_type: "pro".to_string(), chatgpt_account_id: None, }, codex_home.path(), ) .expect("failed to write auth file"); let storage = create_auth_storage( codex_home.path().to_path_buf(), AuthCredentialsStoreMode::File, ); let updated = super::update_tokens( &storage, None, Some("new-access-token".to_string()), Some("new-refresh-token".to_string()), ) .await .expect("update_tokens should succeed"); let tokens = updated.tokens.expect("tokens should exist"); assert_eq!(tokens.id_token.raw_jwt, fake_jwt); assert_eq!(tokens.access_token, "new-access-token"); assert_eq!(tokens.refresh_token, "new-refresh-token"); } #[test] fn login_with_api_key_overwrites_existing_auth_json() { let dir = tempdir().unwrap(); let auth_path = dir.path().join("auth.json"); let stale_auth = json!({ "OPENAI_API_KEY": "sk-old", "tokens": { "id_token": "stale.header.payload", "access_token": "stale-access", "refresh_token": "stale-refresh", "account_id": "stale-acc" } }); std::fs::write( &auth_path, serde_json::to_string_pretty(&stale_auth).unwrap(), ) .unwrap(); super::login_with_api_key(dir.path(), "sk-new", AuthCredentialsStoreMode::File) .expect("login_with_api_key should succeed"); let storage = 
FileAuthStorage::new(dir.path().to_path_buf()); let auth = storage .try_read_auth_json(&auth_path) .expect("auth.json should parse"); assert_eq!(auth.openai_api_key.as_deref(), Some("sk-new")); assert!(auth.tokens.is_none(), "tokens should be cleared"); } #[test] fn missing_auth_json_returns_none() { let dir = tempdir().unwrap(); let auth = CodexAuth::from_auth_storage(dir.path(), AuthCredentialsStoreMode::File) .expect("call should succeed"); assert_eq!(auth, None); } #[tokio::test] #[serial(codex_api_key)] async fn pro_account_with_no_api_key_uses_chatgpt_auth() { let codex_home = tempdir().unwrap(); let fake_jwt = write_auth_file( AuthFileParams { openai_api_key: None, chatgpt_plan_type: "pro".to_string(), chatgpt_account_id: None, }, codex_home.path(), ) .expect("failed to write auth file"); let CodexAuth { api_key, mode, auth_dot_json, storage: _, .. } = super::load_auth(codex_home.path(), false, AuthCredentialsStoreMode::File) .unwrap() .unwrap(); assert_eq!(None, api_key); assert_eq!(AuthMode::ChatGPT, mode); let guard = auth_dot_json.lock().unwrap(); let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist"); let last_refresh = auth_dot_json .last_refresh .expect("last_refresh should be recorded"); assert_eq!( &AuthDotJson { openai_api_key: None, tokens: Some(TokenData { id_token: IdTokenInfo { email: Some("user@example.com".to_string()), chatgpt_plan_type: Some(InternalPlanType::Known(InternalKnownPlan::Pro)), chatgpt_account_id: None, raw_jwt: fake_jwt, }, access_token: "test-access-token".to_string(), refresh_token: "test-refresh-token".to_string(), account_id: None, }), last_refresh: Some(last_refresh), }, auth_dot_json ); } #[tokio::test] #[serial(codex_api_key)] async fn loads_api_key_from_auth_json() { let dir = tempdir().unwrap(); let auth_file = dir.path().join("auth.json"); std::fs::write( auth_file, r#"{"OPENAI_API_KEY":"sk-test-key","tokens":null,"last_refresh":null}"#, ) .unwrap(); let auth = super::load_auth(dir.path(), false, 
AuthCredentialsStoreMode::File) .unwrap() .unwrap(); assert_eq!(auth.mode, AuthMode::ApiKey); assert_eq!(auth.api_key, Some("sk-test-key".to_string())); assert!(auth.get_token_data().await.is_err()); } #[test] fn logout_removes_auth_file() -> Result<(), std::io::Error> { let dir = tempdir()?; let auth_dot_json = AuthDotJson { openai_api_key: Some("sk-test-key".to_string()), tokens: None, last_refresh: None, }; super::save_auth(dir.path(), &auth_dot_json, AuthCredentialsStoreMode::File)?; let auth_file = get_auth_file(dir.path()); assert!(auth_file.exists()); assert!(logout(dir.path(), AuthCredentialsStoreMode::File)?); assert!(!auth_file.exists()); Ok(()) } struct AuthFileParams { openai_api_key: Option<String>, chatgpt_plan_type: String, chatgpt_account_id: Option<String>, } fn write_auth_file(params: AuthFileParams, codex_home: &Path) -> std::io::Result<String> { let auth_file = get_auth_file(codex_home); // Create a minimal valid JWT for the id_token field. #[derive(Serialize)] struct Header { alg: &'static str, typ: &'static str, } let header = Header { alg: "none", typ: "JWT", }; let mut auth_payload = serde_json::json!({ "chatgpt_plan_type": params.chatgpt_plan_type, "chatgpt_user_id": "user-12345", "user_id": "user-12345", }); if let Some(chatgpt_account_id) = params.chatgpt_account_id { let org_value = serde_json::Value::String(chatgpt_account_id); auth_payload["chatgpt_account_id"] = org_value; } let payload = serde_json::json!({ "email": "user@example.com", "email_verified": true, "https://api.openai.com/auth": auth_payload, }); let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b); let header_b64 = b64(&serde_json::to_vec(&header)?); let payload_b64 = b64(&serde_json::to_vec(&payload)?); let signature_b64 = b64(b"sig"); let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); let auth_json_data = json!({ "OPENAI_API_KEY": params.openai_api_key, "tokens": { "id_token": fake_jwt, "access_token": "test-access-token", 
"refresh_token": "test-refresh-token" }, "last_refresh": Utc::now(), }); let auth_json = serde_json::to_string_pretty(&auth_json_data)?; std::fs::write(auth_file, auth_json)?; Ok(fake_jwt) } async fn build_config( codex_home: &Path, forced_login_method: Option<ForcedLoginMethod>, forced_chatgpt_workspace_id: Option<String>, ) -> Config { let mut config = ConfigBuilder::default() .codex_home(codex_home.to_path_buf()) .build() .await .expect("config should load"); config.forced_login_method = forced_login_method; config.forced_chatgpt_workspace_id = forced_chatgpt_workspace_id; config } /// Use sparingly. /// TODO (gpeal): replace this with an injectable env var provider. #[cfg(test)] struct EnvVarGuard { key: &'static str, original: Option<std::ffi::OsString>, } #[cfg(test)] impl EnvVarGuard { fn set(key: &'static str, value: &str) -> Self { let original = env::var_os(key); unsafe { env::set_var(key, value); } Self { key, original } } } #[cfg(test)] impl Drop for EnvVarGuard { fn drop(&mut self) { unsafe { match &self.original { Some(value) => env::set_var(self.key, value), None => env::remove_var(self.key), } } } } #[tokio::test] async fn enforce_login_restrictions_logs_out_for_method_mismatch() { let codex_home = tempdir().unwrap(); login_with_api_key(codex_home.path(), "sk-test", AuthCredentialsStoreMode::File) .expect("seed api key"); let config = build_config(codex_home.path(), Some(ForcedLoginMethod::Chatgpt), None).await; let err = super::enforce_login_restrictions(&config) .await .expect_err("expected method mismatch to error"); assert!(err.to_string().contains("ChatGPT login is required")); assert!( !codex_home.path().join("auth.json").exists(), "auth.json should be removed on mismatch" ); } #[tokio::test] #[serial(codex_api_key)] async fn enforce_login_restrictions_logs_out_for_workspace_mismatch() { let codex_home = tempdir().unwrap(); let _jwt = write_auth_file( AuthFileParams { openai_api_key: None, chatgpt_plan_type: "pro".to_string(), 
chatgpt_account_id: Some("org_another_org".to_string()), }, codex_home.path(), )
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/terminal.rs
codex-rs/core/src/terminal.rs
//! Terminal detection utilities. //! //! This module feeds terminal metadata into OpenTelemetry user-agent logging and into //! terminal-specific configuration choices in the TUI. use std::sync::OnceLock; /// Structured terminal identification data. #[derive(Clone, Debug, Eq, PartialEq)] pub struct TerminalInfo { /// The detected terminal name category. pub name: TerminalName, /// The `TERM_PROGRAM` value when provided by the terminal. pub term_program: Option<String>, /// The terminal version string when available. pub version: Option<String>, /// The `TERM` value when falling back to capability strings. pub term: Option<String>, /// Multiplexer metadata when a terminal multiplexer is active. pub multiplexer: Option<Multiplexer>, } /// Known terminal name categories derived from environment variables. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum TerminalName { /// Apple Terminal (Terminal.app). AppleTerminal, /// Ghostty terminal emulator. Ghostty, /// iTerm2 terminal emulator. Iterm2, /// Warp terminal emulator. WarpTerminal, /// Visual Studio Code integrated terminal. VsCode, /// WezTerm terminal emulator. WezTerm, /// kitty terminal emulator. Kitty, /// Alacritty terminal emulator. Alacritty, /// KDE Konsole terminal emulator. Konsole, /// GNOME Terminal emulator. GnomeTerminal, /// VTE backend terminal. Vte, /// Windows Terminal emulator. WindowsTerminal, /// Unknown or missing terminal identification. Unknown, } /// Detected terminal multiplexer metadata. #[derive(Clone, Debug, Eq, PartialEq)] pub enum Multiplexer { /// tmux terminal multiplexer. Tmux { /// tmux version string when `TERM_PROGRAM=tmux` is available. /// /// This is derived from `TERM_PROGRAM_VERSION`. version: Option<String>, }, /// zellij terminal multiplexer. Zellij {}, } /// tmux client terminal identification captured via `tmux display-message`. 
/// /// `termtype` corresponds to `#{client_termtype}` and typically reflects the /// underlying terminal program (for example, `ghostty` or `wezterm`) with an /// optional version suffix. `termname` comes from `#{client_termname}` and /// preserves the TERM capability string exposed by the client (for example, /// `xterm-256color`). /// /// This information is only available when running under tmux and lets us /// attribute the session to the underlying terminal rather than to tmux itself. #[derive(Clone, Debug, Default, Eq, PartialEq)] struct TmuxClientInfo { termtype: Option<String>, termname: Option<String>, } impl TerminalInfo { /// Creates terminal metadata from detected fields. fn new( name: TerminalName, term_program: Option<String>, version: Option<String>, term: Option<String>, multiplexer: Option<Multiplexer>, ) -> Self { Self { name, term_program, version, term, multiplexer, } } /// Creates terminal metadata from a `TERM_PROGRAM` match. fn from_term_program( name: TerminalName, term_program: String, version: Option<String>, multiplexer: Option<Multiplexer>, ) -> Self { Self::new(name, Some(term_program), version, None, multiplexer) } /// Creates terminal metadata from a `TERM_PROGRAM` match plus a `TERM` value. fn from_term_program_and_term( name: TerminalName, term_program: String, version: Option<String>, term: Option<String>, multiplexer: Option<Multiplexer>, ) -> Self { Self::new(name, Some(term_program), version, term, multiplexer) } /// Creates terminal metadata from a known terminal name and optional version. fn from_name( name: TerminalName, version: Option<String>, multiplexer: Option<Multiplexer>, ) -> Self { Self::new(name, None, version, None, multiplexer) } /// Creates terminal metadata from a `TERM` capability value. fn from_term(term: String, multiplexer: Option<Multiplexer>) -> Self { Self::new(TerminalName::Unknown, None, None, Some(term), multiplexer) } /// Creates terminal metadata for unknown terminals. 
fn unknown(multiplexer: Option<Multiplexer>) -> Self { Self::new(TerminalName::Unknown, None, None, None, multiplexer) } /// Formats the terminal info as a User-Agent token. fn user_agent_token(&self) -> String { let raw = if let Some(program) = self.term_program.as_ref() { match self.version.as_ref().filter(|v| !v.is_empty()) { Some(version) => format!("{program}/{version}"), None => program.clone(), } } else if let Some(term) = self.term.as_ref().filter(|value| !value.is_empty()) { term.clone() } else { match self.name { TerminalName::AppleTerminal => { format_terminal_version("Apple_Terminal", &self.version) } TerminalName::Ghostty => format_terminal_version("Ghostty", &self.version), TerminalName::Iterm2 => format_terminal_version("iTerm.app", &self.version), TerminalName::WarpTerminal => { format_terminal_version("WarpTerminal", &self.version) } TerminalName::VsCode => format_terminal_version("vscode", &self.version), TerminalName::WezTerm => format_terminal_version("WezTerm", &self.version), TerminalName::Kitty => "kitty".to_string(), TerminalName::Alacritty => "Alacritty".to_string(), TerminalName::Konsole => format_terminal_version("Konsole", &self.version), TerminalName::GnomeTerminal => "gnome-terminal".to_string(), TerminalName::Vte => format_terminal_version("VTE", &self.version), TerminalName::WindowsTerminal => "WindowsTerminal".to_string(), TerminalName::Unknown => "unknown".to_string(), } }; sanitize_header_value(raw) } } static TERMINAL_INFO: OnceLock<TerminalInfo> = OnceLock::new(); /// Environment variable access used by terminal detection. /// /// This trait exists to allow faking the environment in tests. trait Environment { /// Returns an environment variable when set. fn var(&self, name: &str) -> Option<String>; /// Returns whether an environment variable is set. fn has(&self, name: &str) -> bool { self.var(name).is_some() } /// Returns a non-empty environment variable. 
fn var_non_empty(&self, name: &str) -> Option<String> { self.var(name).and_then(none_if_whitespace) } /// Returns whether an environment variable is set and non-empty. fn has_non_empty(&self, name: &str) -> bool { self.var_non_empty(name).is_some() } /// Returns tmux client details when available. fn tmux_client_info(&self) -> TmuxClientInfo; } /// Reads environment variables from the running process. struct ProcessEnvironment; impl Environment for ProcessEnvironment { fn var(&self, name: &str) -> Option<String> { match std::env::var(name) { Ok(value) => Some(value), Err(std::env::VarError::NotPresent) => None, Err(std::env::VarError::NotUnicode(_)) => { tracing::warn!("failed to read env var {name}: value not valid UTF-8"); None } } } fn tmux_client_info(&self) -> TmuxClientInfo { tmux_client_info() } } /// Returns a sanitized terminal identifier for User-Agent strings. pub fn user_agent() -> String { terminal_info().user_agent_token() } /// Returns structured terminal metadata for the current process. pub fn terminal_info() -> TerminalInfo { TERMINAL_INFO .get_or_init(|| detect_terminal_info_from_env(&ProcessEnvironment)) .clone() } /// Detects structured terminal metadata from an injectable environment. /// /// Detection order favors explicit identifiers before falling back to capability strings: /// - If `TERM_PROGRAM=tmux`, the tmux client term type/name are used instead. The client term /// type is split on whitespace to extract a program name plus optional version (for example, /// `ghostty 1.2.3`), while the client term name becomes the `TERM` capability string. /// - Otherwise, `TERM_PROGRAM` (plus `TERM_PROGRAM_VERSION`) drives the detected terminal name. /// - Next, terminal-specific variables (WEZTERM, iTerm2, Apple Terminal, kitty, etc.) are checked. /// - Finally, `TERM` is used as the capability fallback with `TerminalName::Unknown`. 
/// /// tmux client term info is only consulted when a tmux multiplexer is detected, and it is /// derived from `tmux display-message` to surface the underlying terminal program instead of /// reporting tmux itself. fn detect_terminal_info_from_env(env: &dyn Environment) -> TerminalInfo { let multiplexer = detect_multiplexer(env); if let Some(term_program) = env.var_non_empty("TERM_PROGRAM") { if is_tmux_term_program(&term_program) && matches!(multiplexer, Some(Multiplexer::Tmux { .. })) && let Some(terminal) = terminal_from_tmux_client_info(env.tmux_client_info(), multiplexer.clone()) { return terminal; } let version = env.var_non_empty("TERM_PROGRAM_VERSION"); let name = terminal_name_from_term_program(&term_program).unwrap_or(TerminalName::Unknown); return TerminalInfo::from_term_program(name, term_program, version, multiplexer); } if env.has("WEZTERM_VERSION") { let version = env.var_non_empty("WEZTERM_VERSION"); return TerminalInfo::from_name(TerminalName::WezTerm, version, multiplexer); } if env.has("ITERM_SESSION_ID") || env.has("ITERM_PROFILE") || env.has("ITERM_PROFILE_NAME") { return TerminalInfo::from_name(TerminalName::Iterm2, None, multiplexer); } if env.has("TERM_SESSION_ID") { return TerminalInfo::from_name(TerminalName::AppleTerminal, None, multiplexer); } if env.has("KITTY_WINDOW_ID") || env .var("TERM") .map(|term| term.contains("kitty")) .unwrap_or(false) { return TerminalInfo::from_name(TerminalName::Kitty, None, multiplexer); } if env.has("ALACRITTY_SOCKET") || env .var("TERM") .map(|term| term == "alacritty") .unwrap_or(false) { return TerminalInfo::from_name(TerminalName::Alacritty, None, multiplexer); } if env.has("KONSOLE_VERSION") { let version = env.var_non_empty("KONSOLE_VERSION"); return TerminalInfo::from_name(TerminalName::Konsole, version, multiplexer); } if env.has("GNOME_TERMINAL_SCREEN") { return TerminalInfo::from_name(TerminalName::GnomeTerminal, None, multiplexer); } if env.has("VTE_VERSION") { let version = 
env.var_non_empty("VTE_VERSION"); return TerminalInfo::from_name(TerminalName::Vte, version, multiplexer); } if env.has("WT_SESSION") { return TerminalInfo::from_name(TerminalName::WindowsTerminal, None, multiplexer); } if let Some(term) = env.var_non_empty("TERM") { return TerminalInfo::from_term(term, multiplexer); } TerminalInfo::unknown(multiplexer) } fn detect_multiplexer(env: &dyn Environment) -> Option<Multiplexer> { if env.has_non_empty("TMUX") || env.has_non_empty("TMUX_PANE") { return Some(Multiplexer::Tmux { version: tmux_version_from_env(env), }); } if env.has_non_empty("ZELLIJ") || env.has_non_empty("ZELLIJ_SESSION_NAME") || env.has_non_empty("ZELLIJ_VERSION") { return Some(Multiplexer::Zellij {}); } None } fn is_tmux_term_program(value: &str) -> bool { value.eq_ignore_ascii_case("tmux") } fn terminal_from_tmux_client_info( client_info: TmuxClientInfo, multiplexer: Option<Multiplexer>, ) -> Option<TerminalInfo> { let termtype = client_info.termtype.and_then(none_if_whitespace); let termname = client_info.termname.and_then(none_if_whitespace); if let Some(termtype) = termtype.as_ref() { let (program, version) = split_term_program_and_version(termtype); let name = terminal_name_from_term_program(&program).unwrap_or(TerminalName::Unknown); return Some(TerminalInfo::from_term_program_and_term( name, program, version, termname, multiplexer, )); } termname .as_ref() .map(|termname| TerminalInfo::from_term(termname.to_string(), multiplexer)) } fn tmux_version_from_env(env: &dyn Environment) -> Option<String> { let term_program = env.var("TERM_PROGRAM")?; if !is_tmux_term_program(&term_program) { return None; } env.var_non_empty("TERM_PROGRAM_VERSION") } fn split_term_program_and_version(value: &str) -> (String, Option<String>) { let mut parts = value.split_whitespace(); let program = parts.next().unwrap_or_default().to_string(); let version = parts.next().map(ToString::to_string); (program, version) } fn tmux_client_info() -> TmuxClientInfo { let termtype = 
tmux_display_message("#{client_termtype}"); let termname = tmux_display_message("#{client_termname}"); TmuxClientInfo { termtype, termname } } fn tmux_display_message(format: &str) -> Option<String> { let output = std::process::Command::new("tmux") .args(["display-message", "-p", format]) .output() .ok()?; if !output.status.success() { return None; } let value = String::from_utf8(output.stdout).ok()?; none_if_whitespace(value.trim().to_string()) } /// Sanitizes a terminal token for use in User-Agent headers. /// /// Invalid header characters are replaced with underscores. fn sanitize_header_value(value: String) -> String { value.replace(|c| !is_valid_header_value_char(c), "_") } /// Returns whether a character is allowed in User-Agent header values. fn is_valid_header_value_char(c: char) -> bool { c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.' || c == '/' } fn terminal_name_from_term_program(value: &str) -> Option<TerminalName> { let normalized: String = value .trim() .chars() .filter(|c| !matches!(c, ' ' | '-' | '_' | '.')) .map(|c| c.to_ascii_lowercase()) .collect(); match normalized.as_str() { "appleterminal" => Some(TerminalName::AppleTerminal), "ghostty" => Some(TerminalName::Ghostty), "iterm" | "iterm2" | "itermapp" => Some(TerminalName::Iterm2), "warp" | "warpterminal" => Some(TerminalName::WarpTerminal), "vscode" => Some(TerminalName::VsCode), "wezterm" => Some(TerminalName::WezTerm), "kitty" => Some(TerminalName::Kitty), "alacritty" => Some(TerminalName::Alacritty), "konsole" => Some(TerminalName::Konsole), "gnometerminal" => Some(TerminalName::GnomeTerminal), "vte" => Some(TerminalName::Vte), "windowsterminal" => Some(TerminalName::WindowsTerminal), _ => None, } } fn format_terminal_version(name: &str, version: &Option<String>) -> String { match version.as_ref().filter(|value| !value.is_empty()) { Some(version) => format!("{name}/{version}"), None => name.to_string(), } } fn none_if_whitespace(value: String) -> Option<String> { 
(!value.trim().is_empty()).then_some(value) } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; use std::collections::HashMap; struct FakeEnvironment { vars: HashMap<String, String>, tmux_client_info: TmuxClientInfo, } impl FakeEnvironment { fn new() -> Self { Self { vars: HashMap::new(), tmux_client_info: TmuxClientInfo::default(), } } fn with_var(mut self, key: &str, value: &str) -> Self { self.vars.insert(key.to_string(), value.to_string()); self } fn with_tmux_client_info(mut self, termtype: Option<&str>, termname: Option<&str>) -> Self { self.tmux_client_info = TmuxClientInfo { termtype: termtype.map(ToString::to_string), termname: termname.map(ToString::to_string), }; self } } impl Environment for FakeEnvironment { fn var(&self, name: &str) -> Option<String> { self.vars.get(name).cloned() } fn tmux_client_info(&self) -> TmuxClientInfo { self.tmux_client_info.clone() } } fn terminal_info( name: TerminalName, term_program: Option<&str>, version: Option<&str>, term: Option<&str>, multiplexer: Option<Multiplexer>, ) -> TerminalInfo { TerminalInfo { name, term_program: term_program.map(ToString::to_string), version: version.map(ToString::to_string), term: term.map(ToString::to_string), multiplexer, } } #[test] fn detects_term_program() { let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "iTerm.app") .with_var("TERM_PROGRAM_VERSION", "3.5.0") .with_var("WEZTERM_VERSION", "2024.2"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Iterm2, Some("iTerm.app"), Some("3.5.0"), None, None, ), "term_program_with_version_info" ); assert_eq!( terminal.user_agent_token(), "iTerm.app/3.5.0", "term_program_with_version_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "iTerm.app") .with_var("TERM_PROGRAM_VERSION", ""); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Iterm2, Some("iTerm.app"), None, None, None), 
"term_program_without_version_info" ); assert_eq!( terminal.user_agent_token(), "iTerm.app", "term_program_without_version_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "iTerm.app") .with_var("WEZTERM_VERSION", "2024.2"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Iterm2, Some("iTerm.app"), None, None, None), "term_program_overrides_wezterm_info" ); assert_eq!( terminal.user_agent_token(), "iTerm.app", "term_program_overrides_wezterm_user_agent" ); } #[test] fn detects_iterm2() { let env = FakeEnvironment::new().with_var("ITERM_SESSION_ID", "w0t1p0"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Iterm2, None, None, None, None), "iterm_session_id_info" ); assert_eq!( terminal.user_agent_token(), "iTerm.app", "iterm_session_id_user_agent" ); } #[test] fn detects_apple_terminal() { let env = FakeEnvironment::new().with_var("TERM_PROGRAM", "Apple_Terminal"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::AppleTerminal, Some("Apple_Terminal"), None, None, None, ), "apple_term_program_info" ); assert_eq!( terminal.user_agent_token(), "Apple_Terminal", "apple_term_program_user_agent" ); let env = FakeEnvironment::new().with_var("TERM_SESSION_ID", "A1B2C3"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::AppleTerminal, None, None, None, None), "apple_term_session_id_info" ); assert_eq!( terminal.user_agent_token(), "Apple_Terminal", "apple_term_session_id_user_agent" ); } #[test] fn detects_ghostty() { let env = FakeEnvironment::new().with_var("TERM_PROGRAM", "Ghostty"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Ghostty, Some("Ghostty"), None, None, None), "ghostty_term_program_info" ); assert_eq!( terminal.user_agent_token(), "Ghostty", 
"ghostty_term_program_user_agent" ); } #[test] fn detects_vscode() { let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "vscode") .with_var("TERM_PROGRAM_VERSION", "1.86.0"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::VsCode, Some("vscode"), Some("1.86.0"), None, None ), "vscode_term_program_info" ); assert_eq!( terminal.user_agent_token(), "vscode/1.86.0", "vscode_term_program_user_agent" ); } #[test] fn detects_warp_terminal() { let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "WarpTerminal") .with_var("TERM_PROGRAM_VERSION", "v0.2025.12.10.08.12.stable_03"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::WarpTerminal, Some("WarpTerminal"), Some("v0.2025.12.10.08.12.stable_03"), None, None, ), "warp_term_program_info" ); assert_eq!( terminal.user_agent_token(), "WarpTerminal/v0.2025.12.10.08.12.stable_03", "warp_term_program_user_agent" ); } #[test] fn detects_tmux_multiplexer() { let env = FakeEnvironment::new() .with_var("TMUX", "/tmp/tmux-1000/default,123,0") .with_var("TERM_PROGRAM", "tmux") .with_tmux_client_info(Some("xterm-256color"), Some("screen-256color")); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Unknown, Some("xterm-256color"), None, Some("screen-256color"), Some(Multiplexer::Tmux { version: None }), ), "tmux_multiplexer_info" ); assert_eq!( terminal.user_agent_token(), "xterm-256color", "tmux_multiplexer_user_agent" ); } #[test] fn detects_zellij_multiplexer() { let env = FakeEnvironment::new().with_var("ZELLIJ", "1"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, TerminalInfo { name: TerminalName::Unknown, term_program: None, version: None, term: None, multiplexer: Some(Multiplexer::Zellij {}), }, "zellij_multiplexer" ); } #[test] fn detects_tmux_client_termtype() { let env = FakeEnvironment::new() .with_var("TMUX", 
"/tmp/tmux-1000/default,123,0") .with_var("TERM_PROGRAM", "tmux") .with_tmux_client_info(Some("WezTerm"), None); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::WezTerm, Some("WezTerm"), None, None, Some(Multiplexer::Tmux { version: None }), ), "tmux_client_termtype_info" ); assert_eq!( terminal.user_agent_token(), "WezTerm", "tmux_client_termtype_user_agent" ); } #[test] fn detects_tmux_client_termname() { let env = FakeEnvironment::new() .with_var("TMUX", "/tmp/tmux-1000/default,123,0") .with_var("TERM_PROGRAM", "tmux") .with_tmux_client_info(None, Some("xterm-256color")); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Unknown, None, None, Some("xterm-256color"), Some(Multiplexer::Tmux { version: None }) ), "tmux_client_termname_info" ); assert_eq!( terminal.user_agent_token(), "xterm-256color", "tmux_client_termname_user_agent" ); } #[test] fn detects_tmux_term_program_uses_client_termtype() { let env = FakeEnvironment::new() .with_var("TMUX", "/tmp/tmux-1000/default,123,0") .with_var("TERM_PROGRAM", "tmux") .with_var("TERM_PROGRAM_VERSION", "3.6a") .with_tmux_client_info(Some("ghostty 1.2.3"), Some("xterm-ghostty")); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Ghostty, Some("ghostty"), Some("1.2.3"), Some("xterm-ghostty"), Some(Multiplexer::Tmux { version: Some("3.6a".to_string()), }), ), "tmux_term_program_client_termtype_info" ); assert_eq!( terminal.user_agent_token(), "ghostty/1.2.3", "tmux_term_program_client_termtype_user_agent" ); } #[test] fn detects_wezterm() { let env = FakeEnvironment::new().with_var("WEZTERM_VERSION", "2024.2"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::WezTerm, None, Some("2024.2"), None, None), "wezterm_version_info" ); assert_eq!( terminal.user_agent_token(), "WezTerm/2024.2", 
"wezterm_version_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "WezTerm") .with_var("TERM_PROGRAM_VERSION", "2024.2"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::WezTerm, Some("WezTerm"), Some("2024.2"), None, None ), "wezterm_term_program_info" ); assert_eq!( terminal.user_agent_token(), "WezTerm/2024.2", "wezterm_term_program_user_agent" ); let env = FakeEnvironment::new().with_var("WEZTERM_VERSION", ""); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::WezTerm, None, None, None, None), "wezterm_empty_info" ); assert_eq!( terminal.user_agent_token(), "WezTerm", "wezterm_empty_user_agent" ); } #[test] fn detects_kitty() { let env = FakeEnvironment::new().with_var("KITTY_WINDOW_ID", "1"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Kitty, None, None, None, None), "kitty_window_id_info" ); assert_eq!( terminal.user_agent_token(), "kitty", "kitty_window_id_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "kitty") .with_var("TERM_PROGRAM_VERSION", "0.30.1"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Kitty, Some("kitty"), Some("0.30.1"), None, None ), "kitty_term_program_info" ); assert_eq!( terminal.user_agent_token(), "kitty/0.30.1", "kitty_term_program_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM", "xterm-kitty") .with_var("ALACRITTY_SOCKET", "/tmp/alacritty"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Kitty, None, None, None, None), "kitty_term_over_alacritty_info" ); assert_eq!( terminal.user_agent_token(), "kitty", "kitty_term_over_alacritty_user_agent" ); } #[test] fn detects_alacritty() { let env = FakeEnvironment::new().with_var("ALACRITTY_SOCKET", "/tmp/alacritty"); let terminal = 
detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Alacritty, None, None, None, None), "alacritty_socket_info" ); assert_eq!( terminal.user_agent_token(), "Alacritty", "alacritty_socket_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "Alacritty") .with_var("TERM_PROGRAM_VERSION", "0.13.2"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Alacritty, Some("Alacritty"), Some("0.13.2"), None, None, ), "alacritty_term_program_info" ); assert_eq!( terminal.user_agent_token(), "Alacritty/0.13.2", "alacritty_term_program_user_agent" ); let env = FakeEnvironment::new().with_var("TERM", "alacritty"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Alacritty, None, None, None, None), "alacritty_term_info" ); assert_eq!( terminal.user_agent_token(), "Alacritty", "alacritty_term_user_agent" ); } #[test] fn detects_konsole() { let env = FakeEnvironment::new().with_var("KONSOLE_VERSION", "230800"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info(TerminalName::Konsole, None, Some("230800"), None, None), "konsole_version_info" ); assert_eq!( terminal.user_agent_token(), "Konsole/230800", "konsole_version_user_agent" ); let env = FakeEnvironment::new() .with_var("TERM_PROGRAM", "Konsole") .with_var("TERM_PROGRAM_VERSION", "230800"); let terminal = detect_terminal_info_from_env(&env); assert_eq!( terminal, terminal_info( TerminalName::Konsole, Some("Konsole"), Some("230800"), None, None ), "konsole_term_program_info" ); assert_eq!( terminal.user_agent_token(), "Konsole/230800", "konsole_term_program_user_agent" ); let env = FakeEnvironment::new().with_var("KONSOLE_VERSION", ""); let terminal = detect_terminal_info_from_env(&env);
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/path_utils.rs
codex-rs/core/src/path_utils.rs
use std::path::Path; use std::path::PathBuf; use crate::env; pub fn normalize_for_path_comparison(path: impl AsRef<Path>) -> std::io::Result<PathBuf> { let canonical = path.as_ref().canonicalize()?; Ok(normalize_for_wsl(canonical)) } fn normalize_for_wsl(path: PathBuf) -> PathBuf { normalize_for_wsl_with_flag(path, env::is_wsl()) } fn normalize_for_wsl_with_flag(path: PathBuf, is_wsl: bool) -> PathBuf { if !is_wsl { return path; } if !is_wsl_case_insensitive_path(&path) { return path; } lower_ascii_path(path) } fn is_wsl_case_insensitive_path(path: &Path) -> bool { #[cfg(target_os = "linux")] { use std::os::unix::ffi::OsStrExt; use std::path::Component; let mut components = path.components(); let Some(Component::RootDir) = components.next() else { return false; }; let Some(Component::Normal(mnt)) = components.next() else { return false; }; if !ascii_eq_ignore_case(mnt.as_bytes(), b"mnt") { return false; } let Some(Component::Normal(drive)) = components.next() else { return false; }; let drive_bytes = drive.as_bytes(); drive_bytes.len() == 1 && drive_bytes[0].is_ascii_alphabetic() } #[cfg(not(target_os = "linux"))] { let _ = path; false } } #[cfg(target_os = "linux")] fn ascii_eq_ignore_case(left: &[u8], right: &[u8]) -> bool { left.len() == right.len() && left .iter() .zip(right) .all(|(lhs, rhs)| lhs.to_ascii_lowercase() == *rhs) } #[cfg(target_os = "linux")] fn lower_ascii_path(path: PathBuf) -> PathBuf { use std::ffi::OsString; use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::OsStringExt; // WSL mounts Windows drives under /mnt/<drive>, which are case-insensitive. 
let bytes = path.as_os_str().as_bytes(); let mut lowered = Vec::with_capacity(bytes.len()); for byte in bytes { lowered.push(byte.to_ascii_lowercase()); } PathBuf::from(OsString::from_vec(lowered)) } #[cfg(not(target_os = "linux"))] fn lower_ascii_path(path: PathBuf) -> PathBuf { path } #[cfg(test)] mod tests { #[cfg(target_os = "linux")] mod wsl { use super::super::normalize_for_wsl_with_flag; use pretty_assertions::assert_eq; use std::path::PathBuf; #[test] fn wsl_mnt_drive_paths_lowercase() { let normalized = normalize_for_wsl_with_flag(PathBuf::from("/mnt/C/Users/Dev"), true); assert_eq!(normalized, PathBuf::from("/mnt/c/users/dev")); } #[test] fn wsl_non_drive_paths_unchanged() { let path = PathBuf::from("/mnt/cc/Users/Dev"); let normalized = normalize_for_wsl_with_flag(path.clone(), true); assert_eq!(normalized, path); } #[test] fn wsl_non_mnt_paths_unchanged() { let path = PathBuf::from("/home/Dev"); let normalized = normalize_for_wsl_with_flag(path.clone(), true); assert_eq!(normalized, path); } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/codex_conversation.rs
codex-rs/core/src/codex_conversation.rs
use crate::codex::Codex; use crate::error::Result as CodexResult; use crate::protocol::Event; use crate::protocol::Op; use crate::protocol::Submission; use std::path::PathBuf; pub struct CodexConversation { codex: Codex, rollout_path: PathBuf, } /// Conduit for the bidirectional stream of messages that compose a conversation /// in Codex. impl CodexConversation { pub(crate) fn new(codex: Codex, rollout_path: PathBuf) -> Self { Self { codex, rollout_path, } } pub async fn submit(&self, op: Op) -> CodexResult<String> { self.codex.submit(op).await } /// Use sparingly: this is intended to be removed soon. pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> { self.codex.submit_with_id(sub).await } pub async fn next_event(&self) -> CodexResult<Event> { self.codex.next_event().await } pub fn rollout_path(&self) -> PathBuf { self.rollout_path.clone() } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/review_format.rs
codex-rs/core/src/review_format.rs
use crate::protocol::ReviewFinding; use crate::protocol::ReviewOutputEvent; // Note: We keep this module UI-agnostic. It returns plain strings that // higher layers (e.g., TUI) may style as needed. fn format_location(item: &ReviewFinding) -> String { let path = item.code_location.absolute_file_path.display(); let start = item.code_location.line_range.start; let end = item.code_location.line_range.end; format!("{path}:{start}-{end}") } const REVIEW_FALLBACK_MESSAGE: &str = "Reviewer failed to output a response."; /// Format a full review findings block as plain text lines. /// /// - When `selection` is `Some`, each item line includes a checkbox marker: /// "[x]" for selected items and "[ ]" for unselected. Missing indices /// default to selected. /// - When `selection` is `None`, the marker is omitted and a simple bullet is /// rendered ("- Title — path:start-end"). pub fn format_review_findings_block( findings: &[ReviewFinding], selection: Option<&[bool]>, ) -> String { let mut lines: Vec<String> = Vec::new(); lines.push(String::new()); // Header if findings.len() > 1 { lines.push("Full review comments:".to_string()); } else { lines.push("Review comment:".to_string()); } for (idx, item) in findings.iter().enumerate() { lines.push(String::new()); let title = &item.title; let location = format_location(item); if let Some(flags) = selection { // Default to selected if index is out of bounds. let checked = flags.get(idx).copied().unwrap_or(true); let marker = if checked { "[x]" } else { "[ ]" }; lines.push(format!("- {marker} {title} — {location}")); } else { lines.push(format!("- {title} — {location}")); } for body_line in item.body.lines() { lines.push(format!(" {body_line}")); } } lines.join("\n") } /// Render a human-readable review summary suitable for a user-facing message. /// /// Returns either the explanation, the formatted findings block, or both /// separated by a blank line. If neither is present, emits a fallback message. 
pub fn render_review_output_text(output: &ReviewOutputEvent) -> String { let mut sections = Vec::new(); let explanation = output.overall_explanation.trim(); if !explanation.is_empty() { sections.push(explanation.to_string()); } if !output.findings.is_empty() { let findings = format_review_findings_block(&output.findings, None); let trimmed = findings.trim(); if !trimmed.is_empty() { sections.push(trimmed.to_string()); } } if sections.is_empty() { REVIEW_FALLBACK_MESSAGE.to_string() } else { sections.join("\n\n") } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/codex.rs
codex-rs/core/src/codex.rs
use std::collections::HashMap; use std::fmt::Debug; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use crate::AuthManager; use crate::SandboxState; use crate::client_common::REVIEW_PROMPT; use crate::compact; use crate::compact::run_inline_auto_compact_task; use crate::compact::should_use_remote_compact_task; use crate::compact_remote::run_inline_remote_auto_compact_task; use crate::exec_policy::ExecPolicyManager; use crate::features::Feature; use crate::features::Features; use crate::models_manager::manager::ModelsManager; use crate::models_manager::model_family::ModelFamily; use crate::parse_command::parse_command; use crate::parse_turn_item; use crate::stream_events_utils::HandleOutputCtx; use crate::stream_events_utils::handle_non_tool_response_item; use crate::stream_events_utils::handle_output_item_done; use crate::terminal; use crate::truncate::TruncationPolicy; use crate::user_notification::UserNotifier; use crate::util::error_or_panic; use async_channel::Receiver; use async_channel::Sender; use codex_protocol::ConversationId; use codex_protocol::approvals::ExecPolicyAmendment; use codex_protocol::items::TurnItem; use codex_protocol::protocol::FileChange; use codex_protocol::protocol::HasLegacyEvent; use codex_protocol::protocol::ItemCompletedEvent; use codex_protocol::protocol::ItemStartedEvent; use codex_protocol::protocol::RawResponseItemEvent; use codex_protocol::protocol::ReviewRequest; use codex_protocol::protocol::RolloutItem; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::TaskStartedEvent; use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::TurnContextItem; use codex_rmcp_client::ElicitationResponse; use futures::future::BoxFuture; use futures::prelude::*; use futures::stream::FuturesOrdered; use mcp_types::CallToolResult; use mcp_types::ListResourceTemplatesRequestParams; use 
mcp_types::ListResourceTemplatesResult; use mcp_types::ListResourcesRequestParams; use mcp_types::ListResourcesResult; use mcp_types::ReadResourceRequestParams; use mcp_types::ReadResourceResult; use mcp_types::RequestId; use serde_json; use serde_json::Value; use tokio::sync::Mutex; use tokio::sync::RwLock; use tokio::sync::oneshot; use tokio_util::sync::CancellationToken; use tracing::Instrument; use tracing::debug; use tracing::error; use tracing::field; use tracing::info; use tracing::instrument; use tracing::trace_span; use tracing::warn; use crate::ModelProviderInfo; use crate::WireApi; use crate::client::ModelClient; use crate::client_common::Prompt; use crate::client_common::ResponseEvent; use crate::compact::collect_user_messages; use crate::config::Config; use crate::config::Constrained; use crate::config::ConstraintResult; use crate::config::GhostSnapshotConfig; use crate::config::types::ShellEnvironmentPolicy; use crate::context_manager::ContextManager; use crate::environment_context::EnvironmentContext; use crate::error::CodexErr; use crate::error::Result as CodexResult; #[cfg(test)] use crate::exec::StreamOutput; use crate::exec_policy::ExecPolicyUpdateError; use crate::feedback_tags; use crate::mcp::auth::compute_auth_statuses; use crate::mcp_connection_manager::McpConnectionManager; use crate::model_provider_info::CHAT_WIRE_API_DEPRECATION_SUMMARY; use crate::project_doc::get_user_instructions; use crate::protocol::AgentMessageContentDeltaEvent; use crate::protocol::AgentReasoningSectionBreakEvent; use crate::protocol::ApplyPatchApprovalRequestEvent; use crate::protocol::AskForApproval; use crate::protocol::BackgroundEventEvent; use crate::protocol::DeprecationNoticeEvent; use crate::protocol::ErrorEvent; use crate::protocol::Event; use crate::protocol::EventMsg; use crate::protocol::ExecApprovalRequestEvent; use crate::protocol::Op; use crate::protocol::RateLimitSnapshot; use crate::protocol::ReasoningContentDeltaEvent; use 
crate::protocol::ReasoningRawContentDeltaEvent; use crate::protocol::ReviewDecision; use crate::protocol::SandboxPolicy; use crate::protocol::SessionConfiguredEvent; use crate::protocol::SkillErrorInfo; use crate::protocol::SkillMetadata as ProtocolSkillMetadata; use crate::protocol::StreamErrorEvent; use crate::protocol::Submission; use crate::protocol::TokenCountEvent; use crate::protocol::TokenUsage; use crate::protocol::TokenUsageInfo; use crate::protocol::TurnDiffEvent; use crate::protocol::WarningEvent; use crate::rollout::RolloutRecorder; use crate::rollout::RolloutRecorderParams; use crate::rollout::map_session_init_error; use crate::shell; use crate::shell_snapshot::ShellSnapshot; use crate::skills::SkillError; use crate::skills::SkillInjections; use crate::skills::SkillMetadata; use crate::skills::SkillsManager; use crate::skills::build_skill_injections; use crate::state::ActiveTurn; use crate::state::SessionServices; use crate::state::SessionState; use crate::tasks::GhostSnapshotTask; use crate::tasks::ReviewTask; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; use crate::tools::ToolRouter; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::parallel::ToolCallRuntime; use crate::tools::sandboxing::ApprovalStore; use crate::tools::spec::ToolsConfig; use crate::tools::spec::ToolsConfigParams; use crate::turn_diff_tracker::TurnDiffTracker; use crate::unified_exec::UnifiedExecSessionManager; use crate::user_instructions::DeveloperInstructions; use crate::user_instructions::UserInstructions; use crate::user_notification::UserNotification; use crate::util::backoff; use codex_async_utils::OrCancelExt; use codex_otel::otel_manager::OtelManager; use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseInputItem; use codex_protocol::models::ResponseItem; use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; 
use codex_protocol::protocol::CodexErrorInfo; use codex_protocol::protocol::InitialHistory; use codex_protocol::user_input::UserInput; use codex_utils_readiness::Readiness; use codex_utils_readiness::ReadinessFlag; /// The high-level interface to the Codex system. /// It operates as a queue pair where you send submissions and receive events. pub struct Codex { pub(crate) next_id: AtomicU64, pub(crate) tx_sub: Sender<Submission>, pub(crate) rx_event: Receiver<Event>, } /// Wrapper returned by [`Codex::spawn`] containing the spawned [`Codex`], /// the submission id for the initial `ConfigureSession` request and the /// unique session id. pub struct CodexSpawnOk { pub codex: Codex, pub conversation_id: ConversationId, } pub(crate) const INITIAL_SUBMIT_ID: &str = ""; pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64; static CHAT_WIRE_API_DEPRECATION_EMITTED: AtomicBool = AtomicBool::new(false); fn maybe_push_chat_wire_api_deprecation( config: &Config, post_session_configured_events: &mut Vec<Event>, ) { if config.model_provider.wire_api != WireApi::Chat { return; } if CHAT_WIRE_API_DEPRECATION_EMITTED .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_err() { return; } post_session_configured_events.push(Event { id: INITIAL_SUBMIT_ID.to_owned(), msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { summary: CHAT_WIRE_API_DEPRECATION_SUMMARY.to_string(), details: None, }), }); } impl Codex { /// Spawn a new [`Codex`] and initialize the session. 
pub async fn spawn( config: Config, auth_manager: Arc<AuthManager>, models_manager: Arc<ModelsManager>, skills_manager: Arc<SkillsManager>, conversation_history: InitialHistory, session_source: SessionSource, ) -> CodexResult<CodexSpawnOk> { let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let (tx_event, rx_event) = async_channel::unbounded(); let loaded_skills = config .features .enabled(Feature::Skills) .then(|| skills_manager.skills_for_cwd(&config.cwd)); if let Some(outcome) = &loaded_skills { for err in &outcome.errors { error!( "failed to load skill {}: {}", err.path.display(), err.message ); } } let user_instructions = get_user_instructions( &config, loaded_skills .as_ref() .map(|outcome| outcome.skills.as_slice()), ) .await; let exec_policy = ExecPolicyManager::load(&config.features, &config.config_layer_stack) .await .map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?; let config = Arc::new(config); if config.features.enabled(Feature::RemoteModels) && let Err(err) = models_manager .refresh_available_models_with_cache(&config) .await { error!("failed to refresh available models: {err:?}"); } let model = models_manager.get_model(&config.model, &config).await; let session_configuration = SessionConfiguration { provider: config.model_provider.clone(), model: model.clone(), model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), user_instructions, base_instructions: config.base_instructions.clone(), compact_prompt: config.compact_prompt.clone(), approval_policy: config.approval_policy.clone(), sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), session_source, }; // Generate a unique ID for the lifetime of this Codex session. 
let session_source_clone = session_configuration.session_source.clone(); let session = Session::new( session_configuration, config.clone(), auth_manager.clone(), models_manager.clone(), exec_policy, tx_event.clone(), conversation_history, session_source_clone, skills_manager, ) .await .map_err(|e| { error!("Failed to create session: {e:#}"); map_session_init_error(&e, &config.codex_home) })?; let conversation_id = session.conversation_id; // This task will run until Op::Shutdown is received. tokio::spawn(submission_loop(session, config, rx_sub)); let codex = Codex { next_id: AtomicU64::new(0), tx_sub, rx_event, }; Ok(CodexSpawnOk { codex, conversation_id, }) } /// Submit the `op` wrapped in a `Submission` with a unique ID. pub async fn submit(&self, op: Op) -> CodexResult<String> { let id = self .next_id .fetch_add(1, std::sync::atomic::Ordering::SeqCst) .to_string(); let sub = Submission { id: id.clone(), op }; self.submit_with_id(sub).await?; Ok(id) } /// Use sparingly: prefer `submit()` so Codex is responsible for generating /// unique IDs for each submission. pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> { self.tx_sub .send(sub) .await .map_err(|_| CodexErr::InternalAgentDied)?; Ok(()) } pub async fn next_event(&self) -> CodexResult<Event> { let event = self .rx_event .recv() .await .map_err(|_| CodexErr::InternalAgentDied)?; Ok(event) } } /// Context for an initialized model agent /// /// A session has at most 1 running task at a time, and can be interrupted by user input. pub(crate) struct Session { conversation_id: ConversationId, tx_event: Sender<Event>, state: Mutex<SessionState>, /// The set of enabled features should be invariant for the lifetime of the /// session. features: Features, pub(crate) active_turn: Mutex<Option<ActiveTurn>>, pub(crate) services: SessionServices, next_internal_sub_id: AtomicU64, } /// The context needed for a single turn of the conversation. 
#[derive(Debug)] pub(crate) struct TurnContext { pub(crate) sub_id: String, pub(crate) client: ModelClient, /// The session's current working directory. All relative paths provided by /// the model as well as sandbox policies are resolved against this path /// instead of `std::env::current_dir()`. pub(crate) cwd: PathBuf, pub(crate) developer_instructions: Option<String>, pub(crate) base_instructions: Option<String>, pub(crate) compact_prompt: Option<String>, pub(crate) user_instructions: Option<String>, pub(crate) approval_policy: AskForApproval, pub(crate) sandbox_policy: SandboxPolicy, pub(crate) shell_environment_policy: ShellEnvironmentPolicy, pub(crate) tools_config: ToolsConfig, pub(crate) ghost_snapshot: GhostSnapshotConfig, pub(crate) final_output_json_schema: Option<Value>, pub(crate) codex_linux_sandbox_exe: Option<PathBuf>, pub(crate) tool_call_gate: Arc<ReadinessFlag>, pub(crate) truncation_policy: TruncationPolicy, } impl TurnContext { pub(crate) fn resolve_path(&self, path: Option<String>) -> PathBuf { path.as_ref() .map(PathBuf::from) .map_or_else(|| self.cwd.clone(), |p| self.cwd.join(p)) } pub(crate) fn compact_prompt(&self) -> &str { self.compact_prompt .as_deref() .unwrap_or(compact::SUMMARIZATION_PROMPT) } } #[derive(Clone)] pub(crate) struct SessionConfiguration { /// Provider identifier ("openai", "openrouter", ...). provider: ModelProviderInfo, /// If not specified, server will use its default model. model: String, model_reasoning_effort: Option<ReasoningEffortConfig>, model_reasoning_summary: ReasoningSummaryConfig, /// Developer instructions that supplement the base instructions. developer_instructions: Option<String>, /// Model instructions that are appended to the base instructions. user_instructions: Option<String>, /// Base instructions override. base_instructions: Option<String>, /// Compact prompt override. 
compact_prompt: Option<String>, /// When to escalate for approval for execution approval_policy: Constrained<AskForApproval>, /// How to sandbox commands executed in the system sandbox_policy: Constrained<SandboxPolicy>, /// Working directory that should be treated as the *root* of the /// session. All relative paths supplied by the model as well as the /// execution sandbox are resolved against this directory **instead** /// of the process-wide current working directory. CLI front-ends are /// expected to expand this to an absolute path before sending the /// `ConfigureSession` operation so that the business-logic layer can /// operate deterministically. cwd: PathBuf, // TODO(pakrym): Remove config from here original_config_do_not_use: Arc<Config>, /// Source of the session (cli, vscode, exec, mcp, ...) session_source: SessionSource, } impl SessionConfiguration { pub(crate) fn apply(&self, updates: &SessionSettingsUpdate) -> ConstraintResult<Self> { let mut next_configuration = self.clone(); if let Some(model) = updates.model.clone() { next_configuration.model = model; } if let Some(effort) = updates.reasoning_effort { next_configuration.model_reasoning_effort = effort; } if let Some(summary) = updates.reasoning_summary { next_configuration.model_reasoning_summary = summary; } if let Some(approval_policy) = updates.approval_policy { next_configuration.approval_policy.set(approval_policy)?; } if let Some(sandbox_policy) = updates.sandbox_policy.clone() { next_configuration.sandbox_policy.set(sandbox_policy)?; } if let Some(cwd) = updates.cwd.clone() { next_configuration.cwd = cwd; } Ok(next_configuration) } } #[derive(Default, Clone)] pub(crate) struct SessionSettingsUpdate { pub(crate) cwd: Option<PathBuf>, pub(crate) approval_policy: Option<AskForApproval>, pub(crate) sandbox_policy: Option<SandboxPolicy>, pub(crate) model: Option<String>, pub(crate) reasoning_effort: Option<Option<ReasoningEffortConfig>>, pub(crate) reasoning_summary: 
Option<ReasoningSummaryConfig>, pub(crate) final_output_json_schema: Option<Option<Value>>, } impl Session { /// Don't expand the number of mutated arguments on config. We are in the process of getting rid of it. fn build_per_turn_config(session_configuration: &SessionConfiguration) -> Config { // todo(aibrahim): store this state somewhere else so we don't need to mut config let config = session_configuration.original_config_do_not_use.clone(); let mut per_turn_config = (*config).clone(); per_turn_config.model_reasoning_effort = session_configuration.model_reasoning_effort; per_turn_config.model_reasoning_summary = session_configuration.model_reasoning_summary; per_turn_config.features = config.features.clone(); per_turn_config } #[allow(clippy::too_many_arguments)] fn make_turn_context( auth_manager: Option<Arc<AuthManager>>, otel_manager: &OtelManager, provider: ModelProviderInfo, session_configuration: &SessionConfiguration, per_turn_config: Config, model_family: ModelFamily, conversation_id: ConversationId, sub_id: String, ) -> TurnContext { let otel_manager = otel_manager.clone().with_model( session_configuration.model.as_str(), model_family.get_model_slug(), ); let per_turn_config = Arc::new(per_turn_config); let client = ModelClient::new( per_turn_config.clone(), auth_manager, model_family.clone(), otel_manager, provider, session_configuration.model_reasoning_effort, session_configuration.model_reasoning_summary, conversation_id, session_configuration.session_source.clone(), ); let tools_config = ToolsConfig::new(&ToolsConfigParams { model_family: &model_family, features: &per_turn_config.features, }); TurnContext { sub_id, client, cwd: session_configuration.cwd.clone(), developer_instructions: session_configuration.developer_instructions.clone(), base_instructions: session_configuration.base_instructions.clone(), compact_prompt: session_configuration.compact_prompt.clone(), user_instructions: session_configuration.user_instructions.clone(), approval_policy: 
session_configuration.approval_policy.value(), sandbox_policy: session_configuration.sandbox_policy.get().clone(), shell_environment_policy: per_turn_config.shell_environment_policy.clone(), tools_config, ghost_snapshot: per_turn_config.ghost_snapshot.clone(), final_output_json_schema: None, codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), truncation_policy: TruncationPolicy::new( per_turn_config.as_ref(), model_family.truncation_policy, ), } } #[allow(clippy::too_many_arguments)] async fn new( session_configuration: SessionConfiguration, config: Arc<Config>, auth_manager: Arc<AuthManager>, models_manager: Arc<ModelsManager>, exec_policy: ExecPolicyManager, tx_event: Sender<Event>, initial_history: InitialHistory, session_source: SessionSource, skills_manager: Arc<SkillsManager>, ) -> anyhow::Result<Arc<Self>> { debug!( "Configuring session: model={}; provider={:?}", session_configuration.model, session_configuration.provider ); if !session_configuration.cwd.is_absolute() { return Err(anyhow::anyhow!( "cwd is not absolute: {:?}", session_configuration.cwd )); } let (conversation_id, rollout_params) = match &initial_history { InitialHistory::New | InitialHistory::Forked(_) => { let conversation_id = ConversationId::default(); ( conversation_id, RolloutRecorderParams::new( conversation_id, session_configuration.user_instructions.clone(), session_source, ), ) } InitialHistory::Resumed(resumed_history) => ( resumed_history.conversation_id, RolloutRecorderParams::resume(resumed_history.rollout_path.clone()), ), }; // Kick off independent async setup tasks in parallel to reduce startup latency. 
// // - initialize RolloutRecorder with new or resumed session info // - perform default shell discovery // - load history metadata let rollout_fut = RolloutRecorder::new(&config, rollout_params); let history_meta_fut = crate::message_history::history_metadata(&config); let auth_statuses_fut = compute_auth_statuses( config.mcp_servers.iter(), config.mcp_oauth_credentials_store_mode, ); // Join all independent futures. let (rollout_recorder, (history_log_id, history_entry_count), auth_statuses) = tokio::join!(rollout_fut, history_meta_fut, auth_statuses_fut); let rollout_recorder = rollout_recorder.map_err(|e| { error!("failed to initialize rollout recorder: {e:#}"); anyhow::Error::from(e) })?; let rollout_path = rollout_recorder.rollout_path.clone(); let mut post_session_configured_events = Vec::<Event>::new(); for (alias, feature) in config.features.legacy_feature_usages() { let canonical = feature.key(); let summary = format!("`{alias}` is deprecated. Use `[features].{canonical}` instead."); let details = if alias == canonical { None } else { Some(format!( "Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details." )) }; post_session_configured_events.push(Event { id: INITIAL_SUBMIT_ID.to_owned(), msg: EventMsg::DeprecationNotice(DeprecationNoticeEvent { summary, details }), }); } maybe_push_chat_wire_api_deprecation(&config, &mut post_session_configured_events); // todo(aibrahim): why are we passing model here while it can change? 
let otel_manager = OtelManager::new( conversation_id, session_configuration.model.as_str(), session_configuration.model.as_str(), auth_manager.auth().and_then(|a| a.get_account_id()), auth_manager.auth().and_then(|a| a.get_account_email()), auth_manager.auth().map(|a| a.mode), config.otel.log_user_prompt, terminal::user_agent(), session_configuration.session_source.clone(), ); otel_manager.conversation_starts( config.model_provider.name.as_str(), config.model_reasoning_effort, config.model_reasoning_summary, config.model_context_window, config.model_auto_compact_token_limit, config.approval_policy.value(), config.sandbox_policy.get().clone(), config.mcp_servers.keys().map(String::as_str).collect(), config.active_profile.clone(), ); let mut default_shell = shell::default_user_shell(); // Create the mutable state for the Session. if config.features.enabled(Feature::ShellSnapshot) { default_shell.shell_snapshot = ShellSnapshot::try_new(&config.codex_home, &default_shell) .await .map(Arc::new); } let state = SessionState::new(session_configuration.clone()); let services = SessionServices { mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())), mcp_startup_cancellation_token: CancellationToken::new(), unified_exec_manager: UnifiedExecSessionManager::default(), notifier: UserNotifier::new(config.notify.clone()), rollout: Mutex::new(Some(rollout_recorder)), user_shell: Arc::new(default_shell), show_raw_agent_reasoning: config.show_raw_agent_reasoning, exec_policy, auth_manager: Arc::clone(&auth_manager), otel_manager, models_manager: Arc::clone(&models_manager), tool_approvals: Mutex::new(ApprovalStore::default()), skills_manager, }; let sess = Arc::new(Session { conversation_id, tx_event: tx_event.clone(), state: Mutex::new(state), features: config.features.clone(), active_turn: Mutex::new(None), services, next_internal_sub_id: AtomicU64::new(0), }); // Dispatch the SessionConfiguredEvent first and then report any errors. 
// If resuming, include converted initial messages in the payload so UIs can render them immediately. let initial_messages = initial_history.get_event_msgs(); let events = std::iter::once(Event { id: INITIAL_SUBMIT_ID.to_owned(), msg: EventMsg::SessionConfigured(SessionConfiguredEvent { session_id: conversation_id, model: session_configuration.model.clone(), model_provider_id: config.model_provider_id.clone(), approval_policy: session_configuration.approval_policy.value(), sandbox_policy: session_configuration.sandbox_policy.get().clone(), cwd: session_configuration.cwd.clone(), reasoning_effort: session_configuration.model_reasoning_effort, history_log_id, history_entry_count, initial_messages, rollout_path, }), }) .chain(post_session_configured_events.into_iter()); for event in events { sess.send_event_raw(event).await; } // Construct sandbox_state before initialize() so it can be sent to each // MCP server immediately after it becomes ready (avoiding blocking). let sandbox_state = SandboxState { sandbox_policy: session_configuration.sandbox_policy.get().clone(), codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), sandbox_cwd: session_configuration.cwd.clone(), }; sess.services .mcp_connection_manager .write() .await .initialize( config.mcp_servers.clone(), config.mcp_oauth_credentials_store_mode, auth_statuses.clone(), tx_event.clone(), sess.services.mcp_startup_cancellation_token.clone(), sandbox_state, ) .await; // record_initial_history can emit events. We record only after the SessionConfiguredEvent is emitted. sess.record_initial_history(initial_history).await; Ok(sess) } pub(crate) fn get_tx_event(&self) -> Sender<Event> { self.tx_event.clone() } /// Ensure all rollout writes are durably flushed. 
pub(crate) async fn flush_rollout(&self) { let recorder = { let guard = self.services.rollout.lock().await; guard.clone() }; if let Some(rec) = recorder && let Err(e) = rec.flush().await { warn!("failed to flush rollout recorder: {e}"); } } fn next_internal_sub_id(&self) -> String { let id = self .next_internal_sub_id .fetch_add(1, std::sync::atomic::Ordering::SeqCst); format!("auto-compact-{id}") } async fn get_total_token_usage(&self) -> i64 { let state = self.state.lock().await; state.get_total_token_usage() } async fn record_initial_history(&self, conversation_history: InitialHistory) { let turn_context = self.new_default_turn().await; match conversation_history { InitialHistory::New => { // Build and record initial items (user instructions + environment context) let items = self.build_initial_context(&turn_context); self.record_conversation_items(&turn_context, &items).await; // Ensure initial items are visible to immediate readers (e.g., tests, forks). self.flush_rollout().await; } InitialHistory::Resumed(_) | InitialHistory::Forked(_) => { let rollout_items = conversation_history.get_rollout_items(); let persist = matches!(conversation_history, InitialHistory::Forked(_)); // If resuming, warn when the last recorded model differs from the current one. if let InitialHistory::Resumed(_) = conversation_history && let Some(prev) = rollout_items.iter().rev().find_map(|it| { if let RolloutItem::TurnContext(ctx) = it { Some(ctx.model.as_str()) } else { None } }) { let curr = turn_context.client.get_model(); if prev != curr { warn!( "resuming session with different model: previous={prev}, current={curr}" ); self.send_event( &turn_context, EventMsg::Warning(WarningEvent { message: format!( "This session was recorded with model `{prev}` but is resuming with `{curr}`. \ Consider switching back to `{prev}` as it may affect Codex performance." 
), }), ) .await; } } // Always add response items to conversation history let reconstructed_history = self.reconstruct_history_from_rollout(&turn_context, &rollout_items); if !reconstructed_history.is_empty() { self.record_into_history(&reconstructed_history, &turn_context) .await; } // Seed usage info from the recorded rollout so UIs can show token counts // immediately on resume/fork. if let Some(info) = Self::last_token_info_from_rollout(&rollout_items) { let mut state = self.state.lock().await; state.set_token_info(Some(info)); } // If persisting, persist all rollout items as-is (recorder filters) if persist && !rollout_items.is_empty() { self.persist_rollout_items(&rollout_items).await; } // Flush after seeding history and any persisted rollout copy. self.flush_rollout().await; } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/event_mapping.rs
codex-rs/core/src/event_mapping.rs
use codex_protocol::items::AgentMessageContent; use codex_protocol::items::AgentMessageItem; use codex_protocol::items::ReasoningItem; use codex_protocol::items::TurnItem; use codex_protocol::items::UserMessageItem; use codex_protocol::items::WebSearchItem; use codex_protocol::models::ContentItem; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ReasoningItemReasoningSummary; use codex_protocol::models::ResponseItem; use codex_protocol::models::WebSearchAction; use codex_protocol::user_input::UserInput; use tracing::warn; use uuid::Uuid; use crate::user_instructions::SkillInstructions; use crate::user_instructions::UserInstructions; use crate::user_shell_command::is_user_shell_command_text; fn is_session_prefix(text: &str) -> bool { let trimmed = text.trim_start(); let lowered = trimmed.to_ascii_lowercase(); lowered.starts_with("<environment_context>") } fn parse_user_message(message: &[ContentItem]) -> Option<UserMessageItem> { if UserInstructions::is_user_instructions(message) || SkillInstructions::is_skill_instructions(message) { return None; } let mut content: Vec<UserInput> = Vec::new(); for content_item in message.iter() { match content_item { ContentItem::InputText { text } => { if is_session_prefix(text) || is_user_shell_command_text(text) { return None; } content.push(UserInput::Text { text: text.clone() }); } ContentItem::InputImage { image_url } => { content.push(UserInput::Image { image_url: image_url.clone(), }); } ContentItem::OutputText { text } => { if is_session_prefix(text) { return None; } warn!("Output text in user message: {}", text); } } } Some(UserMessageItem::new(&content)) } fn parse_agent_message(id: Option<&String>, message: &[ContentItem]) -> AgentMessageItem { let mut content: Vec<AgentMessageContent> = Vec::new(); for content_item in message.iter() { match content_item { ContentItem::OutputText { text } => { content.push(AgentMessageContent::Text { text: text.clone() }); } _ => { warn!( "Unexpected content 
item in agent message: {:?}", content_item ); } } } let id = id.cloned().unwrap_or_else(|| Uuid::new_v4().to_string()); AgentMessageItem { id, content } } pub fn parse_turn_item(item: &ResponseItem) -> Option<TurnItem> { match item { ResponseItem::Message { role, content, id } => match role.as_str() { "user" => parse_user_message(content).map(TurnItem::UserMessage), "assistant" => Some(TurnItem::AgentMessage(parse_agent_message( id.as_ref(), content, ))), "system" => None, _ => None, }, ResponseItem::Reasoning { id, summary, content, .. } => { let summary_text = summary .iter() .map(|entry| match entry { ReasoningItemReasoningSummary::SummaryText { text } => text.clone(), }) .collect(); let raw_content = content .clone() .unwrap_or_default() .into_iter() .map(|entry| match entry { ReasoningItemContent::ReasoningText { text } | ReasoningItemContent::Text { text } => text, }) .collect(); Some(TurnItem::Reasoning(ReasoningItem { id: id.clone(), summary_text, raw_content, })) } ResponseItem::WebSearchCall { id, action: WebSearchAction::Search { query }, .. 
} => Some(TurnItem::WebSearch(WebSearchItem { id: id.clone().unwrap_or_default(), query: query.clone().unwrap_or_default(), })), _ => None, } } #[cfg(test)] mod tests { use super::parse_turn_item; use codex_protocol::items::AgentMessageContent; use codex_protocol::items::TurnItem; use codex_protocol::models::ContentItem; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ReasoningItemReasoningSummary; use codex_protocol::models::ResponseItem; use codex_protocol::models::WebSearchAction; use codex_protocol::user_input::UserInput; use pretty_assertions::assert_eq; #[test] fn parses_user_message_with_text_and_two_images() { let img1 = "https://example.com/one.png".to_string(); let img2 = "https://example.com/two.jpg".to_string(); let item = ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ ContentItem::InputText { text: "Hello world".to_string(), }, ContentItem::InputImage { image_url: img1.clone(), }, ContentItem::InputImage { image_url: img2.clone(), }, ], }; let turn_item = parse_turn_item(&item).expect("expected user message turn item"); match turn_item { TurnItem::UserMessage(user) => { let expected_content = vec![ UserInput::Text { text: "Hello world".to_string(), }, UserInput::Image { image_url: img1 }, UserInput::Image { image_url: img2 }, ]; assert_eq!(user.content, expected_content); } other => panic!("expected TurnItem::UserMessage, got {other:?}"), } } #[test] fn skips_user_instructions_and_env() { let items = vec![ ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "<user_instructions>test_text</user_instructions>".to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "<environment_context>test_text</environment_context>".to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "# AGENTS.md instructions for 
test_directory\n\n<INSTRUCTIONS>\ntest_text\n</INSTRUCTIONS>".to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "<skill>\n<name>demo</name>\n<path>skills/demo/SKILL.md</path>\nbody\n</skill>" .to_string(), }], }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: "<user_shell_command>echo 42</user_shell_command>".to_string(), }], }, ]; for item in items { let turn_item = parse_turn_item(&item); assert!(turn_item.is_none(), "expected none, got {turn_item:?}"); } } #[test] fn parses_agent_message() { let item = ResponseItem::Message { id: Some("msg-1".to_string()), role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: "Hello from Codex".to_string(), }], }; let turn_item = parse_turn_item(&item).expect("expected agent message turn item"); match turn_item { TurnItem::AgentMessage(message) => { let Some(AgentMessageContent::Text { text }) = message.content.first() else { panic!("expected agent message text content"); }; assert_eq!(text, "Hello from Codex"); } other => panic!("expected TurnItem::AgentMessage, got {other:?}"), } } #[test] fn parses_reasoning_summary_and_raw_content() { let item = ResponseItem::Reasoning { id: "reasoning_1".to_string(), summary: vec![ ReasoningItemReasoningSummary::SummaryText { text: "Step 1".to_string(), }, ReasoningItemReasoningSummary::SummaryText { text: "Step 2".to_string(), }, ], content: Some(vec![ReasoningItemContent::ReasoningText { text: "raw details".to_string(), }]), encrypted_content: None, }; let turn_item = parse_turn_item(&item).expect("expected reasoning turn item"); match turn_item { TurnItem::Reasoning(reasoning) => { assert_eq!( reasoning.summary_text, vec!["Step 1".to_string(), "Step 2".to_string()] ); assert_eq!(reasoning.raw_content, vec!["raw details".to_string()]); } other => panic!("expected TurnItem::Reasoning, got {other:?}"), } } #[test] fn 
parses_reasoning_including_raw_content() { let item = ResponseItem::Reasoning { id: "reasoning_2".to_string(), summary: vec![ReasoningItemReasoningSummary::SummaryText { text: "Summarized step".to_string(), }], content: Some(vec![ ReasoningItemContent::ReasoningText { text: "raw step".to_string(), }, ReasoningItemContent::Text { text: "final thought".to_string(), }, ]), encrypted_content: None, }; let turn_item = parse_turn_item(&item).expect("expected reasoning turn item"); match turn_item { TurnItem::Reasoning(reasoning) => { assert_eq!(reasoning.summary_text, vec!["Summarized step".to_string()]); assert_eq!( reasoning.raw_content, vec!["raw step".to_string(), "final thought".to_string()] ); } other => panic!("expected TurnItem::Reasoning, got {other:?}"), } } #[test] fn parses_web_search_call() { let item = ResponseItem::WebSearchCall { id: Some("ws_1".to_string()), status: Some("completed".to_string()), action: WebSearchAction::Search { query: Some("weather".to_string()), }, }; let turn_item = parse_turn_item(&item).expect("expected web search turn item"); match turn_item { TurnItem::WebSearch(search) => { assert_eq!(search.id, "ws_1"); assert_eq!(search.query, "weather"); } other => panic!("expected TurnItem::WebSearch, got {other:?}"), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/environment_context.rs
codex-rs/core/src/environment_context.rs
use crate::codex::TurnContext; use crate::protocol::AskForApproval; use crate::protocol::NetworkAccess; use crate::protocol::SandboxPolicy; use crate::shell::Shell; use codex_protocol::config_types::SandboxMode; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::ENVIRONMENT_CONTEXT_CLOSE_TAG; use codex_protocol::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG; use codex_utils_absolute_path::AbsolutePathBuf; use serde::Deserialize; use serde::Serialize; use std::path::PathBuf; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename = "environment_context", rename_all = "snake_case")] pub(crate) struct EnvironmentContext { pub cwd: Option<PathBuf>, pub approval_policy: Option<AskForApproval>, pub sandbox_mode: Option<SandboxMode>, pub network_access: Option<NetworkAccess>, pub writable_roots: Option<Vec<AbsolutePathBuf>>, pub shell: Shell, } impl EnvironmentContext { pub fn new( cwd: Option<PathBuf>, approval_policy: Option<AskForApproval>, sandbox_policy: Option<SandboxPolicy>, shell: Shell, ) -> Self { Self { cwd, approval_policy, sandbox_mode: match sandbox_policy { Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess), Some(SandboxPolicy::ReadOnly) => Some(SandboxMode::ReadOnly), Some(SandboxPolicy::ExternalSandbox { .. }) => Some(SandboxMode::DangerFullAccess), Some(SandboxPolicy::WorkspaceWrite { .. }) => Some(SandboxMode::WorkspaceWrite), None => None, }, network_access: match sandbox_policy { Some(SandboxPolicy::DangerFullAccess) => Some(NetworkAccess::Enabled), Some(SandboxPolicy::ReadOnly) => Some(NetworkAccess::Restricted), Some(SandboxPolicy::ExternalSandbox { network_access }) => Some(network_access), Some(SandboxPolicy::WorkspaceWrite { network_access, .. 
}) => { if network_access { Some(NetworkAccess::Enabled) } else { Some(NetworkAccess::Restricted) } } None => None, }, writable_roots: match sandbox_policy { Some(SandboxPolicy::WorkspaceWrite { writable_roots, .. }) => { if writable_roots.is_empty() { None } else { Some(writable_roots) } } _ => None, }, shell, } } /// Compares two environment contexts, ignoring the shell. Useful when /// comparing turn to turn, since the initial environment_context will /// include the shell, and then it is not configurable from turn to turn. pub fn equals_except_shell(&self, other: &EnvironmentContext) -> bool { let EnvironmentContext { cwd, approval_policy, sandbox_mode, network_access, writable_roots, // should compare all fields except shell shell: _, } = other; self.cwd == *cwd && self.approval_policy == *approval_policy && self.sandbox_mode == *sandbox_mode && self.network_access == *network_access && self.writable_roots == *writable_roots } pub fn diff(before: &TurnContext, after: &TurnContext, shell: &Shell) -> Self { let cwd = if before.cwd != after.cwd { Some(after.cwd.clone()) } else { None }; let approval_policy = if before.approval_policy != after.approval_policy { Some(after.approval_policy) } else { None }; let sandbox_policy = if before.sandbox_policy != after.sandbox_policy { Some(after.sandbox_policy.clone()) } else { None }; EnvironmentContext::new(cwd, approval_policy, sandbox_policy, shell.clone()) } pub fn from_turn_context(turn_context: &TurnContext, shell: &Shell) -> Self { Self::new( Some(turn_context.cwd.clone()), Some(turn_context.approval_policy), Some(turn_context.sandbox_policy.clone()), shell.clone(), ) } } impl EnvironmentContext { /// Serializes the environment context to XML. Libraries like `quick-xml` /// require custom macros to handle Enums with newtypes, so we just do it /// manually, to keep things simple. 
Output looks like: /// /// ```xml /// <environment_context> /// <cwd>...</cwd> /// <approval_policy>...</approval_policy> /// <sandbox_mode>...</sandbox_mode> /// <writable_roots>...</writable_roots> /// <network_access>...</network_access> /// <shell>...</shell> /// </environment_context> /// ``` pub fn serialize_to_xml(self) -> String { let mut lines = vec![ENVIRONMENT_CONTEXT_OPEN_TAG.to_string()]; if let Some(cwd) = self.cwd { lines.push(format!(" <cwd>{}</cwd>", cwd.to_string_lossy())); } if let Some(approval_policy) = self.approval_policy { lines.push(format!( " <approval_policy>{approval_policy}</approval_policy>" )); } if let Some(sandbox_mode) = self.sandbox_mode { lines.push(format!(" <sandbox_mode>{sandbox_mode}</sandbox_mode>")); } if let Some(network_access) = self.network_access { lines.push(format!( " <network_access>{network_access}</network_access>" )); } if let Some(writable_roots) = self.writable_roots { lines.push(" <writable_roots>".to_string()); for writable_root in writable_roots { lines.push(format!( " <root>{}</root>", writable_root.to_string_lossy() )); } lines.push(" </writable_roots>".to_string()); } let shell_name = self.shell.name(); lines.push(format!(" <shell>{shell_name}</shell>")); lines.push(ENVIRONMENT_CONTEXT_CLOSE_TAG.to_string()); lines.join("\n") } } impl From<EnvironmentContext> for ResponseItem { fn from(ec: EnvironmentContext) -> Self { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::InputText { text: ec.serialize_to_xml(), }], } } } #[cfg(test)] mod tests { use crate::shell::ShellType; use super::*; use core_test_support::test_path_buf; use core_test_support::test_tmp_path_buf; use pretty_assertions::assert_eq; fn fake_shell() -> Shell { Shell { shell_type: ShellType::Bash, shell_path: PathBuf::from("/bin/bash"), shell_snapshot: None, } } fn workspace_write_policy(writable_roots: Vec<&str>, network_access: bool) -> SandboxPolicy { SandboxPolicy::WorkspaceWrite { writable_roots: 
writable_roots .into_iter() .map(|s| AbsolutePathBuf::try_from(s).unwrap()) .collect(), network_access, exclude_tmpdir_env_var: false, exclude_slash_tmp: false, } } #[test] fn serialize_workspace_write_environment_context() { let cwd = test_path_buf("/repo"); let writable_root = test_tmp_path_buf(); let cwd_str = cwd.to_str().expect("cwd is valid utf-8"); let writable_root_str = writable_root .to_str() .expect("writable root is valid utf-8"); let context = EnvironmentContext::new( Some(cwd.clone()), Some(AskForApproval::OnRequest), Some(workspace_write_policy( vec![cwd_str, writable_root_str], false, )), fake_shell(), ); let expected = format!( r#"<environment_context> <cwd>{cwd}</cwd> <approval_policy>on-request</approval_policy> <sandbox_mode>workspace-write</sandbox_mode> <network_access>restricted</network_access> <writable_roots> <root>{cwd}</root> <root>{writable_root}</root> </writable_roots> <shell>bash</shell> </environment_context>"#, cwd = cwd.display(), writable_root = writable_root.display(), ); assert_eq!(context.serialize_to_xml(), expected); } #[test] fn serialize_read_only_environment_context() { let context = EnvironmentContext::new( None, Some(AskForApproval::Never), Some(SandboxPolicy::ReadOnly), fake_shell(), ); let expected = r#"<environment_context> <approval_policy>never</approval_policy> <sandbox_mode>read-only</sandbox_mode> <network_access>restricted</network_access> <shell>bash</shell> </environment_context>"#; assert_eq!(context.serialize_to_xml(), expected); } #[test] fn serialize_external_sandbox_environment_context() { let context = EnvironmentContext::new( None, Some(AskForApproval::OnRequest), Some(SandboxPolicy::ExternalSandbox { network_access: NetworkAccess::Enabled, }), fake_shell(), ); let expected = r#"<environment_context> <approval_policy>on-request</approval_policy> <sandbox_mode>danger-full-access</sandbox_mode> <network_access>enabled</network_access> <shell>bash</shell> </environment_context>"#; 
assert_eq!(context.serialize_to_xml(), expected); } #[test] fn serialize_external_sandbox_with_restricted_network_environment_context() { let context = EnvironmentContext::new( None, Some(AskForApproval::OnRequest), Some(SandboxPolicy::ExternalSandbox { network_access: NetworkAccess::Restricted, }), fake_shell(), ); let expected = r#"<environment_context> <approval_policy>on-request</approval_policy> <sandbox_mode>danger-full-access</sandbox_mode> <network_access>restricted</network_access> <shell>bash</shell> </environment_context>"#; assert_eq!(context.serialize_to_xml(), expected); } #[test] fn serialize_full_access_environment_context() { let context = EnvironmentContext::new( None, Some(AskForApproval::OnFailure), Some(SandboxPolicy::DangerFullAccess), fake_shell(), ); let expected = r#"<environment_context> <approval_policy>on-failure</approval_policy> <sandbox_mode>danger-full-access</sandbox_mode> <network_access>enabled</network_access> <shell>bash</shell> </environment_context>"#; assert_eq!(context.serialize_to_xml(), expected); } #[test] fn equals_except_shell_compares_approval_policy() { // Approval policy let context1 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(workspace_write_policy(vec!["/repo"], false)), fake_shell(), ); let context2 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::Never), Some(workspace_write_policy(vec!["/repo"], true)), fake_shell(), ); assert!(!context1.equals_except_shell(&context2)); } #[test] fn equals_except_shell_compares_sandbox_policy() { let context1 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(SandboxPolicy::new_read_only_policy()), fake_shell(), ); let context2 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(SandboxPolicy::new_workspace_write_policy()), fake_shell(), ); assert!(!context1.equals_except_shell(&context2)); } #[test] fn 
equals_except_shell_compares_workspace_write_policy() { let context1 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)), fake_shell(), ); let context2 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(workspace_write_policy(vec!["/repo", "/tmp"], true)), fake_shell(), ); assert!(!context1.equals_except_shell(&context2)); } #[test] fn equals_except_shell_ignores_shell() { let context1 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(workspace_write_policy(vec!["/repo"], false)), Shell { shell_type: ShellType::Bash, shell_path: "/bin/bash".into(), shell_snapshot: None, }, ); let context2 = EnvironmentContext::new( Some(PathBuf::from("/repo")), Some(AskForApproval::OnRequest), Some(workspace_write_policy(vec!["/repo"], false)), Shell { shell_type: ShellType::Zsh, shell_path: "/bin/zsh".into(), shell_snapshot: None, }, ); assert!(context1.equals_except_shell(&context2)); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/client_common.rs
codex-rs/core/src/client_common.rs
use crate::client_common::tools::ToolSpec; use crate::error::Result; use crate::models_manager::model_family::ModelFamily; pub use codex_api::common::ResponseEvent; use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS; use codex_protocol::models::ResponseItem; use futures::Stream; use serde::Deserialize; use serde_json::Value; use std::borrow::Cow; use std::collections::HashSet; use std::ops::Deref; use std::pin::Pin; use std::task::Context; use std::task::Poll; use tokio::sync::mpsc; /// Review thread system prompt. Edit `core/src/review_prompt.md` to customize. pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md"); // Centralized templates for review-related user messages pub const REVIEW_EXIT_SUCCESS_TMPL: &str = include_str!("../templates/review/exit_success.xml"); pub const REVIEW_EXIT_INTERRUPTED_TMPL: &str = include_str!("../templates/review/exit_interrupted.xml"); /// API request payload for a single model turn #[derive(Default, Debug, Clone)] pub struct Prompt { /// Conversation context input items. pub input: Vec<ResponseItem>, /// Tools available to the model, including additional tools sourced from /// external MCP servers. pub(crate) tools: Vec<ToolSpec>, /// Whether parallel tool calls are permitted for this prompt. pub(crate) parallel_tool_calls: bool, /// Optional override for the built-in BASE_INSTRUCTIONS. pub base_instructions_override: Option<String>, /// Optional the output schema for the model's response. 
pub output_schema: Option<Value>, } impl Prompt { pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> { let base = self .base_instructions_override .as_deref() .unwrap_or(model.base_instructions.deref()); // When there are no custom instructions, add apply_patch_tool_instructions if: // - the model needs special instructions (4.1) // AND // - there is no apply_patch tool present let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool { ToolSpec::Function(f) => f.name == "apply_patch", ToolSpec::Freeform(f) => f.name == "apply_patch", _ => false, }); if self.base_instructions_override.is_none() && model.needs_special_apply_patch_instructions && !is_apply_patch_tool_present { Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}")) } else { Cow::Borrowed(base) } } pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> { let mut input = self.input.clone(); // when using the *Freeform* apply_patch tool specifically, tool outputs // should be structured text, not json. Do NOT reserialize when using // the Function tool - note that this differs from the check above for // instructions. We declare the result as a named variable for clarity. let is_freeform_apply_patch_tool_present = self.tools.iter().any(|tool| match tool { ToolSpec::Freeform(f) => f.name == "apply_patch", _ => false, }); if is_freeform_apply_patch_tool_present { reserialize_shell_outputs(&mut input); } input } } fn reserialize_shell_outputs(items: &mut [ResponseItem]) { let mut shell_call_ids: HashSet<String> = HashSet::new(); items.iter_mut().for_each(|item| match item { ResponseItem::LocalShellCall { call_id, id, .. 
} => { if let Some(identifier) = call_id.clone().or_else(|| id.clone()) { shell_call_ids.insert(identifier); } } ResponseItem::CustomToolCall { id: _, status: _, call_id, name, input: _, } => { if name == "apply_patch" { shell_call_ids.insert(call_id.clone()); } } ResponseItem::CustomToolCallOutput { call_id, output } => { if shell_call_ids.remove(call_id) && let Some(structured) = parse_structured_shell_output(output) { *output = structured } } ResponseItem::FunctionCall { name, call_id, .. } if is_shell_tool_name(name) || name == "apply_patch" => { shell_call_ids.insert(call_id.clone()); } ResponseItem::FunctionCallOutput { call_id, output } => { if shell_call_ids.remove(call_id) && let Some(structured) = parse_structured_shell_output(&output.content) { output.content = structured } } _ => {} }) } fn is_shell_tool_name(name: &str) -> bool { matches!(name, "shell" | "container.exec") } #[derive(Deserialize)] struct ExecOutputJson { output: String, metadata: ExecOutputMetadataJson, } #[derive(Deserialize)] struct ExecOutputMetadataJson { exit_code: i32, duration_seconds: f32, } fn parse_structured_shell_output(raw: &str) -> Option<String> { let parsed: ExecOutputJson = serde_json::from_str(raw).ok()?; Some(build_structured_output(&parsed)) } fn build_structured_output(parsed: &ExecOutputJson) -> String { let mut sections = Vec::new(); sections.push(format!("Exit code: {}", parsed.metadata.exit_code)); sections.push(format!( "Wall time: {} seconds", parsed.metadata.duration_seconds )); let mut output = parsed.output.clone(); if let Some((stripped, total_lines)) = strip_total_output_header(&parsed.output) { sections.push(format!("Total output lines: {total_lines}")); output = stripped.to_string(); } sections.push("Output:".to_string()); sections.push(output); sections.join("\n") } fn strip_total_output_header(output: &str) -> Option<(&str, u32)> { let after_prefix = output.strip_prefix("Total output lines: ")?; let (total_segment, remainder) = 
after_prefix.split_once('\n')?; let total_lines = total_segment.parse::<u32>().ok()?; let remainder = remainder.strip_prefix('\n').unwrap_or(remainder); Some((remainder, total_lines)) } pub(crate) mod tools { use crate::tools::spec::JsonSchema; use serde::Deserialize; use serde::Serialize; /// When serialized as JSON, this produces a valid "Tool" in the OpenAI /// Responses API. #[derive(Debug, Clone, Serialize, PartialEq)] #[serde(tag = "type")] pub(crate) enum ToolSpec { #[serde(rename = "function")] Function(ResponsesApiTool), #[serde(rename = "local_shell")] LocalShell {}, // TODO: Understand why we get an error on web_search although the API docs say it's supported. // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C #[serde(rename = "web_search")] WebSearch {}, #[serde(rename = "custom")] Freeform(FreeformTool), } impl ToolSpec { pub(crate) fn name(&self) -> &str { match self { ToolSpec::Function(tool) => tool.name.as_str(), ToolSpec::LocalShell {} => "local_shell", ToolSpec::WebSearch {} => "web_search", ToolSpec::Freeform(tool) => tool.name.as_str(), } } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct FreeformTool { pub(crate) name: String, pub(crate) description: String, pub(crate) format: FreeformToolFormat, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct FreeformToolFormat { pub(crate) r#type: String, pub(crate) syntax: String, pub(crate) definition: String, } #[derive(Debug, Clone, Serialize, PartialEq)] pub struct ResponsesApiTool { pub(crate) name: String, pub(crate) description: String, /// TODO: Validation. When strict is set to true, the JSON schema, /// `required` and `additional_properties` must be present. All fields in /// `properties` must be present in `required`. 
pub(crate) strict: bool, pub(crate) parameters: JsonSchema, } } pub struct ResponseStream { pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>, } impl Stream for ResponseStream { type Item = Result<ResponseEvent>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { self.rx_event.poll_recv(cx) } } #[cfg(test)] mod tests { use codex_api::ResponsesApiRequest; use codex_api::common::OpenAiVerbosity; use codex_api::common::TextControls; use codex_api::create_text_param_for_request; use pretty_assertions::assert_eq; use crate::config::test_config; use crate::models_manager::manager::ModelsManager; use super::*; struct InstructionsTestCase { pub slug: &'static str, pub expects_apply_patch_instructions: bool, } #[test] fn get_full_instructions_no_user_content() { let prompt = Prompt { ..Default::default() }; let test_cases = vec![ InstructionsTestCase { slug: "gpt-3.5", expects_apply_patch_instructions: true, }, InstructionsTestCase { slug: "gpt-4.1", expects_apply_patch_instructions: true, }, InstructionsTestCase { slug: "gpt-4o", expects_apply_patch_instructions: true, }, InstructionsTestCase { slug: "gpt-5", expects_apply_patch_instructions: true, }, InstructionsTestCase { slug: "gpt-5.1", expects_apply_patch_instructions: false, }, InstructionsTestCase { slug: "codex-mini-latest", expects_apply_patch_instructions: true, }, InstructionsTestCase { slug: "gpt-oss:120b", expects_apply_patch_instructions: false, }, InstructionsTestCase { slug: "gpt-5.1-codex", expects_apply_patch_instructions: false, }, InstructionsTestCase { slug: "gpt-5.1-codex-max", expects_apply_patch_instructions: false, }, ]; for test_case in test_cases { let config = test_config(); let model_family = ModelsManager::construct_model_family_offline(test_case.slug, &config); let expected = if test_case.expects_apply_patch_instructions { format!( "{}\n{}", model_family.clone().base_instructions, APPLY_PATCH_TOOL_INSTRUCTIONS ) } else { 
model_family.clone().base_instructions }; let full = prompt.get_full_instructions(&model_family); assert_eq!(full, expected); } } #[test] fn serializes_text_verbosity_when_set() { let input: Vec<ResponseItem> = vec![]; let tools: Vec<serde_json::Value> = vec![]; let req = ResponsesApiRequest { model: "gpt-5.1", instructions: "i", input: &input, tools: &tools, tool_choice: "auto", parallel_tool_calls: true, reasoning: None, store: false, stream: true, include: vec![], prompt_cache_key: None, text: Some(TextControls { verbosity: Some(OpenAiVerbosity::Low), format: None, }), }; let v = serde_json::to_value(&req).expect("json"); assert_eq!( v.get("text") .and_then(|t| t.get("verbosity")) .and_then(|s| s.as_str()), Some("low") ); } #[test] fn serializes_text_schema_with_strict_format() { let input: Vec<ResponseItem> = vec![]; let tools: Vec<serde_json::Value> = vec![]; let schema = serde_json::json!({ "type": "object", "properties": { "answer": {"type": "string"} }, "required": ["answer"], }); let text_controls = create_text_param_for_request(None, &Some(schema.clone())).expect("text controls"); let req = ResponsesApiRequest { model: "gpt-5.1", instructions: "i", input: &input, tools: &tools, tool_choice: "auto", parallel_tool_calls: true, reasoning: None, store: false, stream: true, include: vec![], prompt_cache_key: None, text: Some(text_controls), }; let v = serde_json::to_value(&req).expect("json"); let text = v.get("text").expect("text field"); assert!(text.get("verbosity").is_none()); let format = text.get("format").expect("format field"); assert_eq!( format.get("name"), Some(&serde_json::Value::String("codex_output_schema".into())) ); assert_eq!( format.get("type"), Some(&serde_json::Value::String("json_schema".into())) ); assert_eq!(format.get("strict"), Some(&serde_json::Value::Bool(true))); assert_eq!(format.get("schema"), Some(&schema)); } #[test] fn omits_text_when_not_set() { let input: Vec<ResponseItem> = vec![]; let tools: Vec<serde_json::Value> = vec![]; 
let req = ResponsesApiRequest { model: "gpt-5.1", instructions: "i", input: &input, tools: &tools, tool_choice: "auto", parallel_tool_calls: true, reasoning: None, store: false, stream: true, include: vec![], prompt_cache_key: None, text: None, }; let v = serde_json::to_value(&req).expect("json"); assert!(v.get("text").is_none()); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/powershell.rs
codex-rs/core/src/powershell.rs
use std::path::PathBuf;

#[cfg(any(windows, test))]
use codex_utils_absolute_path::AbsolutePathBuf;

use crate::shell::ShellType;
use crate::shell::detect_shell_type;

const POWERSHELL_FLAGS: &[&str] = &["-nologo", "-noprofile", "-command", "-c"];

/// Prefixed command for powershell shell calls to force UTF-8 console output.
pub(crate) const UTF8_OUTPUT_PREFIX: &str =
    "[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\n";

/// Returns a copy of `command` whose script argument is prefixed with
/// [`UTF8_OUTPUT_PREFIX`], so PowerShell emits UTF-8 on its console.
///
/// Non-PowerShell invocations (anything `extract_powershell_command` rejects)
/// are returned unchanged. Scripts that already begin with the prefix (after
/// leading whitespace) are not prefixed a second time.
pub(crate) fn prefix_powershell_script_with_utf8(command: &[String]) -> Vec<String> {
    let Some((_, script)) = extract_powershell_command(command) else {
        // Not a recognized PowerShell `-Command` invocation; leave untouched.
        return command.to_vec();
    };

    // Skip double-prefixing when the script already starts with the UTF-8 stanza.
    let rewritten = if script.trim_start().starts_with(UTF8_OUTPUT_PREFIX) {
        script.to_string()
    } else {
        format!("{UTF8_OUTPUT_PREFIX}{script}")
    };

    // The script is assumed to be the final element of the command vector;
    // swap it out in place on an owned copy.
    let mut out = command.to_vec();
    if let Some(last) = out.last_mut() {
        *last = rewritten;
    }
    out
}

/// Extract the PowerShell script body from an invocation such as:
///
/// - ["pwsh", "-NoProfile", "-Command", "Get-ChildItem -Recurse | Select-String foo"]
/// - ["powershell.exe", "-Command", "Write-Host hi"]
/// - ["powershell", "-NoLogo", "-NoProfile", "-Command", "...script..."]
///
/// Returns (`shell`, `script`) when the first arg is a PowerShell executable and a
/// `-Command` (or `-c`) flag is present followed by a script string.
pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> { if command.len() < 3 { return None; } let shell = &command[0]; if !matches!( detect_shell_type(&PathBuf::from(shell)), Some(ShellType::PowerShell) ) { return None; } // Find the first occurrence of -Command (accept common short alias -c as well) let mut i = 1usize; while i + 1 < command.len() { let flag = &command[i]; // Reject unknown flags if !POWERSHELL_FLAGS.contains(&flag.to_ascii_lowercase().as_str()) { return None; } if flag.eq_ignore_ascii_case("-Command") || flag.eq_ignore_ascii_case("-c") { let script = &command[i + 1]; return Some((shell, script)); } i += 1; } None } /// This function attempts to find a valid PowerShell executable on the system. /// It first tries to find pwsh.exe, and if that fails, it tries to find /// powershell.exe. #[cfg(windows)] #[allow(dead_code)] pub(crate) fn try_find_powershellish_executable_blocking() -> Option<AbsolutePathBuf> { if let Some(pwsh_path) = try_find_pwsh_executable_blocking() { Some(pwsh_path) } else { try_find_powershell_executable_blocking() } } /// This function attempts to find a powershell.exe executable on the system. #[cfg(any(windows, test))] pub(crate) fn try_find_powershell_executable_blocking() -> Option<AbsolutePathBuf> { try_find_powershellish_executable_in_path(&["powershell.exe"]) } /// This function attempts to find a pwsh.exe executable on the system. /// Note that pwsh.exe and powershell.exe are different executables: /// /// - pwsh.exe is the cross-platform PowerShell Core (v6+) executable /// - powershell.exe is the Windows PowerShell (v5.1 and earlier) executable /// /// Further, while powershell.exe is included by default on Windows systems, /// pwsh.exe must be installed separately by the user. And even when the user /// has installed pwsh.exe, it may not be available in the system PATH, in which /// case we attempt to locate it via other means. 
#[cfg(any(windows, test))]
pub(crate) fn try_find_pwsh_executable_blocking() -> Option<AbsolutePathBuf> {
    // First strategy: ask pwsh itself (launched via cmd) for $PSHOME, then
    // resolve pwsh.exe against that directory. This can find an install that
    // is not on our own PATH.
    if let Some(ps_home) = std::process::Command::new("cmd")
        .args(["/C", "pwsh", "-NoProfile", "-Command", "$PSHOME"])
        .output()
        .ok()
        .and_then(|out| {
            if !out.status.success() {
                return None;
            }
            // Trim trailing newline/whitespace; an empty result means no usable $PSHOME.
            let stdout = String::from_utf8_lossy(&out.stdout);
            let trimmed = stdout.trim();
            (!trimmed.is_empty()).then(|| trimmed.to_string())
        })
    {
        let candidate = AbsolutePathBuf::resolve_path_against_base("pwsh.exe", &ps_home);
        // Only accept the candidate if it actually runs successfully.
        if let Ok(candidate_abs_path) = candidate
            && is_powershellish_executable_available(candidate_abs_path.as_path())
        {
            return Some(candidate_abs_path);
        }
    }

    // Second strategy: fall back to a plain PATH lookup.
    try_find_powershellish_executable_in_path(&["pwsh.exe"])
}

/// Searches PATH for the given executable names (via `which`), returning the
/// first candidate that resolves, runs successfully, and yields an absolute path.
#[cfg(any(windows, test))]
fn try_find_powershellish_executable_in_path(candidates: &[&str]) -> Option<AbsolutePathBuf> {
    for candidate in candidates {
        let Ok(resolved_path) = which::which(candidate) else {
            continue;
        };
        // Skip binaries that resolve but fail to execute.
        if !is_powershellish_executable_available(&resolved_path) {
            continue;
        }
        let Ok(abs_path) = AbsolutePathBuf::from_absolute_path(resolved_path) else {
            continue;
        };
        return Some(abs_path);
    }
    None
}

/// Probes the given executable by running a trivial `Write-Output ok` command;
/// returns true only if the process launches and exits successfully.
#[cfg(any(windows, test))]
fn is_powershellish_executable_available(powershell_or_pwsh_exe: &std::path::Path) -> bool {
    // This test works for both powershell.exe and pwsh.exe.
    std::process::Command::new(powershell_or_pwsh_exe)
        .args(["-NoLogo", "-NoProfile", "-Command", "Write-Output ok"])
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}

#[cfg(test)]
mod tests {
    use super::extract_powershell_command;

    #[test]
    fn extracts_basic_powershell_command() {
        let cmd = vec![
            "powershell".to_string(),
            "-Command".to_string(),
            "Write-Host hi".to_string(),
        ];
        let (_shell, script) = extract_powershell_command(&cmd).expect("extract");
        assert_eq!(script, "Write-Host hi");
    }

    #[test]
    fn extracts_lowercase_flags() {
        let cmd = vec![
            "powershell".to_string(),
            "-nologo".to_string(),
            "-command".to_string(),
            "Write-Host hi".to_string(),
        ];
        let (_shell, script) = extract_powershell_command(&cmd).expect("extract");
        assert_eq!(script, "Write-Host hi");
    }

    #[test]
    fn extracts_full_path_powershell_command() {
        let command = if cfg!(windows) {
            "C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe".to_string()
        } else {
            "/usr/local/bin/powershell.exe".to_string()
        };
        let cmd = vec![command, "-Command".to_string(), "Write-Host hi".to_string()];
        let (_shell, script) = extract_powershell_command(&cmd).expect("extract");
        assert_eq!(script, "Write-Host hi");
    }

    #[test]
    fn extracts_with_noprofile_and_alias() {
        let cmd = vec![
            "pwsh".to_string(),
            "-NoProfile".to_string(),
            "-c".to_string(),
            "Get-ChildItem | Select-String foo".to_string(),
        ];
        let (_shell, script) = extract_powershell_command(&cmd).expect("extract");
        assert_eq!(script, "Get-ChildItem | Select-String foo");
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/truncate.rs
codex-rs/core/src/truncate.rs
//! Utilities for truncating large chunks of output while preserving a prefix
//! and suffix on UTF-8 boundaries, and helpers for line/token‑based truncation
//! used across the core crate.

use crate::config::Config;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::openai_models::TruncationMode;
use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::protocol::TruncationPolicy as ProtocolTruncationPolicy;

// Heuristic used throughout this module to convert between byte and token budgets.
const APPROX_BYTES_PER_TOKEN: usize = 4;

/// A truncation budget expressed either as a byte count or as an approximate
/// token count. All conversions between the two use [`APPROX_BYTES_PER_TOKEN`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum TruncationPolicy {
    Bytes(usize),
    Tokens(usize),
}

impl From<TruncationPolicy> for ProtocolTruncationPolicy {
    fn from(value: TruncationPolicy) -> Self {
        match value {
            TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes),
            TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens),
        }
    }
}

impl From<TruncationPolicyConfig> for TruncationPolicy {
    fn from(config: TruncationPolicyConfig) -> Self {
        match config.mode {
            TruncationMode::Bytes => Self::Bytes(config.limit as usize),
            TruncationMode::Tokens => Self::Tokens(config.limit as usize),
        }
    }
}

impl TruncationPolicy {
    /// Scale the underlying budget by `multiplier`, rounding up to avoid under-budgeting.
    pub fn mul(self, multiplier: f64) -> Self {
        match self {
            TruncationPolicy::Bytes(bytes) => {
                TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
            }
            TruncationPolicy::Tokens(tokens) => {
                TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
            }
        }
    }

    /// Builds a policy from a model-family default (`truncation_policy`),
    /// letting `config.tool_output_token_limit` override the limit while
    /// preserving the family policy's unit (bytes stay bytes, tokens stay tokens).
    pub fn new(config: &Config, truncation_policy: TruncationPolicy) -> Self {
        let config_token_limit = config.tool_output_token_limit;
        match truncation_policy {
            TruncationPolicy::Bytes(family_bytes) => {
                if let Some(token_limit) = config_token_limit {
                    // Config limit is in tokens; convert to bytes for a Bytes policy.
                    Self::Bytes(approx_bytes_for_tokens(token_limit))
                } else {
                    Self::Bytes(family_bytes)
                }
            }
            TruncationPolicy::Tokens(family_tokens) => {
                if let Some(token_limit) = config_token_limit {
                    Self::Tokens(token_limit)
                } else {
                    Self::Tokens(family_tokens)
                }
            }
        }
    }

    /// Returns a token budget derived from this policy.
    ///
    /// - For `Tokens`, this is the explicit token limit.
    /// - For `Bytes`, this is an approximate token budget using the global
    ///   bytes-per-token heuristic.
    pub fn token_budget(&self) -> usize {
        match self {
            TruncationPolicy::Bytes(bytes) => {
                usize::try_from(approx_tokens_from_byte_count(*bytes)).unwrap_or(usize::MAX)
            }
            TruncationPolicy::Tokens(tokens) => *tokens,
        }
    }

    /// Returns a byte budget derived from this policy.
    ///
    /// - For `Bytes`, this is the explicit byte limit.
    /// - For `Tokens`, this is an approximate byte budget using the global
    ///   bytes-per-token heuristic.
    pub fn byte_budget(&self) -> usize {
        match self {
            TruncationPolicy::Bytes(bytes) => *bytes,
            TruncationPolicy::Tokens(tokens) => approx_bytes_for_tokens(*tokens),
        }
    }
}

/// Like [`truncate_text`], but when truncation occurs the result is prefixed
/// with a `Total output lines: N` header reporting the original line count.
pub(crate) fn formatted_truncate_text(content: &str, policy: TruncationPolicy) -> String {
    if content.len() <= policy.byte_budget() {
        return content.to_string();
    }
    let total_lines = content.lines().count();
    let result = truncate_text(content, policy);
    format!("Total output lines: {total_lines}\n\n{result}")
}

/// Truncate `content` to the policy's budget, keeping a prefix and suffix and
/// inserting a marker describing how much was removed (chars or tokens,
/// depending on the policy's unit).
pub(crate) fn truncate_text(content: &str, policy: TruncationPolicy) -> String {
    match policy {
        TruncationPolicy::Bytes(_) => truncate_with_byte_estimate(content, policy),
        TruncationPolicy::Tokens(_) => {
            let (truncated, _) = truncate_with_token_budget(content, policy);
            truncated
        }
    }
}

/// Globally truncate function output items to fit within the given
/// truncation policy's budget, preserving as many text/image items as
/// possible and appending a summary for any omitted text items.
pub(crate) fn truncate_function_output_items_with_policy(
    items: &[FunctionCallOutputContentItem],
    policy: TruncationPolicy,
) -> Vec<FunctionCallOutputContentItem> {
    let mut out: Vec<FunctionCallOutputContentItem> = Vec::with_capacity(items.len());
    // Track the remaining budget in the policy's own unit (bytes or tokens).
    let mut remaining_budget = match policy {
        TruncationPolicy::Bytes(_) => policy.byte_budget(),
        TruncationPolicy::Tokens(_) => policy.token_budget(),
    };
    let mut omitted_text_items = 0usize;
    for it in items {
        match it {
            FunctionCallOutputContentItem::InputText { text } => {
                if remaining_budget == 0 {
                    // Budget exhausted: count the item for the trailing summary.
                    omitted_text_items += 1;
                    continue;
                }
                let cost = match policy {
                    TruncationPolicy::Bytes(_) => text.len(),
                    TruncationPolicy::Tokens(_) => approx_token_count(text),
                };
                if cost <= remaining_budget {
                    // Fits whole; copy it and charge the budget.
                    out.push(FunctionCallOutputContentItem::InputText { text: text.clone() });
                    remaining_budget = remaining_budget.saturating_sub(cost);
                } else {
                    // Doesn't fit: spend the entire remaining budget on a snippet.
                    let snippet_policy = match policy {
                        TruncationPolicy::Bytes(_) => TruncationPolicy::Bytes(remaining_budget),
                        TruncationPolicy::Tokens(_) => TruncationPolicy::Tokens(remaining_budget),
                    };
                    let snippet = truncate_text(text, snippet_policy);
                    if snippet.is_empty() {
                        omitted_text_items += 1;
                    } else {
                        out.push(FunctionCallOutputContentItem::InputText { text: snippet });
                    }
                    remaining_budget = 0;
                }
            }
            FunctionCallOutputContentItem::InputImage { image_url } => {
                // Images are always kept and never charged against the budget.
                out.push(FunctionCallOutputContentItem::InputImage {
                    image_url: image_url.clone(),
                });
            }
        }
    }
    if omitted_text_items > 0 {
        out.push(FunctionCallOutputContentItem::InputText {
            text: format!("[omitted {omitted_text_items} text items ...]"),
        });
    }
    out
}

/// Truncate the middle of a UTF-8 string to at most `max_tokens` tokens,
/// preserving the beginning and the end. Returns the possibly truncated string
/// and `Some(original_token_count)` if truncation occurred; otherwise returns
/// the original string and `None`.
fn truncate_with_token_budget(s: &str, policy: TruncationPolicy) -> (String, Option<u64>) {
    if s.is_empty() {
        return (String::new(), None);
    }
    let max_tokens = policy.token_budget();
    let byte_len = s.len();
    // Fast path: the whole string fits within the approximate byte budget.
    if max_tokens > 0 && byte_len <= approx_bytes_for_tokens(max_tokens) {
        return (s.to_string(), None);
    }
    let truncated = truncate_with_byte_estimate(s, policy);
    let approx_total_usize = approx_token_count(s);
    let approx_total = u64::try_from(approx_total_usize).unwrap_or(u64::MAX);
    if truncated == s {
        (truncated, None)
    } else {
        (truncated, Some(approx_total))
    }
}

/// Truncate a string using a byte budget derived from the token budget, without
/// performing any real tokenization. This keeps the logic purely byte-based and
/// uses a bytes placeholder in the truncated output.
fn truncate_with_byte_estimate(s: &str, policy: TruncationPolicy) -> String {
    if s.is_empty() {
        return String::new();
    }
    let total_chars = s.chars().count();
    let max_bytes = policy.byte_budget();
    if max_bytes == 0 {
        // No budget to show content; just report that everything was truncated.
        let marker = format_truncation_marker(
            policy,
            removed_units_for_source(policy, s.len(), total_chars),
        );
        return marker;
    }
    if s.len() <= max_bytes {
        return s.to_string();
    }
    let total_bytes = s.len();
    // Split the budget between a kept prefix and a kept suffix.
    let (left_budget, right_budget) = split_budget(max_bytes);
    let (removed_chars, left, right) = split_string(s, left_budget, right_budget);
    let marker = format_truncation_marker(
        policy,
        removed_units_for_source(policy, total_bytes.saturating_sub(max_bytes), removed_chars),
    );
    assemble_truncated_output(left, right, &marker)
}

/// Splits `s` into a prefix of at most `beginning_bytes` and a suffix of at
/// most `end_bytes`, both on UTF-8 char boundaries, returning
/// `(removed_char_count, prefix, suffix)`. Characters straddling a budget
/// boundary are dropped rather than split.
fn split_string(s: &str, beginning_bytes: usize, end_bytes: usize) -> (usize, &str, &str) {
    if s.is_empty() {
        return (0, "", "");
    }
    let len = s.len();
    let tail_start_target = len.saturating_sub(end_bytes);
    let mut prefix_end = 0usize;
    let mut suffix_start = len;
    let mut removed_chars = 0usize;
    let mut suffix_started = false;
    for (idx, ch) in s.char_indices() {
        let char_end = idx + ch.len_utf8();
        // Chars fully inside the prefix budget extend the prefix.
        if char_end <= beginning_bytes {
            prefix_end = char_end;
            continue;
        }
        // Chars starting at/after the tail target belong to the suffix.
        if idx >= tail_start_target {
            if !suffix_started {
                suffix_start = idx;
                suffix_started = true;
            }
            continue;
        }
        // Everything in between is removed.
        removed_chars = removed_chars.saturating_add(1);
    }
    // Guard against overlapping budgets producing suffix_start < prefix_end.
    if suffix_start < prefix_end {
        suffix_start = prefix_end;
    }
    let before = &s[..prefix_end];
    let after = &s[suffix_start..];
    (removed_chars, before, after)
}

/// Renders the ellipsis marker inserted at the truncation point, in the
/// policy's unit ("tokens" vs "chars").
fn format_truncation_marker(policy: TruncationPolicy, removed_count: u64) -> String {
    match policy {
        TruncationPolicy::Tokens(_) => format!("…{removed_count} tokens truncated…"),
        TruncationPolicy::Bytes(_) => format!("…{removed_count} chars truncated…"),
    }
}

/// Splits a budget in two; any odd remainder goes to the right (suffix) half.
fn split_budget(budget: usize) -> (usize, usize) {
    let left = budget / 2;
    (left, budget - left)
}

/// Converts removed bytes/chars into the unit reported by the marker:
/// approximate tokens for token policies, chars for byte policies.
fn removed_units_for_source(
    policy: TruncationPolicy,
    removed_bytes: usize,
    removed_chars: usize,
) -> u64 {
    match policy {
        TruncationPolicy::Tokens(_) => approx_tokens_from_byte_count(removed_bytes),
        TruncationPolicy::Bytes(_) => u64::try_from(removed_chars).unwrap_or(u64::MAX),
    }
}

/// Concatenates prefix + marker + suffix into the final truncated string.
fn assemble_truncated_output(prefix: &str, suffix: &str, marker: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + marker.len() + suffix.len() + 1);
    out.push_str(prefix);
    out.push_str(marker);
    out.push_str(suffix);
    out
}

/// Approximate token count for `text`: bytes divided by
/// [`APPROX_BYTES_PER_TOKEN`], rounded up.
pub(crate) fn approx_token_count(text: &str) -> usize {
    let len = text.len();
    len.saturating_add(APPROX_BYTES_PER_TOKEN.saturating_sub(1)) / APPROX_BYTES_PER_TOKEN
}

/// Approximate byte budget for a token budget (saturating multiply).
fn approx_bytes_for_tokens(tokens: usize) -> usize {
    tokens.saturating_mul(APPROX_BYTES_PER_TOKEN)
}

/// Approximate token count for a byte count, rounded up, as `u64`.
pub(crate) fn approx_tokens_from_byte_count(bytes: usize) -> u64 {
    let bytes_u64 = bytes as u64;
    bytes_u64.saturating_add((APPROX_BYTES_PER_TOKEN as u64).saturating_sub(1))
        / (APPROX_BYTES_PER_TOKEN as u64)
}

#[cfg(test)]
mod tests {
    use super::TruncationPolicy;
    use super::approx_token_count;
    use super::formatted_truncate_text;
    use super::split_string;
    use super::truncate_function_output_items_with_policy;
    use super::truncate_text;
    use super::truncate_with_token_budget;
    use codex_protocol::models::FunctionCallOutputContentItem;
    use pretty_assertions::assert_eq;

    #[test]
    fn split_string_works() {
        assert_eq!(split_string("hello world", 5, 5), (1, "hello", "world"));
        assert_eq!(split_string("abc", 0, 0), (3, "", ""));
    }

    #[test]
    fn split_string_handles_empty_string() {
        assert_eq!(split_string("", 4, 4), (0, "", ""));
    }

    #[test]
    fn split_string_only_keeps_prefix_when_tail_budget_is_zero() {
        assert_eq!(split_string("abcdef", 3, 0), (3, "abc", ""));
    }

    #[test]
    fn split_string_only_keeps_suffix_when_prefix_budget_is_zero() {
        assert_eq!(split_string("abcdef", 0, 3), (3, "", "def"));
    }

    #[test]
    fn split_string_handles_overlapping_budgets_without_removal() {
        assert_eq!(split_string("abcdef", 4, 4), (0, "abcd", "ef"));
    }

    #[test]
    fn split_string_respects_utf8_boundaries() {
        // Each 😀 is 4 bytes; partial chars at the budget edge are dropped.
        assert_eq!(split_string("😀abc😀", 5, 5), (1, "😀a", "c😀"));
        assert_eq!(split_string("😀😀😀😀😀", 1, 1), (5, "", ""));
        assert_eq!(split_string("😀😀😀😀😀", 7, 7), (3, "😀", "😀"));
        assert_eq!(split_string("😀😀😀😀😀", 8, 8), (1, "😀😀", "😀😀"));
    }

    #[test]
    fn truncate_bytes_less_than_placeholder_returns_placeholder() {
        let content = "example output";
        assert_eq!(
            "Total output lines: 1\n\n…13 chars truncated…t",
            formatted_truncate_text(content, TruncationPolicy::Bytes(1)),
        );
    }

    #[test]
    fn truncate_tokens_less_than_placeholder_returns_placeholder() {
        let content = "example output";
        assert_eq!(
            "Total output lines: 1\n\nex…3 tokens truncated…ut",
            formatted_truncate_text(content, TruncationPolicy::Tokens(1)),
        );
    }

    #[test]
    fn truncate_tokens_under_limit_returns_original() {
        let content = "example output";
        assert_eq!(
            content,
            formatted_truncate_text(content, TruncationPolicy::Tokens(10)),
        );
    }

    #[test]
    fn truncate_bytes_under_limit_returns_original() {
        let content = "example output";
        assert_eq!(
            content,
            formatted_truncate_text(content, TruncationPolicy::Bytes(20)),
        );
    }

    #[test]
    fn truncate_tokens_over_limit_returns_truncated() {
        let content = "this is an example of a long output that should be truncated";
        assert_eq!(
            "Total output lines: 1\n\nthis is an…10 tokens truncated… truncated",
            formatted_truncate_text(content, TruncationPolicy::Tokens(5)),
        );
    }

    #[test]
    fn truncate_bytes_over_limit_returns_truncated() {
        let content = "this is an example of a long output that should be truncated";
        assert_eq!(
            "Total output lines: 1\n\nthis is an exam…30 chars truncated…ld be truncated",
            formatted_truncate_text(content, TruncationPolicy::Bytes(30)),
        );
    }

    #[test]
    fn truncate_bytes_reports_original_line_count_when_truncated() {
        let content =
            "this is an example of a long output that should be truncated\nalso some other line";
        assert_eq!(
            "Total output lines: 2\n\nthis is an exam…51 chars truncated…some other line",
            formatted_truncate_text(content, TruncationPolicy::Bytes(30)),
        );
    }

    #[test]
    fn truncate_tokens_reports_original_line_count_when_truncated() {
        let content =
            "this is an example of a long output that should be truncated\nalso some other line";
        assert_eq!(
            "Total output lines: 2\n\nthis is an example o…11 tokens truncated…also some other line",
            formatted_truncate_text(content, TruncationPolicy::Tokens(10)),
        );
    }

    #[test]
    fn truncate_with_token_budget_returns_original_when_under_limit() {
        let s = "short output";
        let limit = 100;
        let (out, original) = truncate_with_token_budget(s, TruncationPolicy::Tokens(limit));
        assert_eq!(out, s);
        assert_eq!(original, None);
    }

    #[test]
    fn truncate_with_token_budget_reports_truncation_at_zero_limit() {
        let s = "abcdef";
        let (out, original) = truncate_with_token_budget(s, TruncationPolicy::Tokens(0));
        assert_eq!(out, "…2 tokens truncated…");
        assert_eq!(original, Some(2));
    }

    #[test]
    fn truncate_middle_tokens_handles_utf8_content() {
        let s = "😀😀😀😀😀😀😀😀😀😀\nsecond line with text\n";
        let (out, tokens) = truncate_with_token_budget(s, TruncationPolicy::Tokens(8));
        assert_eq!(out, "😀😀😀😀…8 tokens truncated… line with text\n");
        assert_eq!(tokens, Some(16));
    }

    #[test]
    fn truncate_middle_bytes_handles_utf8_content() {
        let s = "😀😀😀😀😀😀😀😀😀😀\nsecond line with text\n";
        let out = truncate_text(s, TruncationPolicy::Bytes(20));
        assert_eq!(out, "😀😀…21 chars truncated…with text\n");
    }

    #[test]
    fn truncates_across_multiple_under_limit_texts_and_reports_omitted() {
        let chunk = "alpha beta gamma delta epsilon zeta eta theta iota kappa lambda mu nu xi omicron pi rho sigma tau upsilon phi chi psi omega.\n";
        let chunk_tokens = approx_token_count(chunk);
        assert!(chunk_tokens > 0, "chunk must consume tokens");
        // Budget covers exactly three chunks; the rest must be cut or omitted.
        let limit = chunk_tokens * 3;

        let t1 = chunk.to_string();
        let t2 = chunk.to_string();
        let t3 = chunk.repeat(10);
        let t4 = chunk.to_string();
        let t5 = chunk.to_string();

        let items = vec![
            FunctionCallOutputContentItem::InputText { text: t1.clone() },
            FunctionCallOutputContentItem::InputText { text: t2.clone() },
            FunctionCallOutputContentItem::InputImage {
                image_url: "img:mid".to_string(),
            },
            FunctionCallOutputContentItem::InputText { text: t3 },
            FunctionCallOutputContentItem::InputText { text: t4 },
            FunctionCallOutputContentItem::InputText { text: t5 },
        ];

        let output =
            truncate_function_output_items_with_policy(&items, TruncationPolicy::Tokens(limit));

        // Expect: t1 (full), t2 (full), image, t3 (truncated), summary mentioning 2 omitted.
        assert_eq!(output.len(), 5);
        let first_text = match &output[0] {
            FunctionCallOutputContentItem::InputText { text } => text,
            other => panic!("unexpected first item: {other:?}"),
        };
        assert_eq!(first_text, &t1);
        let second_text = match &output[1] {
            FunctionCallOutputContentItem::InputText { text } => text,
            other => panic!("unexpected second item: {other:?}"),
        };
        assert_eq!(second_text, &t2);
        assert_eq!(
            output[2],
            FunctionCallOutputContentItem::InputImage {
                image_url: "img:mid".to_string()
            }
        );
        let fourth_text = match &output[3] {
            FunctionCallOutputContentItem::InputText { text } => text,
            other => panic!("unexpected fourth item: {other:?}"),
        };
        assert!(
            fourth_text.contains("tokens truncated"),
            "expected marker in truncated snippet: {fourth_text}"
        );
        let summary_text = match &output[4] {
            FunctionCallOutputContentItem::InputText { text } => text,
            other => panic!("unexpected summary item: {other:?}"),
        };
        assert!(summary_text.contains("omitted 2 text items"));
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/api_bridge.rs
codex-rs/core/src/api_bridge.rs
use chrono::DateTime;
use chrono::Utc;
use codex_api::AuthProvider as ApiAuthProvider;
use codex_api::TransportError;
use codex_api::error::ApiError;
use codex_api::rate_limits::parse_rate_limit;
use http::HeaderMap;
use serde::Deserialize;

use crate::auth::CodexAuth;
use crate::error::CodexErr;
use crate::error::RetryLimitReachedError;
use crate::error::UnexpectedResponseError;
use crate::error::UsageLimitReachedError;
use crate::model_provider_info::ModelProviderInfo;
use crate::token_data::PlanType;

/// Translates a `codex_api::ApiError` into the core crate's `CodexErr`,
/// unpacking transport-level HTTP failures into more specific variants
/// (invalid request, usage limits, retry limits, unexpected status).
pub(crate) fn map_api_error(err: ApiError) -> CodexErr {
    match err {
        ApiError::ContextWindowExceeded => CodexErr::ContextWindowExceeded,
        ApiError::QuotaExceeded => CodexErr::QuotaExceeded,
        ApiError::UsageNotIncluded => CodexErr::UsageNotIncluded,
        ApiError::Retryable { message, delay } => CodexErr::Stream(message, delay),
        ApiError::Stream(msg) => CodexErr::Stream(msg, None),
        ApiError::Api { status, message } => CodexErr::UnexpectedStatus(UnexpectedResponseError {
            status,
            body: message,
            request_id: None,
        }),
        ApiError::Transport(transport) => match transport {
            TransportError::Http {
                status,
                headers,
                body,
            } => {
                let body_text = body.unwrap_or_default();
                if status == http::StatusCode::BAD_REQUEST {
                    // 400s mentioning invalid image data get a dedicated variant.
                    if body_text
                        .contains("The image data you provided does not represent a valid image")
                    {
                        CodexErr::InvalidImageRequest()
                    } else {
                        CodexErr::InvalidRequest(body_text)
                    }
                } else if status == http::StatusCode::INTERNAL_SERVER_ERROR {
                    CodexErr::InternalServerError
                } else if status == http::StatusCode::TOO_MANY_REQUESTS {
                    // 429 bodies may carry a structured usage error; try to parse it.
                    if let Ok(err) = serde_json::from_str::<UsageErrorResponse>(&body_text) {
                        if err.error.error_type.as_deref() == Some("usage_limit_reached") {
                            let rate_limits = headers.as_ref().and_then(parse_rate_limit);
                            // `resets_at` is a unix timestamp (seconds).
                            let resets_at = err
                                .error
                                .resets_at
                                .and_then(|seconds| DateTime::<Utc>::from_timestamp(seconds, 0));
                            return CodexErr::UsageLimitReached(UsageLimitReachedError {
                                plan_type: err.error.plan_type,
                                resets_at,
                                rate_limits,
                            });
                        } else if err.error.error_type.as_deref() == Some("usage_not_included") {
                            return CodexErr::UsageNotIncluded;
                        }
                    }
                    // Unstructured 429: treat as a retry-limit failure.
                    CodexErr::RetryLimit(RetryLimitReachedError {
                        status,
                        request_id: extract_request_id(headers.as_ref()),
                    })
                } else {
                    CodexErr::UnexpectedStatus(UnexpectedResponseError {
                        status,
                        body: body_text,
                        request_id: extract_request_id(headers.as_ref()),
                    })
                }
            }
            TransportError::RetryLimit => CodexErr::RetryLimit(RetryLimitReachedError {
                status: http::StatusCode::INTERNAL_SERVER_ERROR,
                request_id: None,
            }),
            TransportError::Timeout => CodexErr::Timeout,
            TransportError::Network(msg) | TransportError::Build(msg) => {
                CodexErr::Stream(msg, None)
            }
        },
        ApiError::RateLimit(msg) => CodexErr::Stream(msg, None),
    }
}

/// Pulls a request identifier out of the response headers, checking
/// `cf-ray`, then `x-request-id`, then `x-oai-request-id`, in that order.
fn extract_request_id(headers: Option<&HeaderMap>) -> Option<String> {
    headers.and_then(|map| {
        ["cf-ray", "x-request-id", "x-oai-request-id"]
            .iter()
            .find_map(|name| {
                map.get(*name)
                    .and_then(|v| v.to_str().ok())
                    .map(str::to_string)
            })
    })
}

/// Builds the auth provider handed to `codex_api`, choosing credentials in
/// order of precedence: provider API key, provider experimental bearer token,
/// then a token from `CodexAuth` (with its account id); otherwise anonymous.
pub(crate) async fn auth_provider_from_auth(
    auth: Option<CodexAuth>,
    provider: &ModelProviderInfo,
) -> crate::error::Result<CoreAuthProvider> {
    if let Some(api_key) = provider.api_key()? {
        return Ok(CoreAuthProvider {
            token: Some(api_key),
            account_id: None,
        });
    }
    if let Some(token) = provider.experimental_bearer_token.clone() {
        return Ok(CoreAuthProvider {
            token: Some(token),
            account_id: None,
        });
    }
    if let Some(auth) = auth {
        let token = auth.get_token().await?;
        Ok(CoreAuthProvider {
            token: Some(token),
            account_id: auth.get_account_id(),
        })
    } else {
        // No credentials available; return an empty provider.
        Ok(CoreAuthProvider {
            token: None,
            account_id: None,
        })
    }
}

// Shape of the JSON error body returned with 429 usage-limit responses.
#[derive(Debug, Deserialize)]
struct UsageErrorResponse {
    error: UsageErrorBody,
}

#[derive(Debug, Deserialize)]
struct UsageErrorBody {
    // Discriminator, e.g. "usage_limit_reached" or "usage_not_included".
    #[serde(rename = "type")]
    error_type: Option<String>,
    plan_type: Option<PlanType>,
    // Unix timestamp (seconds) at which the usage limit resets.
    resets_at: Option<i64>,
}

/// Credentials adapter implementing `codex_api::AuthProvider` over an optional
/// bearer token and account id.
#[derive(Clone, Default)]
pub(crate) struct CoreAuthProvider {
    token: Option<String>,
    account_id: Option<String>,
}

impl ApiAuthProvider for CoreAuthProvider {
    fn bearer_token(&self) -> Option<String> {
        self.token.clone()
    }

    fn account_id(&self) -> Option<String> {
        self.account_id.clone()
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/features.rs
codex-rs/core/src/features.rs
//! Centralized feature flags and metadata.
//!
//! This module defines a small set of toggles that gate experimental and
//! optional behavior across the codebase. Instead of wiring individual
//! booleans through multiple types, call sites consult a single `Features`
//! container attached to `Config`.
use crate::config::ConfigToml;
use crate::config::profile::ConfigProfile;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
use std::collections::BTreeSet;

mod legacy;
pub(crate) use legacy::LegacyFeatureToggles;

/// High-level lifecycle stage for a feature.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Stage {
    Experimental,
    // Beta features carry user-facing copy so the UI can render them in a menu
    // and announce them without a separate lookup table.
    Beta {
        name: &'static str,
        menu_description: &'static str,
        announcement: &'static str,
    },
    Stable,
    Deprecated,
    Removed,
}

impl Stage {
    /// User-facing menu name, only present for `Stage::Beta`.
    pub fn beta_menu_name(self) -> Option<&'static str> {
        match self {
            Stage::Beta { name, .. } => Some(name),
            _ => None,
        }
    }

    /// Short description shown next to the beta menu entry; `None` unless beta.
    pub fn beta_menu_description(self) -> Option<&'static str> {
        match self {
            Stage::Beta {
                menu_description, ..
            } => Some(menu_description),
            _ => None,
        }
    }

    /// One-line announcement blurb for the feature; `None` unless beta.
    pub fn beta_announcement(self) -> Option<&'static str> {
        match self {
            Stage::Beta { announcement, .. } => Some(announcement),
            _ => None,
        }
    }
}

/// Unique features toggled via configuration.
//
// NOTE: metadata for each variant (config key, stage, default) lives in the
// `FEATURES` registry at the bottom of this file, not on the variant itself.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Feature {
    // Stable.
    /// Create a ghost commit at each turn.
    GhostCommit,
    /// Include the view_image tool.
    ViewImageTool,
    /// Send warnings to the model to correct it on the tool usage.
    ModelWarnings,
    /// Enable the default shell tool.
    ShellTool,
    // Experimental
    /// Use the single unified PTY-backed exec tool.
    UnifiedExec,
    /// Include the freeform apply_patch tool.
    ApplyPatchFreeform,
    /// Allow the model to request web searches.
    WebSearchRequest,
    /// Gate the execpolicy enforcement for shell/unified exec.
    ExecPolicy,
    /// Enable Windows sandbox (restricted token) on Windows.
    WindowsSandbox,
    /// Use the elevated Windows sandbox pipeline (setup + runner).
    WindowsSandboxElevated,
    /// Remote compaction enabled (only for ChatGPT auth)
    RemoteCompaction,
    /// Refresh remote models and emit AppReady once the list is available.
    RemoteModels,
    /// Allow model to call multiple tools in parallel (only for models supporting it).
    ParallelToolCalls,
    /// Experimental shell snapshotting.
    ShellSnapshot,
    /// Experimental TUI v2 (viewport) implementation.
    Tui2,
    /// Enable discovery and injection of skills.
    Skills,
    /// Enforce UTF8 output in Powershell.
    PowershellUtf8,
}

impl Feature {
    /// Config key under `[features]` that toggles this feature.
    pub fn key(self) -> &'static str {
        self.info().key
    }

    /// Lifecycle stage from the `FEATURES` registry.
    pub fn stage(self) -> Stage {
        self.info().stage
    }

    /// Whether the feature is on before any config is applied.
    pub fn default_enabled(self) -> bool {
        self.info().default_enabled
    }

    /// Looks up this variant's spec in the `FEATURES` registry.
    ///
    /// Linear scan over a small static slice; panics via `unreachable!` if a
    /// variant is ever missing from `FEATURES` (a registry/enum mismatch bug).
    fn info(self) -> &'static FeatureSpec {
        FEATURES
            .iter()
            .find(|spec| spec.id == self)
            .unwrap_or_else(|| unreachable!("missing FeatureSpec for {:?}", self))
    }
}

/// Record that a legacy alias (old config key) was used to toggle a feature.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct LegacyFeatureUsage {
    pub alias: String,
    pub feature: Feature,
}

/// Holds the effective set of enabled features.
#[derive(Debug, Clone, Default, PartialEq)]
pub struct Features {
    // Features currently switched on.
    enabled: BTreeSet<Feature>,
    // Legacy config keys observed while building this set, kept for warnings.
    legacy_usages: BTreeSet<LegacyFeatureUsage>,
}

/// Programmatic overrides applied last, after all config sources.
#[derive(Debug, Clone, Default)]
pub struct FeatureOverrides {
    pub include_apply_patch_tool: Option<bool>,
    pub web_search_request: Option<bool>,
}

impl FeatureOverrides {
    /// Applies the overrides by funnelling them through the legacy-toggle path.
    fn apply(self, features: &mut Features) {
        LegacyFeatureToggles {
            include_apply_patch_tool: self.include_apply_patch_tool,
            tools_web_search: self.web_search_request,
            ..Default::default()
        }
        .apply(features);
    }
}

impl Features {
    /// Starts with built-in defaults.
    pub fn with_defaults() -> Self {
        let mut set = BTreeSet::new();
        for spec in FEATURES {
            if spec.default_enabled {
                set.insert(spec.id);
            }
        }
        Self {
            enabled: set,
            legacy_usages: BTreeSet::new(),
        }
    }

    /// Returns whether `f` is currently enabled.
    pub fn enabled(&self, f: Feature) -> bool {
        self.enabled.contains(&f)
    }

    /// Turns `f` on; chainable.
    pub fn enable(&mut self, f: Feature) -> &mut Self {
        self.enabled.insert(f);
        self
    }

    /// Turns `f` off; chainable.
    pub fn disable(&mut self, f: Feature) -> &mut Self {
        self.enabled.remove(&f);
        self
    }

    /// Records a legacy-alias usage unconditionally (no alias==key check).
    pub fn record_legacy_usage_force(&mut self, alias: &str, feature: Feature) {
        self.legacy_usages.insert(LegacyFeatureUsage {
            alias: alias.to_string(),
            feature,
        });
    }

    /// Records a legacy-alias usage, skipping the no-op case where the alias
    /// is already the feature's canonical key.
    pub fn record_legacy_usage(&mut self, alias: &str, feature: Feature) {
        if alias == feature.key() {
            return;
        }
        self.record_legacy_usage_force(alias, feature);
    }

    /// Iterates over `(alias, feature)` pairs recorded during config parsing.
    pub fn legacy_feature_usages(&self) -> impl Iterator<Item = (&str, Feature)> + '_ {
        self.legacy_usages
            .iter()
            .map(|usage| (usage.alias.as_str(), usage.feature))
    }

    /// Apply a table of key -> bool toggles (e.g. from TOML).
    pub fn apply_map(&mut self, m: &BTreeMap<String, bool>) {
        for (k, v) in m {
            match feature_for_key(k) {
                Some(feat) => {
                    // NOTE(review): this alias check is also performed inside
                    // record_legacy_usage, so the guard here is redundant but
                    // harmless.
                    if k != feat.key() {
                        self.record_legacy_usage(k.as_str(), feat);
                    }
                    if *v {
                        self.enable(feat);
                    } else {
                        self.disable(feat);
                    }
                }
                None => {
                    // Unknown keys are ignored with a warning rather than
                    // failing config load.
                    tracing::warn!("unknown feature key in config: {k}");
                }
            }
        }
    }

    /// Builds the effective feature set from all config sources.
    ///
    /// Precedence (later sources win): built-in defaults, then base legacy
    /// toggles, then the base `[features]` table, then profile legacy toggles,
    /// then the profile `[features]` table, then programmatic `overrides`.
    pub fn from_config(
        cfg: &ConfigToml,
        config_profile: &ConfigProfile,
        overrides: FeatureOverrides,
    ) -> Self {
        let mut features = Features::with_defaults();
        let base_legacy = LegacyFeatureToggles {
            experimental_use_freeform_apply_patch: cfg.experimental_use_freeform_apply_patch,
            experimental_use_unified_exec_tool: cfg.experimental_use_unified_exec_tool,
            tools_web_search: cfg.tools.as_ref().and_then(|t| t.web_search),
            tools_view_image: cfg.tools.as_ref().and_then(|t| t.view_image),
            ..Default::default()
        };
        base_legacy.apply(&mut features);
        if let Some(base_features) = cfg.features.as_ref() {
            features.apply_map(&base_features.entries);
        }
        let profile_legacy = LegacyFeatureToggles {
            include_apply_patch_tool: config_profile.include_apply_patch_tool,
            experimental_use_freeform_apply_patch: config_profile
                .experimental_use_freeform_apply_patch,
            experimental_use_unified_exec_tool: config_profile.experimental_use_unified_exec_tool,
            tools_web_search: config_profile.tools_web_search,
            tools_view_image: config_profile.tools_view_image,
        };
        profile_legacy.apply(&mut features);
        if let Some(profile_features) = config_profile.features.as_ref() {
            features.apply_map(&profile_features.entries);
        }
        overrides.apply(&mut features);
        features
    }

    /// Snapshot of the enabled set as an owned Vec (sorted, from the BTreeSet).
    pub fn enabled_features(&self) -> Vec<Feature> {
        self.enabled.iter().copied().collect()
    }
}

/// Keys accepted in `[features]` tables.
///
/// Falls back to `legacy::feature_for_key` for old alias keys.
fn feature_for_key(key: &str) -> Option<Feature> {
    for spec in FEATURES {
        if spec.key == key {
            return Some(spec.id);
        }
    }
    legacy::feature_for_key(key)
}

/// Returns `true` if the provided string matches a known feature toggle key.
pub fn is_known_feature_key(key: &str) -> bool {
    feature_for_key(key).is_some()
}

/// Deserializable features table for TOML.
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct FeaturesToml {
    // Flattened so arbitrary `key = bool` entries deserialize directly.
    #[serde(flatten)]
    pub entries: BTreeMap<String, bool>,
}

/// Single, easy-to-read registry of all feature definitions.
#[derive(Debug, Clone, Copy)]
pub struct FeatureSpec {
    pub id: Feature,
    pub key: &'static str,
    pub stage: Stage,
    pub default_enabled: bool,
}

pub const FEATURES: &[FeatureSpec] = &[
    // Stable features.
    FeatureSpec {
        id: Feature::GhostCommit,
        key: "undo",
        stage: Stage::Stable,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ParallelToolCalls,
        key: "parallel",
        stage: Stage::Stable,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::ViewImageTool,
        key: "view_image_tool",
        stage: Stage::Stable,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::ShellTool,
        key: "shell_tool",
        stage: Stage::Stable,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::ModelWarnings,
        key: "warnings",
        stage: Stage::Stable,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::WebSearchRequest,
        key: "web_search_request",
        stage: Stage::Stable,
        default_enabled: false,
    },
    // Beta program. Rendered in the `/experimental` menu for users.
    FeatureSpec {
        id: Feature::UnifiedExec,
        key: "unified_exec",
        stage: Stage::Beta {
            name: "Background terminal",
            menu_description: "Run long-running terminal commands in the background.",
            announcement: "NEW! Try Background terminals for long running processes. Enable in /experimental!",
        },
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ShellSnapshot,
        key: "shell_snapshot",
        stage: Stage::Beta {
            name: "Shell snapshot",
            menu_description: "Snapshot your shell environment to avoid re-running login scripts for every command.",
            announcement: "NEW! Try shell snapshotting to make your Codex faster. Enable in /experimental!",
        },
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ApplyPatchFreeform,
        key: "apply_patch_freeform",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::ExecPolicy,
        key: "exec_policy",
        stage: Stage::Experimental,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::WindowsSandbox,
        key: "experimental_windows_sandbox",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::WindowsSandboxElevated,
        key: "elevated_windows_sandbox",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::RemoteCompaction,
        key: "remote_compaction",
        stage: Stage::Experimental,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::RemoteModels,
        key: "remote_models",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::Skills,
        key: "skills",
        stage: Stage::Experimental,
        default_enabled: true,
    },
    FeatureSpec {
        id: Feature::PowershellUtf8,
        key: "powershell_utf8",
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::Tui2,
        key: "tui2",
        stage: Stage::Experimental,
        default_enabled: false,
    },
];
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/otel_init.rs
codex-rs/core/src/otel_init.rs
use crate::config::Config; use crate::config::types::OtelExporterKind as Kind; use crate::config::types::OtelHttpProtocol as Protocol; use crate::default_client::originator; use codex_otel::config::OtelExporter; use codex_otel::config::OtelHttpProtocol; use codex_otel::config::OtelSettings; use codex_otel::config::OtelTlsConfig as OtelTlsSettings; use codex_otel::otel_provider::OtelProvider; use std::error::Error; /// Build an OpenTelemetry provider from the app Config. /// /// Returns `None` when OTEL export is disabled. pub fn build_provider( config: &Config, service_version: &str, ) -> Result<Option<OtelProvider>, Box<dyn Error>> { let to_otel_exporter = |kind: &Kind| match kind { Kind::None => OtelExporter::None, Kind::OtlpHttp { endpoint, headers, protocol, tls, } => { let protocol = match protocol { Protocol::Json => OtelHttpProtocol::Json, Protocol::Binary => OtelHttpProtocol::Binary, }; OtelExporter::OtlpHttp { endpoint: endpoint.clone(), headers: headers .iter() .map(|(k, v)| (k.clone(), v.clone())) .collect(), protocol, tls: tls.as_ref().map(|config| OtelTlsSettings { ca_certificate: config.ca_certificate.clone(), client_certificate: config.client_certificate.clone(), client_private_key: config.client_private_key.clone(), }), } } Kind::OtlpGrpc { endpoint, headers, tls, } => OtelExporter::OtlpGrpc { endpoint: endpoint.clone(), headers: headers .iter() .map(|(k, v)| (k.clone(), v.clone())) .collect(), tls: tls.as_ref().map(|config| OtelTlsSettings { ca_certificate: config.ca_certificate.clone(), client_certificate: config.client_certificate.clone(), client_private_key: config.client_private_key.clone(), }), }, }; let exporter = to_otel_exporter(&config.otel.exporter); let trace_exporter = to_otel_exporter(&config.otel.trace_exporter); OtelProvider::from(&OtelSettings { service_name: originator().value.to_owned(), service_version: service_version.to_string(), codex_home: config.codex_home.clone(), environment: config.otel.environment.to_string(), exporter, 
trace_exporter, }) } /// Filter predicate for exporting only Codex-owned events via OTEL. /// Keeps events that originated from codex_otel module pub fn codex_export_filter(meta: &tracing::Metadata<'_>) -> bool { meta.target().starts_with("codex_otel") }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/exec.rs
codex-rs/core/src/exec.rs
#[cfg(unix)] use std::os::unix::process::ExitStatusExt; use std::collections::HashMap; use std::io; use std::path::Path; use std::path::PathBuf; use std::process::ExitStatus; use std::time::Duration; use std::time::Instant; use async_channel::Sender; use tokio::io::AsyncRead; use tokio::io::AsyncReadExt; use tokio::io::BufReader; use tokio::process::Child; use tokio_util::sync::CancellationToken; use crate::error::CodexErr; use crate::error::Result; use crate::error::SandboxErr; use crate::get_platform_sandbox; use crate::protocol::Event; use crate::protocol::EventMsg; use crate::protocol::ExecCommandOutputDeltaEvent; use crate::protocol::ExecOutputStream; use crate::protocol::SandboxPolicy; use crate::sandboxing::CommandSpec; use crate::sandboxing::ExecEnv; use crate::sandboxing::SandboxManager; use crate::sandboxing::SandboxPermissions; use crate::spawn::StdioPolicy; use crate::spawn::spawn_child_async; use crate::text_encoding::bytes_to_string_smart; pub const DEFAULT_EXEC_COMMAND_TIMEOUT_MS: u64 = 10_000; // Hardcode these since it does not seem worth including the libc crate just // for these. const SIGKILL_CODE: i32 = 9; const TIMEOUT_CODE: i32 = 64; const EXIT_CODE_SIGNAL_BASE: i32 = 128; // conventional shell: 128 + signal const EXEC_TIMEOUT_EXIT_CODE: i32 = 124; // conventional timeout exit code // I/O buffer sizing const READ_CHUNK_SIZE: usize = 8192; // bytes per read const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB /// Limit the number of ExecCommandOutputDelta events emitted per exec call. /// Aggregation still collects full output; only the live event stream is capped. 
pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000; #[derive(Debug)] pub struct ExecParams { pub command: Vec<String>, pub cwd: PathBuf, pub expiration: ExecExpiration, pub env: HashMap<String, String>, pub sandbox_permissions: SandboxPermissions, pub justification: Option<String>, pub arg0: Option<String>, } /// Mechanism to terminate an exec invocation before it finishes naturally. #[derive(Debug)] pub enum ExecExpiration { Timeout(Duration), DefaultTimeout, Cancellation(CancellationToken), } impl From<Option<u64>> for ExecExpiration { fn from(timeout_ms: Option<u64>) -> Self { timeout_ms.map_or(ExecExpiration::DefaultTimeout, |timeout_ms| { ExecExpiration::Timeout(Duration::from_millis(timeout_ms)) }) } } impl From<u64> for ExecExpiration { fn from(timeout_ms: u64) -> Self { ExecExpiration::Timeout(Duration::from_millis(timeout_ms)) } } impl ExecExpiration { async fn wait(self) { match self { ExecExpiration::Timeout(duration) => tokio::time::sleep(duration).await, ExecExpiration::DefaultTimeout => { tokio::time::sleep(Duration::from_millis(DEFAULT_EXEC_COMMAND_TIMEOUT_MS)).await } ExecExpiration::Cancellation(cancel) => { cancel.cancelled().await; } } } /// If ExecExpiration is a timeout, returns the timeout in milliseconds. pub(crate) fn timeout_ms(&self) -> Option<u64> { match self { ExecExpiration::Timeout(duration) => Some(duration.as_millis() as u64), ExecExpiration::DefaultTimeout => Some(DEFAULT_EXEC_COMMAND_TIMEOUT_MS), ExecExpiration::Cancellation(_) => None, } } } #[derive(Clone, Copy, Debug, PartialEq)] pub enum SandboxType { None, /// Only available on macOS. MacosSeatbelt, /// Only available on Linux. LinuxSeccomp, /// Only available on Windows. 
WindowsRestrictedToken, } #[derive(Clone)] pub struct StdoutStream { pub sub_id: String, pub call_id: String, pub tx_event: Sender<Event>, } pub async fn process_exec_tool_call( params: ExecParams, sandbox_policy: &SandboxPolicy, sandbox_cwd: &Path, codex_linux_sandbox_exe: &Option<PathBuf>, stdout_stream: Option<StdoutStream>, ) -> Result<ExecToolCallOutput> { let sandbox_type = match &sandbox_policy { SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => { SandboxType::None } _ => get_platform_sandbox().unwrap_or(SandboxType::None), }; tracing::debug!("Sandbox type: {sandbox_type:?}"); let ExecParams { command, cwd, expiration, env, sandbox_permissions, justification, arg0: _, } = params; let (program, args) = command.split_first().ok_or_else(|| { CodexErr::Io(io::Error::new( io::ErrorKind::InvalidInput, "command args are empty", )) })?; let spec = CommandSpec { program: program.clone(), args: args.to_vec(), cwd, env, expiration, sandbox_permissions, justification, }; let manager = SandboxManager::new(); let exec_env = manager .transform( spec, sandbox_policy, sandbox_type, sandbox_cwd, codex_linux_sandbox_exe.as_ref(), ) .map_err(CodexErr::from)?; // Route through the sandboxing module for a single, unified execution path. 
crate::sandboxing::execute_env(exec_env, sandbox_policy, stdout_stream).await } pub(crate) async fn execute_exec_env( env: ExecEnv, sandbox_policy: &SandboxPolicy, stdout_stream: Option<StdoutStream>, ) -> Result<ExecToolCallOutput> { let ExecEnv { command, cwd, env, expiration, sandbox, sandbox_permissions, justification, arg0, } = env; let params = ExecParams { command, cwd, expiration, env, sandbox_permissions, justification, arg0, }; let start = Instant::now(); let raw_output_result = exec(params, sandbox, sandbox_policy, stdout_stream).await; let duration = start.elapsed(); finalize_exec_result(raw_output_result, sandbox, duration) } #[cfg(target_os = "windows")] async fn exec_windows_sandbox( params: ExecParams, sandbox_policy: &SandboxPolicy, ) -> Result<RawExecToolCallOutput> { use crate::config::find_codex_home; use crate::safety::is_windows_elevated_sandbox_enabled; use codex_windows_sandbox::run_windows_sandbox_capture; use codex_windows_sandbox::run_windows_sandbox_capture_elevated; let ExecParams { command, cwd, env, expiration, .. } = params; // TODO(iceweasel-oai): run_windows_sandbox_capture should support all // variants of ExecExpiration, not just timeout. 
let timeout_ms = expiration.timeout_ms(); let policy_str = serde_json::to_string(sandbox_policy).map_err(|err| { CodexErr::Io(io::Error::other(format!( "failed to serialize Windows sandbox policy: {err}" ))) })?; let sandbox_cwd = cwd.clone(); let codex_home = find_codex_home().map_err(|err| { CodexErr::Io(io::Error::other(format!( "windows sandbox: failed to resolve codex_home: {err}" ))) })?; let use_elevated = is_windows_elevated_sandbox_enabled(); let spawn_res = tokio::task::spawn_blocking(move || { if use_elevated { run_windows_sandbox_capture_elevated( policy_str.as_str(), &sandbox_cwd, codex_home.as_ref(), command, &cwd, env, timeout_ms, ) } else { run_windows_sandbox_capture( policy_str.as_str(), &sandbox_cwd, codex_home.as_ref(), command, &cwd, env, timeout_ms, ) } }) .await; let capture = match spawn_res { Ok(Ok(v)) => v, Ok(Err(err)) => { return Err(CodexErr::Io(io::Error::other(format!( "windows sandbox: {err}" )))); } Err(join_err) => { return Err(CodexErr::Io(io::Error::other(format!( "windows sandbox join error: {join_err}" )))); } }; let exit_status = synthetic_exit_status(capture.exit_code); let stdout = StreamOutput { text: capture.stdout, truncated_after_lines: None, }; let stderr = StreamOutput { text: capture.stderr, truncated_after_lines: None, }; // Best-effort aggregate: stdout then stderr let mut aggregated = Vec::with_capacity(stdout.text.len() + stderr.text.len()); append_all(&mut aggregated, &stdout.text); append_all(&mut aggregated, &stderr.text); let aggregated_output = StreamOutput { text: aggregated, truncated_after_lines: None, }; Ok(RawExecToolCallOutput { exit_status, stdout, stderr, aggregated_output, timed_out: capture.timed_out, }) } fn finalize_exec_result( raw_output_result: std::result::Result<RawExecToolCallOutput, CodexErr>, sandbox_type: SandboxType, duration: Duration, ) -> Result<ExecToolCallOutput> { match raw_output_result { Ok(raw_output) => { #[allow(unused_mut)] let mut timed_out = raw_output.timed_out; 
#[cfg(target_family = "unix")] { if let Some(signal) = raw_output.exit_status.signal() { if signal == TIMEOUT_CODE { timed_out = true; } else { return Err(CodexErr::Sandbox(SandboxErr::Signal(signal))); } } } let mut exit_code = raw_output.exit_status.code().unwrap_or(-1); if timed_out { exit_code = EXEC_TIMEOUT_EXIT_CODE; } let stdout = raw_output.stdout.from_utf8_lossy(); let stderr = raw_output.stderr.from_utf8_lossy(); let aggregated_output = raw_output.aggregated_output.from_utf8_lossy(); let exec_output = ExecToolCallOutput { exit_code, stdout, stderr, aggregated_output, duration, timed_out, }; if timed_out { return Err(CodexErr::Sandbox(SandboxErr::Timeout { output: Box::new(exec_output), })); } if is_likely_sandbox_denied(sandbox_type, &exec_output) { return Err(CodexErr::Sandbox(SandboxErr::Denied { output: Box::new(exec_output), })); } Ok(exec_output) } Err(err) => { tracing::error!("exec error: {err}"); Err(err) } } } pub(crate) mod errors { use super::CodexErr; use crate::sandboxing::SandboxTransformError; impl From<SandboxTransformError> for CodexErr { fn from(err: SandboxTransformError) -> Self { match err { SandboxTransformError::MissingLinuxSandboxExecutable => { CodexErr::LandlockSandboxExecutableNotProvided } #[cfg(not(target_os = "macos"))] SandboxTransformError::SeatbeltUnavailable => CodexErr::UnsupportedOperation( "seatbelt sandbox is only available on macOS".to_string(), ), } } } } /// We don't have a fully deterministic way to tell if our command failed /// because of the sandbox - a command in the user's zshrc file might hit an /// error, but the command itself might fail or succeed for other reasons. /// For now, we conservatively check for well known command failure exit codes and /// also look for common sandbox denial keywords in the command output. 
pub(crate) fn is_likely_sandbox_denied( sandbox_type: SandboxType, exec_output: &ExecToolCallOutput, ) -> bool { if sandbox_type == SandboxType::None || exec_output.exit_code == 0 { return false; } // Quick rejects: well-known non-sandbox shell exit codes // 2: misuse of shell builtins // 126: permission denied // 127: command not found const SANDBOX_DENIED_KEYWORDS: [&str; 7] = [ "operation not permitted", "permission denied", "read-only file system", "seccomp", "sandbox", "landlock", "failed to write file", ]; let has_sandbox_keyword = [ &exec_output.stderr.text, &exec_output.stdout.text, &exec_output.aggregated_output.text, ] .into_iter() .any(|section| { let lower = section.to_lowercase(); SANDBOX_DENIED_KEYWORDS .iter() .any(|needle| lower.contains(needle)) }); if has_sandbox_keyword { return true; } const QUICK_REJECT_EXIT_CODES: [i32; 3] = [2, 126, 127]; if QUICK_REJECT_EXIT_CODES.contains(&exec_output.exit_code) { return false; } #[cfg(unix)] { const SIGSYS_CODE: i32 = libc::SIGSYS; if sandbox_type == SandboxType::LinuxSeccomp && exec_output.exit_code == EXIT_CODE_SIGNAL_BASE + SIGSYS_CODE { return true; } } false } #[derive(Debug, Clone)] pub struct StreamOutput<T: Clone> { pub text: T, pub truncated_after_lines: Option<u32>, } #[derive(Debug)] struct RawExecToolCallOutput { pub exit_status: ExitStatus, pub stdout: StreamOutput<Vec<u8>>, pub stderr: StreamOutput<Vec<u8>>, pub aggregated_output: StreamOutput<Vec<u8>>, pub timed_out: bool, } impl StreamOutput<String> { pub fn new(text: String) -> Self { Self { text, truncated_after_lines: None, } } } impl StreamOutput<Vec<u8>> { pub fn from_utf8_lossy(&self) -> StreamOutput<String> { StreamOutput { text: bytes_to_string_smart(&self.text), truncated_after_lines: self.truncated_after_lines, } } } #[inline] fn append_all(dst: &mut Vec<u8>, src: &[u8]) { dst.extend_from_slice(src); } #[derive(Clone, Debug)] pub struct ExecToolCallOutput { pub exit_code: i32, pub stdout: StreamOutput<String>, pub stderr: 
StreamOutput<String>, pub aggregated_output: StreamOutput<String>, pub duration: Duration, pub timed_out: bool, } impl Default for ExecToolCallOutput { fn default() -> Self { Self { exit_code: 0, stdout: StreamOutput::new(String::new()), stderr: StreamOutput::new(String::new()), aggregated_output: StreamOutput::new(String::new()), duration: Duration::ZERO, timed_out: false, } } } #[cfg_attr(not(target_os = "windows"), allow(unused_variables))] async fn exec( params: ExecParams, sandbox: SandboxType, sandbox_policy: &SandboxPolicy, stdout_stream: Option<StdoutStream>, ) -> Result<RawExecToolCallOutput> { #[cfg(target_os = "windows")] if sandbox == SandboxType::WindowsRestrictedToken && !matches!( sandbox_policy, SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } ) { return exec_windows_sandbox(params, sandbox_policy).await; } let ExecParams { command, cwd, env, arg0, expiration, .. } = params; let (program, args) = command.split_first().ok_or_else(|| { CodexErr::Io(io::Error::new( io::ErrorKind::InvalidInput, "command args are empty", )) })?; let arg0_ref = arg0.as_deref(); let child = spawn_child_async( PathBuf::from(program), args.into(), arg0_ref, cwd, sandbox_policy, StdioPolicy::RedirectForShellTool, env, ) .await?; consume_truncated_output(child, expiration, stdout_stream).await } /// Consumes the output of a child process, truncating it so it is suitable for /// use as the output of a `shell` tool call. Also enforces specified timeout. async fn consume_truncated_output( mut child: Child, expiration: ExecExpiration, stdout_stream: Option<StdoutStream>, ) -> Result<RawExecToolCallOutput> { // Both stdout and stderr were configured with `Stdio::piped()` // above, therefore `take()` should normally return `Some`. 
If it doesn't // we treat it as an exceptional I/O error let stdout_reader = child.stdout.take().ok_or_else(|| { CodexErr::Io(io::Error::other( "stdout pipe was unexpectedly not available", )) })?; let stderr_reader = child.stderr.take().ok_or_else(|| { CodexErr::Io(io::Error::other( "stderr pipe was unexpectedly not available", )) })?; let (agg_tx, agg_rx) = async_channel::unbounded::<Vec<u8>>(); let stdout_handle = tokio::spawn(read_capped( BufReader::new(stdout_reader), stdout_stream.clone(), false, Some(agg_tx.clone()), )); let stderr_handle = tokio::spawn(read_capped( BufReader::new(stderr_reader), stdout_stream.clone(), true, Some(agg_tx.clone()), )); let (exit_status, timed_out) = tokio::select! { status_result = child.wait() => { let exit_status = status_result?; (exit_status, false) } _ = expiration.wait() => { kill_child_process_group(&mut child)?; child.start_kill()?; (synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + TIMEOUT_CODE), true) } _ = tokio::signal::ctrl_c() => { kill_child_process_group(&mut child)?; child.start_kill()?; (synthetic_exit_status(EXIT_CODE_SIGNAL_BASE + SIGKILL_CODE), false) } }; // Wait for the stdout/stderr collection tasks but guard against them // hanging forever. In the normal case, both pipes are closed once the child // terminates so the tasks exit quickly. However, if the child process // spawned grandchildren that inherited its stdout/stderr file descriptors // those pipes may stay open after we `kill` the direct child on timeout. // That would cause the `read_capped` tasks to block on `read()` // indefinitely, effectively hanging the whole agent. const IO_DRAIN_TIMEOUT_MS: u64 = 2_000; // 2 s should be plenty for local pipes // We need mutable bindings so we can `abort()` them on timeout. 
use tokio::task::JoinHandle; async fn await_with_timeout( handle: &mut JoinHandle<std::io::Result<StreamOutput<Vec<u8>>>>, timeout: Duration, ) -> std::io::Result<StreamOutput<Vec<u8>>> { match tokio::time::timeout(timeout, &mut *handle).await { Ok(join_res) => match join_res { Ok(io_res) => io_res, Err(join_err) => Err(std::io::Error::other(join_err)), }, Err(_elapsed) => { // Timeout: abort the task to avoid hanging on open pipes. handle.abort(); Ok(StreamOutput { text: Vec::new(), truncated_after_lines: None, }) } } } let mut stdout_handle = stdout_handle; let mut stderr_handle = stderr_handle; let stdout = await_with_timeout( &mut stdout_handle, Duration::from_millis(IO_DRAIN_TIMEOUT_MS), ) .await?; let stderr = await_with_timeout( &mut stderr_handle, Duration::from_millis(IO_DRAIN_TIMEOUT_MS), ) .await?; drop(agg_tx); let mut combined_buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY); while let Ok(chunk) = agg_rx.recv().await { append_all(&mut combined_buf, &chunk); } let aggregated_output = StreamOutput { text: combined_buf, truncated_after_lines: None, }; Ok(RawExecToolCallOutput { exit_status, stdout, stderr, aggregated_output, timed_out, }) } async fn read_capped<R: AsyncRead + Unpin + Send + 'static>( mut reader: R, stream: Option<StdoutStream>, is_stderr: bool, aggregate_tx: Option<Sender<Vec<u8>>>, ) -> io::Result<StreamOutput<Vec<u8>>> { let mut buf = Vec::with_capacity(AGGREGATE_BUFFER_INITIAL_CAPACITY); let mut tmp = [0u8; READ_CHUNK_SIZE]; let mut emitted_deltas: usize = 0; // No caps: append all bytes loop { let n = reader.read(&mut tmp).await?; if n == 0 { break; } if let Some(stream) = &stream && emitted_deltas < MAX_EXEC_OUTPUT_DELTAS_PER_CALL { let chunk = tmp[..n].to_vec(); let msg = EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent { call_id: stream.call_id.clone(), stream: if is_stderr { ExecOutputStream::Stderr } else { ExecOutputStream::Stdout }, chunk, }); let event = Event { id: stream.sub_id.clone(), msg, }; 
#[allow(clippy::let_unit_value)] let _ = stream.tx_event.send(event).await; emitted_deltas += 1; } if let Some(tx) = &aggregate_tx { let _ = tx.send(tmp[..n].to_vec()).await; } append_all(&mut buf, &tmp[..n]); // Continue reading to EOF to avoid back-pressure } Ok(StreamOutput { text: buf, truncated_after_lines: None, }) } #[cfg(unix)] fn synthetic_exit_status(code: i32) -> ExitStatus { use std::os::unix::process::ExitStatusExt; std::process::ExitStatus::from_raw(code) } #[cfg(windows)] fn synthetic_exit_status(code: i32) -> ExitStatus { use std::os::windows::process::ExitStatusExt; // On Windows the raw status is a u32. Use a direct cast to avoid // panicking on negative i32 values produced by prior narrowing casts. std::process::ExitStatus::from_raw(code as u32) } #[cfg(unix)] fn kill_child_process_group(child: &mut Child) -> io::Result<()> { use std::io::ErrorKind; if let Some(pid) = child.id() { let pid = pid as libc::pid_t; let pgid = unsafe { libc::getpgid(pid) }; if pgid == -1 { let err = std::io::Error::last_os_error(); if err.kind() != ErrorKind::NotFound { return Err(err); } return Ok(()); } let result = unsafe { libc::killpg(pgid, libc::SIGKILL) }; if result == -1 { let err = std::io::Error::last_os_error(); if err.kind() != ErrorKind::NotFound { return Err(err); } } } Ok(()) } #[cfg(not(unix))] fn kill_child_process_group(_: &mut Child) -> io::Result<()> { Ok(()) } #[cfg(test)] mod tests { use super::*; use std::time::Duration; fn make_exec_output( exit_code: i32, stdout: &str, stderr: &str, aggregated: &str, ) -> ExecToolCallOutput { ExecToolCallOutput { exit_code, stdout: StreamOutput::new(stdout.to_string()), stderr: StreamOutput::new(stderr.to_string()), aggregated_output: StreamOutput::new(aggregated.to_string()), duration: Duration::from_millis(1), timed_out: false, } } #[test] fn sandbox_detection_requires_keywords() { let output = make_exec_output(1, "", "", ""); assert!(!is_likely_sandbox_denied( SandboxType::LinuxSeccomp, &output )); } #[test] 
fn sandbox_detection_identifies_keyword_in_stderr() { let output = make_exec_output(1, "", "Operation not permitted", ""); assert!(is_likely_sandbox_denied(SandboxType::LinuxSeccomp, &output)); } #[test] fn sandbox_detection_respects_quick_reject_exit_codes() { let output = make_exec_output(127, "", "command not found", ""); assert!(!is_likely_sandbox_denied( SandboxType::LinuxSeccomp, &output )); } #[test] fn sandbox_detection_ignores_non_sandbox_mode() { let output = make_exec_output(1, "", "Operation not permitted", ""); assert!(!is_likely_sandbox_denied(SandboxType::None, &output)); } #[test] fn sandbox_detection_uses_aggregated_output() { let output = make_exec_output( 101, "", "", "cargo failed: Read-only file system when writing target", ); assert!(is_likely_sandbox_denied( SandboxType::MacosSeatbelt, &output )); } #[cfg(unix)] #[test] fn sandbox_detection_flags_sigsys_exit_code() { let exit_code = EXIT_CODE_SIGNAL_BASE + libc::SIGSYS; let output = make_exec_output(exit_code, "", "", ""); assert!(is_likely_sandbox_denied(SandboxType::LinuxSeccomp, &output)); } #[cfg(unix)] #[tokio::test] async fn kill_child_process_group_kills_grandchildren_on_timeout() -> Result<()> { // On Linux/macOS, /bin/bash is typically present; on FreeBSD/OpenBSD, // prefer /bin/sh to avoid NotFound errors. 
#[cfg(any(target_os = "freebsd", target_os = "openbsd"))] let command = vec![ "/bin/sh".to_string(), "-c".to_string(), "sleep 60 & echo $!; sleep 60".to_string(), ]; #[cfg(all(unix, not(any(target_os = "freebsd", target_os = "openbsd"))))] let command = vec![ "/bin/bash".to_string(), "-c".to_string(), "sleep 60 & echo $!; sleep 60".to_string(), ]; let env: HashMap<String, String> = std::env::vars().collect(); let params = ExecParams { command, cwd: std::env::current_dir()?, expiration: 500.into(), env, sandbox_permissions: SandboxPermissions::UseDefault, justification: None, arg0: None, }; let output = exec(params, SandboxType::None, &SandboxPolicy::ReadOnly, None).await?; assert!(output.timed_out); let stdout = output.stdout.from_utf8_lossy().text; let pid_line = stdout.lines().next().unwrap_or("").trim(); let pid: i32 = pid_line.parse().map_err(|error| { io::Error::new( io::ErrorKind::InvalidData, format!("Failed to parse pid from stdout '{pid_line}': {error}"), ) })?; let mut killed = false; for _ in 0..20 { // Use kill(pid, 0) to check if the process is alive. 
if unsafe { libc::kill(pid, 0) } == -1 && let Some(libc::ESRCH) = std::io::Error::last_os_error().raw_os_error() { killed = true; break; } tokio::time::sleep(Duration::from_millis(100)).await; } assert!(killed, "grandchild process with pid {pid} is still alive"); Ok(()) } #[tokio::test] async fn process_exec_tool_call_respects_cancellation_token() -> Result<()> { let command = long_running_command(); let cwd = std::env::current_dir()?; let env: HashMap<String, String> = std::env::vars().collect(); let cancel_token = CancellationToken::new(); let cancel_tx = cancel_token.clone(); let params = ExecParams { command, cwd: cwd.clone(), expiration: ExecExpiration::Cancellation(cancel_token), env, sandbox_permissions: SandboxPermissions::UseDefault, justification: None, arg0: None, }; tokio::spawn(async move { tokio::time::sleep(Duration::from_millis(1_000)).await; cancel_tx.cancel(); }); let result = process_exec_tool_call( params, &SandboxPolicy::DangerFullAccess, cwd.as_path(), &None, None, ) .await; let output = match result { Err(CodexErr::Sandbox(SandboxErr::Timeout { output })) => output, other => panic!("expected timeout error, got {other:?}"), }; assert!(output.timed_out); assert_eq!(output.exit_code, EXEC_TIMEOUT_EXIT_CODE); Ok(()) } #[cfg(unix)] fn long_running_command() -> Vec<String> { vec![ "/bin/sh".to_string(), "-c".to_string(), "sleep 30".to_string(), ] } #[cfg(windows)] fn long_running_command() -> Vec<String> { vec![ "powershell.exe".to_string(), "-NonInteractive".to_string(), "-NoLogo".to_string(), "-Command".to_string(), "Start-Sleep -Seconds 30".to_string(), ] } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/model_provider_info.rs
codex-rs/core/src/model_provider_info.rs
//! Registry of model providers supported by Codex. //! //! Providers can be defined in two places: //! 1. Built-in defaults compiled into the binary so Codex works out-of-the-box. //! 2. User-defined entries inside `~/.codex/config.toml` under the `model_providers` //! key. These override or extend the defaults at runtime. use codex_api::Provider as ApiProvider; use codex_api::WireApi as ApiWireApi; use codex_api::provider::RetryConfig as ApiRetryConfig; use codex_app_server_protocol::AuthMode; use http::HeaderMap; use http::header::HeaderName; use http::header::HeaderValue; use serde::Deserialize; use serde::Serialize; use std::collections::HashMap; use std::env::VarError; use std::time::Duration; use crate::error::EnvVarError; const DEFAULT_STREAM_IDLE_TIMEOUT_MS: u64 = 300_000; const DEFAULT_STREAM_MAX_RETRIES: u64 = 5; const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4; /// Hard cap for user-configured `stream_max_retries`. const MAX_STREAM_MAX_RETRIES: u64 = 100; /// Hard cap for user-configured `request_max_retries`. const MAX_REQUEST_MAX_RETRIES: u64 = 100; pub const CHAT_WIRE_API_DEPRECATION_SUMMARY: &str = r#"Support for the "chat" wire API is deprecated and will soon be removed. Update your model provider definition in config.toml to use wire_api = "responses"."#; const OPENAI_PROVIDER_NAME: &str = "OpenAI"; /// Wire protocol that the provider speaks. Most third-party services only /// implement the classic OpenAI Chat Completions JSON schema, whereas OpenAI /// itself (and a handful of others) additionally expose the more modern /// *Responses* API. The two protocols use different request/response shapes /// and *cannot* be auto-detected at runtime, therefore each provider entry /// must declare which one it expects. #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub enum WireApi { /// The Responses API exposed by OpenAI at `/v1/responses`. 
Responses, /// Regular Chat Completions compatible with `/v1/chat/completions`. #[default] Chat, } /// Serializable representation of a provider definition. #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] pub struct ModelProviderInfo { /// Friendly display name. pub name: String, /// Base URL for the provider's OpenAI-compatible API. pub base_url: Option<String>, /// Environment variable that stores the user's API key for this provider. pub env_key: Option<String>, /// Optional instructions to help the user get a valid value for the /// variable and set it. pub env_key_instructions: Option<String>, /// Value to use with `Authorization: Bearer <token>` header. Use of this /// config is discouraged in favor of `env_key` for security reasons, but /// this may be necessary when using this programmatically. pub experimental_bearer_token: Option<String>, /// Which wire protocol this provider expects. #[serde(default)] pub wire_api: WireApi, /// Optional query parameters to append to the base URL. pub query_params: Option<HashMap<String, String>>, /// Additional HTTP headers to include in requests to this provider where /// the (key, value) pairs are the header name and value. pub http_headers: Option<HashMap<String, String>>, /// Optional HTTP headers to include in requests to this provider where the /// (key, value) pairs are the header name and _environment variable_ whose /// value should be used. If the environment variable is not set, or the /// value is empty, the header will not be included in the request. pub env_http_headers: Option<HashMap<String, String>>, /// Maximum number of times to retry a failed HTTP request to this provider. pub request_max_retries: Option<u64>, /// Number of times to retry reconnecting a dropped streaming response before failing. pub stream_max_retries: Option<u64>, /// Idle timeout (in milliseconds) to wait for activity on a streaming response before treating /// the connection as lost. 
pub stream_idle_timeout_ms: Option<u64>, /// Does this provider require an OpenAI API Key or ChatGPT login token? If true, /// user is presented with login screen on first run, and login preference and token/key /// are stored in auth.json. If false (which is the default), login screen is skipped, /// and API key (if needed) comes from the "env_key" environment variable. #[serde(default)] pub requires_openai_auth: bool, } impl ModelProviderInfo { fn build_header_map(&self) -> crate::error::Result<HeaderMap> { let mut headers = HeaderMap::new(); if let Some(extra) = &self.http_headers { for (k, v) in extra { if let (Ok(name), Ok(value)) = (HeaderName::try_from(k), HeaderValue::try_from(v)) { headers.insert(name, value); } } } if let Some(env_headers) = &self.env_http_headers { for (header, env_var) in env_headers { if let Ok(val) = std::env::var(env_var) && !val.trim().is_empty() && let (Ok(name), Ok(value)) = (HeaderName::try_from(header), HeaderValue::try_from(val)) { headers.insert(name, value); } } } Ok(headers) } pub(crate) fn to_api_provider( &self, auth_mode: Option<AuthMode>, ) -> crate::error::Result<ApiProvider> { let default_base_url = if matches!(auth_mode, Some(AuthMode::ChatGPT)) { "https://chatgpt.com/backend-api/codex" } else { "https://api.openai.com/v1" }; let base_url = self .base_url .clone() .unwrap_or_else(|| default_base_url.to_string()); let headers = self.build_header_map()?; let retry = ApiRetryConfig { max_attempts: self.request_max_retries(), base_delay: Duration::from_millis(200), retry_429: false, retry_5xx: true, retry_transport: true, }; Ok(ApiProvider { name: self.name.clone(), base_url, query_params: self.query_params.clone(), wire: match self.wire_api { WireApi::Responses => ApiWireApi::Responses, WireApi::Chat => ApiWireApi::Chat, }, headers, retry, stream_idle_timeout: self.stream_idle_timeout(), }) } /// If `env_key` is Some, returns the API key for this provider if present /// (and non-empty) in the environment. 
If `env_key` is required but /// cannot be found, returns an error. pub fn api_key(&self) -> crate::error::Result<Option<String>> { match &self.env_key { Some(env_key) => { let env_value = std::env::var(env_key); env_value .and_then(|v| { if v.trim().is_empty() { Err(VarError::NotPresent) } else { Ok(Some(v)) } }) .map_err(|_| { crate::error::CodexErr::EnvVar(EnvVarError { var: env_key.clone(), instructions: self.env_key_instructions.clone(), }) }) } None => Ok(None), } } /// Effective maximum number of request retries for this provider. pub fn request_max_retries(&self) -> u64 { self.request_max_retries .unwrap_or(DEFAULT_REQUEST_MAX_RETRIES) .min(MAX_REQUEST_MAX_RETRIES) } /// Effective maximum number of stream reconnection attempts for this provider. pub fn stream_max_retries(&self) -> u64 { self.stream_max_retries .unwrap_or(DEFAULT_STREAM_MAX_RETRIES) .min(MAX_STREAM_MAX_RETRIES) } /// Effective idle timeout for streaming responses. pub fn stream_idle_timeout(&self) -> Duration { self.stream_idle_timeout_ms .map(Duration::from_millis) .unwrap_or(Duration::from_millis(DEFAULT_STREAM_IDLE_TIMEOUT_MS)) } pub fn create_openai_provider() -> ModelProviderInfo { ModelProviderInfo { name: OPENAI_PROVIDER_NAME.into(), // Allow users to override the default OpenAI endpoint by // exporting `OPENAI_BASE_URL`. This is useful when pointing // Codex at a proxy, mock server, or Azure-style deployment // without requiring a full TOML override for the built-in // OpenAI provider. 
base_url: std::env::var("OPENAI_BASE_URL") .ok() .filter(|v| !v.trim().is_empty()), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: Some( [("version".to_string(), env!("CARGO_PKG_VERSION").to_string())] .into_iter() .collect(), ), env_http_headers: Some( [ ( "OpenAI-Organization".to_string(), "OPENAI_ORGANIZATION".to_string(), ), ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()), ] .into_iter() .collect(), ), // Use global defaults for retry/timeout unless overridden in config.toml. request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: true, } } pub fn is_openai(&self) -> bool { self.name == OPENAI_PROVIDER_NAME } } pub const DEFAULT_LMSTUDIO_PORT: u16 = 1234; pub const DEFAULT_OLLAMA_PORT: u16 = 11434; pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio"; pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama"; /// Built-in default provider list. pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> { use ModelProviderInfo as P; // We do not want to be in the business of adjucating which third-party // providers are bundled with Codex CLI, so we only include the OpenAI and // open source ("oss") providers by default. Users are encouraged to add to // `model_providers` in config.toml to add their own providers. [ ("openai", P::create_openai_provider()), ( OLLAMA_OSS_PROVIDER_ID, create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Chat), ), ( LMSTUDIO_OSS_PROVIDER_ID, create_oss_provider(DEFAULT_LMSTUDIO_PORT, WireApi::Responses), ), ] .into_iter() .map(|(k, v)| (k.to_string(), v)) .collect() } pub fn create_oss_provider(default_provider_port: u16, wire_api: WireApi) -> ModelProviderInfo { // These CODEX_OSS_ environment variables are experimental: we may // switch to reading values from config.toml instead. 
let codex_oss_base_url = match std::env::var("CODEX_OSS_BASE_URL") .ok() .filter(|v| !v.trim().is_empty()) { Some(url) => url, None => format!( "http://localhost:{port}/v1", port = std::env::var("CODEX_OSS_PORT") .ok() .filter(|v| !v.trim().is_empty()) .and_then(|v| v.parse::<u16>().ok()) .unwrap_or(default_provider_port) ), }; create_oss_provider_with_base_url(&codex_oss_base_url, wire_api) } pub fn create_oss_provider_with_base_url(base_url: &str, wire_api: WireApi) -> ModelProviderInfo { ModelProviderInfo { name: "gpt-oss".into(), base_url: Some(base_url.into()), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, } } #[cfg(test)] mod tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_deserialize_ollama_model_provider_toml() { let azure_provider_toml = r#" name = "Ollama" base_url = "http://localhost:11434/v1" "#; let expected_provider = ModelProviderInfo { name: "Ollama".into(), base_url: Some("http://localhost:11434/v1".into()), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Chat, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap(); assert_eq!(expected_provider, provider); } #[test] fn test_deserialize_azure_model_provider_toml() { let azure_provider_toml = r#" name = "Azure" base_url = "https://xxxxx.openai.azure.com/openai" env_key = "AZURE_OPENAI_API_KEY" query_params = { api-version = "2025-04-01-preview" } "#; let expected_provider = ModelProviderInfo { name: "Azure".into(), base_url: Some("https://xxxxx.openai.azure.com/openai".into()), env_key: 
Some("AZURE_OPENAI_API_KEY".into()), env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Chat, query_params: Some(maplit::hashmap! { "api-version".to_string() => "2025-04-01-preview".to_string(), }), http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap(); assert_eq!(expected_provider, provider); } #[test] fn test_deserialize_example_model_provider_toml() { let azure_provider_toml = r#" name = "Example" base_url = "https://example.com" env_key = "API_KEY" http_headers = { "X-Example-Header" = "example-value" } env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" } "#; let expected_provider = ModelProviderInfo { name: "Example".into(), base_url: Some("https://example.com".into()), env_key: Some("API_KEY".into()), env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Chat, query_params: None, http_headers: Some(maplit::hashmap! { "X-Example-Header".to_string() => "example-value".to_string(), }), env_http_headers: Some(maplit::hashmap! 
{ "X-Example-Env-Header".to_string() => "EXAMPLE_ENV_VAR".to_string(), }), request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap(); assert_eq!(expected_provider, provider); } #[test] fn detects_azure_responses_base_urls() { let positive_cases = [ "https://foo.openai.azure.com/openai", "https://foo.openai.azure.us/openai/deployments/bar", "https://foo.cognitiveservices.azure.cn/openai", "https://foo.aoai.azure.com/openai", "https://foo.openai.azure-api.net/openai", "https://foo.z01.azurefd.net/", ]; for base_url in positive_cases { let provider = ModelProviderInfo { name: "test".into(), base_url: Some(base_url.into()), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let api = provider.to_api_provider(None).expect("api provider"); assert!( api.is_azure_responses_endpoint(), "expected {base_url} to be detected as Azure" ); } let named_provider = ModelProviderInfo { name: "Azure".into(), base_url: Some("https://example.com".into()), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let named_api = named_provider.to_api_provider(None).expect("api provider"); assert!(named_api.is_azure_responses_endpoint()); let negative_cases = [ "https://api.openai.com/v1", "https://example.com/openai", "https://myproxy.azurewebsites.net/openai", ]; for base_url in negative_cases { let provider = ModelProviderInfo { name: "test".into(), base_url: Some(base_url.into()), env_key: None, 
env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: None, stream_max_retries: None, stream_idle_timeout_ms: None, requires_openai_auth: false, }; let api = provider.to_api_provider(None).expect("api provider"); assert!( !api.is_azure_responses_endpoint(), "expected {base_url} not to be detected as Azure" ); } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/ghost_snapshot.rs
codex-rs/core/src/tasks/ghost_snapshot.rs
use crate::codex::TurnContext; use crate::protocol::EventMsg; use crate::protocol::WarningEvent; use crate::state::TaskKind; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; use async_trait::async_trait; use codex_git::CreateGhostCommitOptions; use codex_git::GhostSnapshotReport; use codex_git::GitToolingError; use codex_git::create_ghost_commit_with_report; use codex_protocol::models::ResponseItem; use codex_protocol::user_input::UserInput; use codex_utils_readiness::Readiness; use codex_utils_readiness::Token; use std::sync::Arc; use std::time::Duration; use tokio::sync::oneshot; use tokio_util::sync::CancellationToken; use tracing::info; use tracing::warn; pub(crate) struct GhostSnapshotTask { token: Token, } const SNAPSHOT_WARNING_THRESHOLD: Duration = Duration::from_secs(240); #[async_trait] impl SessionTask for GhostSnapshotTask { fn kind(&self) -> TaskKind { TaskKind::Regular } async fn run( self: Arc<Self>, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, _input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<String> { tokio::task::spawn(async move { let token = self.token; let warnings_enabled = !ctx.ghost_snapshot.disable_warnings; // Channel used to signal when the snapshot work has finished so the // timeout warning task can exit early without sending a warning. let (snapshot_done_tx, snapshot_done_rx) = oneshot::channel::<()>(); if warnings_enabled { let ctx_for_warning = ctx.clone(); let cancellation_token_for_warning = cancellation_token.clone(); let session_for_warning = session.clone(); // Fire a generic warning if the snapshot is still running after // three minutes; this helps users discover large untracked files // that might need to be added to .gitignore. tokio::task::spawn(async move { tokio::select! 
{ _ = tokio::time::sleep(SNAPSHOT_WARNING_THRESHOLD) => { session_for_warning.session .send_event( &ctx_for_warning, EventMsg::Warning(WarningEvent { message: "Repository snapshot is taking longer than expected. Large untracked or ignored files can slow snapshots; consider adding large files or directories to .gitignore or disabling `undo` in your config.".to_string() }), ) .await; } _ = snapshot_done_rx => {} _ = cancellation_token_for_warning.cancelled() => {} } }); } else { drop(snapshot_done_rx); } let ctx_for_task = ctx.clone(); let cancelled = tokio::select! { _ = cancellation_token.cancelled() => true, _ = async { let repo_path = ctx_for_task.cwd.clone(); let ghost_snapshot = ctx_for_task.ghost_snapshot.clone(); let ghost_snapshot_for_commit = ghost_snapshot.clone(); // Required to run in a dedicated blocking pool. match tokio::task::spawn_blocking(move || { let options = CreateGhostCommitOptions::new(&repo_path).ghost_snapshot(ghost_snapshot_for_commit); create_ghost_commit_with_report(&options) }) .await { Ok(Ok((ghost_commit, report))) => { info!("ghost snapshot blocking task finished"); if warnings_enabled { for message in format_snapshot_warnings( ghost_snapshot.ignore_large_untracked_files, ghost_snapshot.ignore_large_untracked_dirs, &report, ) { session .session .send_event( &ctx_for_task, EventMsg::Warning(WarningEvent { message }), ) .await; } } session .session .record_conversation_items(&ctx, &[ResponseItem::GhostSnapshot { ghost_commit: ghost_commit.clone(), }]) .await; info!("ghost commit captured: {}", ghost_commit.id()); } Ok(Err(err)) => match err { GitToolingError::NotAGitRepository { .. 
} => info!( sub_id = ctx_for_task.sub_id.as_str(), "skipping ghost snapshot because current directory is not a Git repository" ), _ => { warn!( sub_id = ctx_for_task.sub_id.as_str(), "failed to capture ghost snapshot: {err}" ); } }, Err(err) => { warn!( sub_id = ctx_for_task.sub_id.as_str(), "ghost snapshot task panicked: {err}" ); let message = format!("Snapshots disabled after ghost snapshot panic: {err}."); session .session .notify_background_event(&ctx_for_task, message) .await; } } } => false, }; let _ = snapshot_done_tx.send(()); if cancelled { info!("ghost snapshot task cancelled"); } match ctx.tool_call_gate.mark_ready(token).await { Ok(true) => info!("ghost snapshot gate marked ready"), Ok(false) => warn!("ghost snapshot gate already ready"), Err(err) => warn!("failed to mark ghost snapshot ready: {err}"), } }); None } } impl GhostSnapshotTask { pub(crate) fn new(token: Token) -> Self { Self { token } } } fn format_snapshot_warnings( ignore_large_untracked_files: Option<i64>, ignore_large_untracked_dirs: Option<i64>, report: &GhostSnapshotReport, ) -> Vec<String> { let mut warnings = Vec::new(); if let Some(message) = format_large_untracked_warning(ignore_large_untracked_dirs, report) { warnings.push(message); } if let Some(message) = format_ignored_untracked_files_warning(ignore_large_untracked_files, report) { warnings.push(message); } warnings } fn format_large_untracked_warning( ignore_large_untracked_dirs: Option<i64>, report: &GhostSnapshotReport, ) -> Option<String> { if report.large_untracked_dirs.is_empty() { return None; } let threshold = ignore_large_untracked_dirs?; const MAX_DIRS: usize = 3; let mut parts: Vec<String> = Vec::new(); for dir in report.large_untracked_dirs.iter().take(MAX_DIRS) { parts.push(format!("{} ({} files)", dir.path.display(), dir.file_count)); } if report.large_untracked_dirs.len() > MAX_DIRS { let remaining = report.large_untracked_dirs.len() - MAX_DIRS; parts.push(format!("{remaining} more")); } Some(format!( 
"Repository snapshot ignored large untracked directories (>= {threshold} files): {}. These directories are excluded from snapshots and undo cleanup. Adjust `ghost_snapshot.ignore_large_untracked_dirs` to change this behavior.", parts.join(", ") )) } fn format_ignored_untracked_files_warning( ignore_large_untracked_files: Option<i64>, report: &GhostSnapshotReport, ) -> Option<String> { let threshold = ignore_large_untracked_files?; if report.ignored_untracked_files.is_empty() { return None; } const MAX_FILES: usize = 3; let mut parts: Vec<String> = Vec::new(); for file in report.ignored_untracked_files.iter().take(MAX_FILES) { parts.push(format!( "{} ({})", file.path.display(), format_bytes(file.byte_size) )); } if report.ignored_untracked_files.len() > MAX_FILES { let remaining = report.ignored_untracked_files.len() - MAX_FILES; parts.push(format!("{remaining} more")); } Some(format!( "Repository snapshot ignored untracked files larger than {}: {}. These files are preserved during undo cleanup, but their contents are not captured in the snapshot. Adjust `ghost_snapshot.ignore_large_untracked_files` to change this behavior. 
To avoid this message in the future, update your `.gitignore`.", format_bytes(threshold), parts.join(", ") )) } fn format_bytes(bytes: i64) -> String { const KIB: i64 = 1024; const MIB: i64 = 1024 * 1024; if bytes >= MIB { return format!("{} MiB", bytes / MIB); } if bytes >= KIB { return format!("{} KiB", bytes / KIB); } format!("{bytes} B") } #[cfg(test)] mod tests { use super::*; use codex_git::LargeUntrackedDir; use pretty_assertions::assert_eq; use std::path::PathBuf; #[test] fn large_untracked_warning_includes_threshold() { let report = GhostSnapshotReport { large_untracked_dirs: vec![LargeUntrackedDir { path: PathBuf::from("models"), file_count: 250, }], ignored_untracked_files: Vec::new(), }; let message = format_large_untracked_warning(Some(200), &report).unwrap(); assert!(message.contains(">= 200 files")); } #[test] fn large_untracked_warning_disabled_when_threshold_disabled() { let report = GhostSnapshotReport { large_untracked_dirs: vec![LargeUntrackedDir { path: PathBuf::from("models"), file_count: 250, }], ignored_untracked_files: Vec::new(), }; assert_eq!(format_large_untracked_warning(None, &report), None); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/compact.rs
codex-rs/core/src/tasks/compact.rs
use std::sync::Arc;

use super::SessionTask;
use super::SessionTaskContext;
use crate::codex::TurnContext;
use crate::state::TaskKind;
use async_trait::async_trait;
use codex_protocol::user_input::UserInput;
use tokio_util::sync::CancellationToken;

/// Session task that compacts conversation history, either via the
/// provider-hosted remote compaction endpoint or the local fallback.
#[derive(Clone, Copy, Default)]
pub(crate) struct CompactTask;

#[async_trait]
impl SessionTask for CompactTask {
    fn kind(&self) -> TaskKind {
        TaskKind::Compact
    }

    async fn run(
        self: Arc<Self>,
        session: Arc<SessionTaskContext>,
        ctx: Arc<TurnContext>,
        input: Vec<UserInput>,
        _cancellation_token: CancellationToken,
    ) -> Option<String> {
        let sess = session.clone_session();

        // Decide which compaction path applies before handing ownership of
        // the session/context to the chosen task.
        let provider = ctx.client.get_provider();
        let use_remote = crate::compact::should_use_remote_compact_task(sess.as_ref(), &provider);

        if use_remote {
            crate::compact_remote::run_remote_compact_task(sess, ctx).await;
        } else {
            crate::compact::run_compact_task(sess, ctx, input).await;
        }

        // Compaction produces no final agent message.
        None
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/regular.rs
codex-rs/core/src/tasks/regular.rs
use std::sync::Arc;

use crate::codex::TurnContext;
use crate::codex::run_task;
use crate::state::TaskKind;
use async_trait::async_trait;
use codex_protocol::user_input::UserInput;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use tracing::trace_span;

use super::SessionTask;
use super::SessionTaskContext;

/// The default session task: forwards the user's input to `run_task`,
/// wrapping execution in a tracing span parented to the session's
/// current OTEL span.
#[derive(Clone, Copy, Default)]
pub(crate) struct RegularTask;

#[async_trait]
impl SessionTask for RegularTask {
    fn kind(&self) -> TaskKind {
        TaskKind::Regular
    }

    async fn run(
        self: Arc<Self>,
        session: Arc<SessionTaskContext>,
        ctx: Arc<TurnContext>,
        input: Vec<UserInput>,
        cancellation_token: CancellationToken,
    ) -> Option<String> {
        let task_session = session.clone_session();

        // Attach the span to the session's active OTEL span so the task
        // shows up under the right trace.
        let parent_span = task_session.services.otel_manager.current_span();
        let span = trace_span!(parent: parent_span, "run_task");

        run_task(task_session, ctx, input, cancellation_token)
            .instrument(span)
            .await
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/undo.rs
codex-rs/core/src/tasks/undo.rs
use std::sync::Arc; use crate::codex::TurnContext; use crate::protocol::EventMsg; use crate::protocol::UndoCompletedEvent; use crate::protocol::UndoStartedEvent; use crate::state::TaskKind; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; use async_trait::async_trait; use codex_git::RestoreGhostCommitOptions; use codex_git::restore_ghost_commit_with_options; use codex_protocol::models::ResponseItem; use codex_protocol::user_input::UserInput; use tokio_util::sync::CancellationToken; use tracing::error; use tracing::info; use tracing::warn; pub(crate) struct UndoTask; impl UndoTask { pub(crate) fn new() -> Self { Self } } #[async_trait] impl SessionTask for UndoTask { fn kind(&self) -> TaskKind { TaskKind::Regular } async fn run( self: Arc<Self>, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, _input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<String> { let sess = session.clone_session(); sess.send_event( ctx.as_ref(), EventMsg::UndoStarted(UndoStartedEvent { message: Some("Undo in progress...".to_string()), }), ) .await; if cancellation_token.is_cancelled() { sess.send_event( ctx.as_ref(), EventMsg::UndoCompleted(UndoCompletedEvent { success: false, message: Some("Undo cancelled.".to_string()), }), ) .await; return None; } let mut history = sess.clone_history().await; let mut items = history.get_history(); let mut completed = UndoCompletedEvent { success: false, message: None, }; let Some((idx, ghost_commit)) = items .iter() .enumerate() .rev() .find_map(|(idx, item)| match item { ResponseItem::GhostSnapshot { ghost_commit } => { Some((idx, ghost_commit.clone())) } _ => None, }) else { completed.message = Some("No ghost snapshot available to undo.".to_string()); sess.send_event(ctx.as_ref(), EventMsg::UndoCompleted(completed)) .await; return None; }; let commit_id = ghost_commit.id().to_string(); let repo_path = ctx.cwd.clone(); let ghost_snapshot = ctx.ghost_snapshot.clone(); let restore_result = 
tokio::task::spawn_blocking(move || { let options = RestoreGhostCommitOptions::new(&repo_path).ghost_snapshot(ghost_snapshot); restore_ghost_commit_with_options(&options, &ghost_commit) }) .await; match restore_result { Ok(Ok(())) => { items.remove(idx); sess.replace_history(items).await; let short_id: String = commit_id.chars().take(7).collect(); info!(commit_id = commit_id, "Undo restored ghost snapshot"); completed.success = true; completed.message = Some(format!("Undo restored snapshot {short_id}.")); } Ok(Err(err)) => { let message = format!("Failed to restore snapshot {commit_id}: {err}"); warn!("{message}"); completed.message = Some(message); } Err(err) => { let message = format!("Failed to restore snapshot {commit_id}: {err}"); error!("{message}"); completed.message = Some(message); } } sess.send_event(ctx.as_ref(), EventMsg::UndoCompleted(completed)) .await; None } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/mod.rs
codex-rs/core/src/tasks/mod.rs
mod compact; mod ghost_snapshot; mod regular; mod review; mod undo; mod user_shell; use std::sync::Arc; use std::time::Duration; use async_trait::async_trait; use tokio::select; use tokio::sync::Notify; use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use tracing::trace; use tracing::warn; use crate::AuthManager; use crate::codex::Session; use crate::codex::TurnContext; use crate::models_manager::manager::ModelsManager; use crate::protocol::EventMsg; use crate::protocol::TaskCompleteEvent; use crate::protocol::TurnAbortReason; use crate::protocol::TurnAbortedEvent; use crate::state::ActiveTurn; use crate::state::RunningTask; use crate::state::TaskKind; use codex_protocol::user_input::UserInput; pub(crate) use compact::CompactTask; pub(crate) use ghost_snapshot::GhostSnapshotTask; pub(crate) use regular::RegularTask; pub(crate) use review::ReviewTask; pub(crate) use undo::UndoTask; pub(crate) use user_shell::UserShellCommandTask; const GRACEFULL_INTERRUPTION_TIMEOUT_MS: u64 = 100; /// Thin wrapper that exposes the parts of [`Session`] task runners need. #[derive(Clone)] pub(crate) struct SessionTaskContext { session: Arc<Session>, } impl SessionTaskContext { pub(crate) fn new(session: Arc<Session>) -> Self { Self { session } } pub(crate) fn clone_session(&self) -> Arc<Session> { Arc::clone(&self.session) } pub(crate) fn auth_manager(&self) -> Arc<AuthManager> { Arc::clone(&self.session.services.auth_manager) } pub(crate) fn models_manager(&self) -> Arc<ModelsManager> { Arc::clone(&self.session.services.models_manager) } } /// Async task that drives a [`Session`] turn. /// /// Implementations encapsulate a specific Codex workflow (regular chat, /// reviews, ghost snapshots, etc.). Each task instance is owned by a /// [`Session`] and executed on a background Tokio task. 
The trait is /// intentionally small: implementers identify themselves via /// [`SessionTask::kind`], perform their work in [`SessionTask::run`], and may /// release resources in [`SessionTask::abort`]. #[async_trait] pub(crate) trait SessionTask: Send + Sync + 'static { /// Describes the type of work the task performs so the session can /// surface it in telemetry and UI. fn kind(&self) -> TaskKind; /// Executes the task until completion or cancellation. /// /// Implementations typically stream protocol events using `session` and /// `ctx`, returning an optional final agent message when finished. The /// provided `cancellation_token` is cancelled when the session requests an /// abort; implementers should watch for it and terminate quickly once it /// fires. Returning [`Some`] yields a final message that /// [`Session::on_task_finished`] will emit to the client. async fn run( self: Arc<Self>, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<String>; /// Gives the task a chance to perform cleanup after an abort. /// /// The default implementation is a no-op; override this if additional /// teardown or notifications are required once /// [`Session::abort_all_tasks`] cancels the task. 
async fn abort(&self, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>) { let _ = (session, ctx); } } impl Session { pub async fn spawn_task<T: SessionTask>( self: &Arc<Self>, turn_context: Arc<TurnContext>, input: Vec<UserInput>, task: T, ) { self.abort_all_tasks(TurnAbortReason::Replaced).await; let task: Arc<dyn SessionTask> = Arc::new(task); let task_kind = task.kind(); let cancellation_token = CancellationToken::new(); let done = Arc::new(Notify::new()); let done_clone = Arc::clone(&done); let handle = { let session_ctx = Arc::new(SessionTaskContext::new(Arc::clone(self))); let ctx = Arc::clone(&turn_context); let task_for_run = Arc::clone(&task); let task_cancellation_token = cancellation_token.child_token(); tokio::spawn(async move { let ctx_for_finish = Arc::clone(&ctx); let last_agent_message = task_for_run .run( Arc::clone(&session_ctx), ctx, input, task_cancellation_token.child_token(), ) .await; session_ctx.clone_session().flush_rollout().await; if !task_cancellation_token.is_cancelled() { // Emit completion uniformly from spawn site so all tasks share the same lifecycle. 
let sess = session_ctx.clone_session(); sess.on_task_finished(ctx_for_finish, last_agent_message) .await; } done_clone.notify_waiters(); }) }; let running_task = RunningTask { done, handle: Arc::new(AbortOnDropHandle::new(handle)), kind: task_kind, task, cancellation_token, turn_context: Arc::clone(&turn_context), }; self.register_new_active_task(running_task).await; } pub async fn abort_all_tasks(self: &Arc<Self>, reason: TurnAbortReason) { for task in self.take_all_running_tasks().await { self.handle_task_abort(task, reason.clone()).await; } self.close_unified_exec_sessions().await; } pub async fn on_task_finished( self: &Arc<Self>, turn_context: Arc<TurnContext>, last_agent_message: Option<String>, ) { let mut active = self.active_turn.lock().await; let should_close_sessions = if let Some(at) = active.as_mut() && at.remove_task(&turn_context.sub_id) { *active = None; true } else { false }; drop(active); if should_close_sessions { self.close_unified_exec_sessions().await; } let event = EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }); self.send_event(turn_context.as_ref(), event).await; } async fn register_new_active_task(&self, task: RunningTask) { let mut active = self.active_turn.lock().await; let mut turn = ActiveTurn::default(); turn.add_task(task); *active = Some(turn); } async fn take_all_running_tasks(&self) -> Vec<RunningTask> { let mut active = self.active_turn.lock().await; match active.take() { Some(mut at) => { at.clear_pending().await; at.drain_tasks() } None => Vec::new(), } } async fn close_unified_exec_sessions(&self) { self.services .unified_exec_manager .terminate_all_sessions() .await; } async fn handle_task_abort(self: &Arc<Self>, task: RunningTask, reason: TurnAbortReason) { let sub_id = task.turn_context.sub_id.clone(); if task.cancellation_token.is_cancelled() { return; } trace!(task_kind = ?task.kind, sub_id, "aborting running task"); task.cancellation_token.cancel(); let session_task = task.task; select! 
{ _ = task.done.notified() => { }, _ = tokio::time::sleep(Duration::from_millis(GRACEFULL_INTERRUPTION_TIMEOUT_MS)) => { warn!("task {sub_id} didn't complete gracefully after {}ms", GRACEFULL_INTERRUPTION_TIMEOUT_MS); } } task.handle.abort(); let session_ctx = Arc::new(SessionTaskContext::new(Arc::clone(self))); session_task .abort(session_ctx, Arc::clone(&task.turn_context)) .await; let event = EventMsg::TurnAborted(TurnAbortedEvent { reason }); self.send_event(task.turn_context.as_ref(), event).await; } } #[cfg(test)] mod tests {}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/user_shell.rs
codex-rs/core/src/tasks/user_shell.rs
use std::sync::Arc; use std::time::Duration; use async_trait::async_trait; use codex_async_utils::CancelErr; use codex_async_utils::OrCancelExt; use codex_protocol::user_input::UserInput; use tokio_util::sync::CancellationToken; use tracing::error; use uuid::Uuid; use crate::codex::TurnContext; use crate::exec::ExecToolCallOutput; use crate::exec::SandboxType; use crate::exec::StdoutStream; use crate::exec::StreamOutput; use crate::exec::execute_exec_env; use crate::exec_env::create_env; use crate::parse_command::parse_command; use crate::protocol::EventMsg; use crate::protocol::ExecCommandBeginEvent; use crate::protocol::ExecCommandEndEvent; use crate::protocol::ExecCommandSource; use crate::protocol::SandboxPolicy; use crate::protocol::TaskStartedEvent; use crate::sandboxing::ExecEnv; use crate::sandboxing::SandboxPermissions; use crate::state::TaskKind; use crate::tools::format_exec_output_str; use crate::user_shell_command::user_shell_command_record_item; use super::SessionTask; use super::SessionTaskContext; const USER_SHELL_TIMEOUT_MS: u64 = 60 * 60 * 1000; // 1 hour #[derive(Clone)] pub(crate) struct UserShellCommandTask { command: String, } impl UserShellCommandTask { pub(crate) fn new(command: String) -> Self { Self { command } } } #[async_trait] impl SessionTask for UserShellCommandTask { fn kind(&self) -> TaskKind { TaskKind::Regular } async fn run( self: Arc<Self>, session: Arc<SessionTaskContext>, turn_context: Arc<TurnContext>, _input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<String> { let event = EventMsg::TaskStarted(TaskStartedEvent { model_context_window: turn_context.client.get_model_context_window(), }); let session = session.clone_session(); session.send_event(turn_context.as_ref(), event).await; // Execute the user's script under their default shell when known; this // allows commands that use shell features (pipes, &&, redirects, etc.). // We do not source rc files or otherwise reformat the script. 
let use_login_shell = true; let command = session .user_shell() .derive_exec_args(&self.command, use_login_shell); let call_id = Uuid::new_v4().to_string(); let raw_command = self.command.clone(); let cwd = turn_context.cwd.clone(); let parsed_cmd = parse_command(&command); session .send_event( turn_context.as_ref(), EventMsg::ExecCommandBegin(ExecCommandBeginEvent { call_id: call_id.clone(), process_id: None, turn_id: turn_context.sub_id.clone(), command: command.clone(), cwd: cwd.clone(), parsed_cmd: parsed_cmd.clone(), source: ExecCommandSource::UserShell, interaction_input: None, }), ) .await; let exec_env = ExecEnv { command: command.clone(), cwd: cwd.clone(), env: create_env(&turn_context.shell_environment_policy), // TODO(zhao-oai): Now that we have ExecExpiration::Cancellation, we // should use that instead of an "arbitrarily large" timeout here. expiration: USER_SHELL_TIMEOUT_MS.into(), sandbox: SandboxType::None, sandbox_permissions: SandboxPermissions::UseDefault, justification: None, arg0: None, }; let stdout_stream = Some(StdoutStream { sub_id: turn_context.sub_id.clone(), call_id: call_id.clone(), tx_event: session.get_tx_event(), }); let sandbox_policy = SandboxPolicy::DangerFullAccess; let exec_result = execute_exec_env(exec_env, &sandbox_policy, stdout_stream) .or_cancel(&cancellation_token) .await; match exec_result { Err(CancelErr::Cancelled) => { let aborted_message = "command aborted by user".to_string(); let exec_output = ExecToolCallOutput { exit_code: -1, stdout: StreamOutput::new(String::new()), stderr: StreamOutput::new(aborted_message.clone()), aggregated_output: StreamOutput::new(aborted_message.clone()), duration: Duration::ZERO, timed_out: false, }; let output_items = [user_shell_command_record_item( &raw_command, &exec_output, &turn_context, )]; session .record_conversation_items(turn_context.as_ref(), &output_items) .await; session .send_event( turn_context.as_ref(), EventMsg::ExecCommandEnd(ExecCommandEndEvent { call_id, process_id: 
None, turn_id: turn_context.sub_id.clone(), command: command.clone(), cwd: cwd.clone(), parsed_cmd: parsed_cmd.clone(), source: ExecCommandSource::UserShell, interaction_input: None, stdout: String::new(), stderr: aborted_message.clone(), aggregated_output: aborted_message.clone(), exit_code: -1, duration: Duration::ZERO, formatted_output: aborted_message, }), ) .await; } Ok(Ok(output)) => { session .send_event( turn_context.as_ref(), EventMsg::ExecCommandEnd(ExecCommandEndEvent { call_id: call_id.clone(), process_id: None, turn_id: turn_context.sub_id.clone(), command: command.clone(), cwd: cwd.clone(), parsed_cmd: parsed_cmd.clone(), source: ExecCommandSource::UserShell, interaction_input: None, stdout: output.stdout.text.clone(), stderr: output.stderr.text.clone(), aggregated_output: output.aggregated_output.text.clone(), exit_code: output.exit_code, duration: output.duration, formatted_output: format_exec_output_str( &output, turn_context.truncation_policy, ), }), ) .await; let output_items = [user_shell_command_record_item( &raw_command, &output, &turn_context, )]; session .record_conversation_items(turn_context.as_ref(), &output_items) .await; } Ok(Err(err)) => { error!("user shell command failed: {err:?}"); let message = format!("execution error: {err:?}"); let exec_output = ExecToolCallOutput { exit_code: -1, stdout: StreamOutput::new(String::new()), stderr: StreamOutput::new(message.clone()), aggregated_output: StreamOutput::new(message.clone()), duration: Duration::ZERO, timed_out: false, }; session .send_event( turn_context.as_ref(), EventMsg::ExecCommandEnd(ExecCommandEndEvent { call_id, process_id: None, turn_id: turn_context.sub_id.clone(), command, cwd, parsed_cmd, source: ExecCommandSource::UserShell, interaction_input: None, stdout: exec_output.stdout.text.clone(), stderr: exec_output.stderr.text.clone(), aggregated_output: exec_output.aggregated_output.text.clone(), exit_code: exec_output.exit_code, duration: exec_output.duration, 
formatted_output: format_exec_output_str( &exec_output, turn_context.truncation_policy, ), }), ) .await; let output_items = [user_shell_command_record_item( &raw_command, &exec_output, &turn_context, )]; session .record_conversation_items(turn_context.as_ref(), &output_items) .await; } } None } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tasks/review.rs
codex-rs/core/src/tasks/review.rs
use std::sync::Arc; use async_trait::async_trait; use codex_protocol::items::TurnItem; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::AgentMessageContentDeltaEvent; use codex_protocol::protocol::AgentMessageDeltaEvent; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExitedReviewModeEvent; use codex_protocol::protocol::ItemCompletedEvent; use codex_protocol::protocol::ReviewOutputEvent; use tokio_util::sync::CancellationToken; use crate::codex::Session; use crate::codex::TurnContext; use crate::codex_delegate::run_codex_conversation_one_shot; use crate::review_format::format_review_findings_block; use crate::review_format::render_review_output_text; use crate::state::TaskKind; use codex_protocol::user_input::UserInput; use super::SessionTask; use super::SessionTaskContext; #[derive(Clone, Copy)] pub(crate) struct ReviewTask; impl ReviewTask { pub(crate) fn new() -> Self { Self } } #[async_trait] impl SessionTask for ReviewTask { fn kind(&self) -> TaskKind { TaskKind::Review } async fn run( self: Arc<Self>, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<String> { // Start sub-codex conversation and get the receiver for events. 
let output = match start_review_conversation( session.clone(), ctx.clone(), input, cancellation_token.clone(), ) .await { Some(receiver) => process_review_events(session.clone(), ctx.clone(), receiver).await, None => None, }; if !cancellation_token.is_cancelled() { exit_review_mode(session.clone_session(), output.clone(), ctx.clone()).await; } None } async fn abort(&self, session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>) { exit_review_mode(session.clone_session(), None, ctx).await; } } async fn start_review_conversation( session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, input: Vec<UserInput>, cancellation_token: CancellationToken, ) -> Option<async_channel::Receiver<Event>> { let config = ctx.client.config(); let mut sub_agent_config = config.as_ref().clone(); // Run with only reviewer rubric — drop outer user_instructions sub_agent_config.user_instructions = None; // Avoid loading project docs; reviewer only needs findings sub_agent_config.project_doc_max_bytes = 0; // Carry over review-only feature restrictions so the delegate cannot // re-enable blocked tools (web search, view image). 
sub_agent_config .features .disable(crate::features::Feature::WebSearchRequest) .disable(crate::features::Feature::ViewImageTool); // Set explicit review rubric for the sub-agent sub_agent_config.base_instructions = Some(crate::REVIEW_PROMPT.to_string()); sub_agent_config.model = Some(config.review_model.clone()); (run_codex_conversation_one_shot( sub_agent_config, session.auth_manager(), session.models_manager(), input, session.clone_session(), ctx.clone(), cancellation_token, None, ) .await) .ok() .map(|io| io.rx_event) } async fn process_review_events( session: Arc<SessionTaskContext>, ctx: Arc<TurnContext>, receiver: async_channel::Receiver<Event>, ) -> Option<ReviewOutputEvent> { let mut prev_agent_message: Option<Event> = None; while let Ok(event) = receiver.recv().await { match event.clone().msg { EventMsg::AgentMessage(_) => { if let Some(prev) = prev_agent_message.take() { session .clone_session() .send_event(ctx.as_ref(), prev.msg) .await; } prev_agent_message = Some(event); } // Suppress ItemCompleted only for assistant messages: forwarding it // would trigger legacy AgentMessage via as_legacy_events(), which this // review flow intentionally hides in favor of structured output. EventMsg::ItemCompleted(ItemCompletedEvent { item: TurnItem::AgentMessage(_), .. }) | EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { .. }) | EventMsg::AgentMessageContentDelta(AgentMessageContentDeltaEvent { .. }) => {} EventMsg::TaskComplete(task_complete) => { // Parse review output from the last agent message (if present). let out = task_complete .last_agent_message .as_deref() .map(parse_review_output_event); return out; } EventMsg::TurnAborted(_) => { // Cancellation or abort: consumer will finalize with None. return None; } other => { session .clone_session() .send_event(ctx.as_ref(), other) .await; } } } // Channel closed without TaskComplete: treat as interrupted. None } /// Parse a ReviewOutputEvent from a text blob returned by the reviewer model. 
/// If the text is valid JSON matching ReviewOutputEvent, deserialize it. /// Otherwise, attempt to extract the first JSON object substring and parse it. /// If parsing still fails, return a structured fallback carrying the plain text /// in `overall_explanation`. fn parse_review_output_event(text: &str) -> ReviewOutputEvent { if let Ok(ev) = serde_json::from_str::<ReviewOutputEvent>(text) { return ev; } if let (Some(start), Some(end)) = (text.find('{'), text.rfind('}')) && start < end && let Some(slice) = text.get(start..=end) && let Ok(ev) = serde_json::from_str::<ReviewOutputEvent>(slice) { return ev; } ReviewOutputEvent { overall_explanation: text.to_string(), ..Default::default() } } /// Emits an ExitedReviewMode Event with optional ReviewOutput, /// and records a developer message with the review output. pub(crate) async fn exit_review_mode( session: Arc<Session>, review_output: Option<ReviewOutputEvent>, ctx: Arc<TurnContext>, ) { const REVIEW_USER_MESSAGE_ID: &str = "review:rollout:user"; const REVIEW_ASSISTANT_MESSAGE_ID: &str = "review:rollout:assistant"; let (user_message, assistant_message) = if let Some(out) = review_output.clone() { let mut findings_str = String::new(); let text = out.overall_explanation.trim(); if !text.is_empty() { findings_str.push_str(text); } if !out.findings.is_empty() { let block = format_review_findings_block(&out.findings, None); findings_str.push_str(&format!("\n{block}")); } let rendered = crate::client_common::REVIEW_EXIT_SUCCESS_TMPL.replace("{results}", &findings_str); let assistant_message = render_review_output_text(&out); (rendered, assistant_message) } else { let rendered = crate::client_common::REVIEW_EXIT_INTERRUPTED_TMPL.to_string(); let assistant_message = "Review was interrupted. Please re-run /review and wait for it to complete." 
.to_string(); (rendered, assistant_message) }; session .record_conversation_items( &ctx, &[ResponseItem::Message { id: Some(REVIEW_USER_MESSAGE_ID.to_string()), role: "user".to_string(), content: vec![ContentItem::InputText { text: user_message }], }], ) .await; session .send_event( ctx.as_ref(), EventMsg::ExitedReviewMode(ExitedReviewModeEvent { review_output }), ) .await; session .record_response_item_and_emit_turn_item( ctx.as_ref(), ResponseItem::Message { id: Some(REVIEW_ASSISTANT_MESSAGE_ID.to_string()), role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: assistant_message, }], }, ) .await; }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/models_manager/model_family.rs
codex-rs/core/src/models_manager/model_family.rs
use codex_protocol::config_types::Verbosity; use codex_protocol::openai_models::ApplyPatchToolType; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ReasoningEffort; use crate::config::Config; use crate::truncate::TruncationPolicy; /// The `instructions` field in the payload sent to a model should always start /// with this content. const BASE_INSTRUCTIONS: &str = include_str!("../../prompt.md"); const GPT_5_CODEX_INSTRUCTIONS: &str = include_str!("../../gpt_5_codex_prompt.md"); const GPT_5_1_INSTRUCTIONS: &str = include_str!("../../gpt_5_1_prompt.md"); const GPT_5_2_INSTRUCTIONS: &str = include_str!("../../gpt_5_2_prompt.md"); const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../../gpt-5.1-codex-max_prompt.md"); const GPT_5_2_CODEX_INSTRUCTIONS: &str = include_str!("../../gpt-5.2-codex_prompt.md"); pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000; /// A model family is a group of models that share certain characteristics. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ModelFamily { /// The full model slug used to derive this model family, e.g. /// "gpt-4.1-2025-04-14". pub slug: String, /// The model family name, e.g. "gpt-4.1". This string is used when deriving /// default metadata for the family, such as context windows. pub family: String, /// True if the model needs additional instructions on how to use the /// "virtual" `apply_patch` CLI. pub needs_special_apply_patch_instructions: bool, /// Maximum supported context window, if known. pub context_window: Option<i64>, /// Token threshold for automatic compaction if config does not override it. auto_compact_token_limit: Option<i64>, // Whether the `reasoning` field can be set when making a request to this // model family. Note it has `effort` and `summary` subfields (though // `summary` is optional). 
pub supports_reasoning_summaries: bool, // The reasoning effort to use for this model family when none is explicitly chosen. pub default_reasoning_effort: Option<ReasoningEffort>, /// Whether this model supports parallel tool calls when using the /// Responses API. pub supports_parallel_tool_calls: bool, /// Present if the model performs better when `apply_patch` is provided as /// a tool call instead of just a bash command pub apply_patch_tool_type: Option<ApplyPatchToolType>, // Instructions to use for querying the model pub base_instructions: String, /// Names of beta tools that should be exposed to this model family. pub experimental_supported_tools: Vec<String>, /// Percentage of the context window considered usable for inputs, after /// reserving headroom for system prompts, tool overhead, and model output. /// This is applied when computing the effective context window seen by /// consumers. pub effective_context_window_percent: i64, /// If the model family supports setting the verbosity level when using Responses API. pub support_verbosity: bool, // The default verbosity level for this model family when using Responses API. pub default_verbosity: Option<Verbosity>, /// Preferred shell tool type for this model family when features do not override it. 
pub shell_type: ConfigShellToolType, pub truncation_policy: TruncationPolicy, } impl ModelFamily { pub(super) fn with_config_overrides(mut self, config: &Config) -> Self { if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries { self.supports_reasoning_summaries = supports_reasoning_summaries; } if let Some(context_window) = config.model_context_window { self.context_window = Some(context_window); } if let Some(auto_compact_token_limit) = config.model_auto_compact_token_limit { self.auto_compact_token_limit = Some(auto_compact_token_limit); } self } pub(super) fn with_remote_overrides(mut self, remote_models: Vec<ModelInfo>) -> Self { for model in remote_models { if model.slug == self.slug { self.apply_remote_overrides(model); } } self } fn apply_remote_overrides(&mut self, model: ModelInfo) { let ModelInfo { slug: _, display_name: _, description: _, default_reasoning_level, supported_reasoning_levels: _, shell_type, visibility: _, supported_in_api: _, priority: _, upgrade: _, base_instructions, supports_reasoning_summaries, support_verbosity, default_verbosity, apply_patch_tool_type, truncation_policy, supports_parallel_tool_calls, context_window, experimental_supported_tools, } = model; self.default_reasoning_effort = Some(default_reasoning_level); self.shell_type = shell_type; if let Some(base) = base_instructions { self.base_instructions = base; } self.supports_reasoning_summaries = supports_reasoning_summaries; self.support_verbosity = support_verbosity; self.default_verbosity = default_verbosity; self.apply_patch_tool_type = apply_patch_tool_type; self.truncation_policy = truncation_policy.into(); self.supports_parallel_tool_calls = supports_parallel_tool_calls; self.context_window = context_window; self.experimental_supported_tools = experimental_supported_tools; } pub fn auto_compact_token_limit(&self) -> Option<i64> { self.auto_compact_token_limit .or(self.context_window.map(Self::default_auto_compact_limit)) } const fn 
default_auto_compact_limit(context_window: i64) -> i64 { (context_window * 9) / 10 } pub fn get_model_slug(&self) -> &str { &self.slug } } macro_rules! model_family { ( $slug:expr, $family:expr $(, $key:ident : $value:expr )* $(,)? ) => {{ // defaults #[allow(unused_mut)] let mut mf = ModelFamily { slug: $slug.to_string(), family: $family.to_string(), needs_special_apply_patch_instructions: false, context_window: Some(CONTEXT_WINDOW_272K), auto_compact_token_limit: None, supports_reasoning_summaries: false, supports_parallel_tool_calls: false, apply_patch_tool_type: None, base_instructions: BASE_INSTRUCTIONS.to_string(), experimental_supported_tools: Vec::new(), effective_context_window_percent: 95, support_verbosity: false, shell_type: ConfigShellToolType::Default, default_verbosity: None, default_reasoning_effort: None, truncation_policy: TruncationPolicy::Bytes(10_000), }; // apply overrides $( mf.$key = $value; )* mf }}; } /// Internal offline helper for `ModelsManager` that returns a `ModelFamily` for the given /// model slug. 
#[allow(clippy::if_same_then_else)] pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { if slug.starts_with("o3") { model_family!( slug, "o3", supports_reasoning_summaries: true, needs_special_apply_patch_instructions: true, context_window: Some(200_000), ) } else if slug.starts_with("o4-mini") { model_family!( slug, "o4-mini", supports_reasoning_summaries: true, needs_special_apply_patch_instructions: true, context_window: Some(200_000), ) } else if slug.starts_with("codex-mini-latest") { model_family!( slug, "codex-mini-latest", supports_reasoning_summaries: true, needs_special_apply_patch_instructions: true, shell_type: ConfigShellToolType::Local, context_window: Some(200_000), ) } else if slug.starts_with("gpt-4.1") { model_family!( slug, "gpt-4.1", needs_special_apply_patch_instructions: true, context_window: Some(1_047_576), ) } else if slug.starts_with("gpt-oss") || slug.starts_with("openai/gpt-oss") { model_family!( slug, "gpt-oss", apply_patch_tool_type: Some(ApplyPatchToolType::Function), context_window: Some(96_000), ) } else if slug.starts_with("gpt-4o") { model_family!( slug, "gpt-4o", needs_special_apply_patch_instructions: true, context_window: Some(128_000), ) } else if slug.starts_with("gpt-3.5") { model_family!( slug, "gpt-3.5", needs_special_apply_patch_instructions: true, context_window: Some(16_385), ) } else if slug.starts_with("test-gpt-5") { model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(), experimental_supported_tools: vec![ "grep_files".to_string(), "list_dir".to_string(), "read_file".to_string(), "test_sync_tool".to_string(), ], supports_parallel_tool_calls: true, shell_type: ConfigShellToolType::ShellCommand, support_verbosity: true, truncation_policy: TruncationPolicy::Tokens(10_000), ) // Experimental models. } else if slug.starts_with("exp-codex") || slug.starts_with("codex-1p") { // Same as gpt-5.1-codex-max. 
model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("exp-") { model_family!( slug, slug, supports_reasoning_summaries: true, apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), support_verbosity: true, default_verbosity: Some(Verbosity::Low), base_instructions: BASE_INSTRUCTIONS.to_string(), default_reasoning_effort: Some(ReasoningEffort::Medium), truncation_policy: TruncationPolicy::Bytes(10_000), shell_type: ConfigShellToolType::UnifiedExec, supports_parallel_tool_calls: true, context_window: Some(CONTEXT_WINDOW_272K), ) // Production models. } else if slug.starts_with("gpt-5.2-codex") { model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("bengalfox") { model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("gpt-5.1-codex-max") { model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(), apply_patch_tool_type: 
Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: false, support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("gpt-5-codex") || slug.starts_with("gpt-5.1-codex") || slug.starts_with("codex-") { model_family!( slug, slug, supports_reasoning_summaries: true, base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: false, support_verbosity: false, truncation_policy: TruncationPolicy::Tokens(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("gpt-5.2") { model_family!( slug, slug, supports_reasoning_summaries: true, apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), support_verbosity: true, default_verbosity: Some(Verbosity::Low), base_instructions: GPT_5_2_INSTRUCTIONS.to_string(), default_reasoning_effort: Some(ReasoningEffort::Medium), truncation_policy: TruncationPolicy::Bytes(10_000), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("boomslang") { model_family!( slug, slug, supports_reasoning_summaries: true, apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), support_verbosity: true, default_verbosity: Some(Verbosity::Low), base_instructions: GPT_5_2_INSTRUCTIONS.to_string(), default_reasoning_effort: Some(ReasoningEffort::Medium), truncation_policy: TruncationPolicy::Bytes(10_000), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("gpt-5.1") { model_family!( slug, "gpt-5.1", supports_reasoning_summaries: true, apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), support_verbosity: true, default_verbosity: 
Some(Verbosity::Low), base_instructions: GPT_5_1_INSTRUCTIONS.to_string(), default_reasoning_effort: Some(ReasoningEffort::Medium), truncation_policy: TruncationPolicy::Bytes(10_000), shell_type: ConfigShellToolType::ShellCommand, supports_parallel_tool_calls: true, context_window: Some(CONTEXT_WINDOW_272K), ) } else if slug.starts_with("gpt-5") { model_family!( slug, "gpt-5", supports_reasoning_summaries: true, needs_special_apply_patch_instructions: true, shell_type: ConfigShellToolType::Default, support_verbosity: true, truncation_policy: TruncationPolicy::Bytes(10_000), context_window: Some(CONTEXT_WINDOW_272K), ) } else { derive_default_model_family(slug) } } fn derive_default_model_family(model: &str) -> ModelFamily { tracing::warn!("Unknown model {model} is used. This will degrade the performance of Codex."); ModelFamily { slug: model.to_string(), family: model.to_string(), needs_special_apply_patch_instructions: false, context_window: None, auto_compact_token_limit: None, supports_reasoning_summaries: false, supports_parallel_tool_calls: false, apply_patch_tool_type: None, base_instructions: BASE_INSTRUCTIONS.to_string(), experimental_supported_tools: Vec::new(), effective_context_window_percent: 95, support_verbosity: false, shell_type: ConfigShellToolType::Default, default_verbosity: None, default_reasoning_effort: None, truncation_policy: TruncationPolicy::Bytes(10_000), } } #[cfg(test)] mod tests { use super::*; use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::ReasoningEffortPreset; use codex_protocol::openai_models::TruncationPolicyConfig; fn remote(slug: &str, effort: ReasoningEffort, shell: ConfigShellToolType) -> ModelInfo { ModelInfo { slug: slug.to_string(), display_name: slug.to_string(), description: Some(format!("{slug} desc")), default_reasoning_level: effort, supported_reasoning_levels: vec![ReasoningEffortPreset { effort, description: effort.to_string(), }], shell_type: shell, visibility: 
ModelVisibility::List, supported_in_api: true, priority: 1, upgrade: None, base_instructions: None, supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: None, truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, experimental_supported_tools: Vec::new(), } } #[test] fn remote_overrides_apply_when_slug_matches() { let family = model_family!("gpt-4o-mini", "gpt-4o-mini"); assert_ne!(family.default_reasoning_effort, Some(ReasoningEffort::High)); let updated = family.with_remote_overrides(vec![ remote( "gpt-4o-mini", ReasoningEffort::High, ConfigShellToolType::ShellCommand, ), remote( "other-model", ReasoningEffort::Low, ConfigShellToolType::UnifiedExec, ), ]); assert_eq!( updated.default_reasoning_effort, Some(ReasoningEffort::High) ); assert_eq!(updated.shell_type, ConfigShellToolType::ShellCommand); } #[test] fn remote_overrides_skip_non_matching_models() { let family = model_family!( "codex-mini-latest", "codex-mini-latest", shell_type: ConfigShellToolType::Local ); let updated = family.clone().with_remote_overrides(vec![remote( "other", ReasoningEffort::High, ConfigShellToolType::ShellCommand, )]); assert_eq!( updated.default_reasoning_effort, family.default_reasoning_effort ); assert_eq!(updated.shell_type, family.shell_type); } #[test] fn remote_overrides_apply_extended_metadata() { let family = model_family!( "gpt-5.1", "gpt-5.1", supports_reasoning_summaries: false, support_verbosity: false, default_verbosity: None, apply_patch_tool_type: Some(ApplyPatchToolType::Function), supports_parallel_tool_calls: false, experimental_supported_tools: vec!["local".to_string()], truncation_policy: TruncationPolicy::Bytes(10_000), context_window: Some(100), ); let updated = family.with_remote_overrides(vec![ModelInfo { slug: "gpt-5.1".to_string(), display_name: "gpt-5.1".to_string(), description: Some("desc".to_string()), default_reasoning_level: 
ReasoningEffort::High, supported_reasoning_levels: vec![ReasoningEffortPreset { effort: ReasoningEffort::High, description: "High".to_string(), }], shell_type: ConfigShellToolType::ShellCommand, visibility: ModelVisibility::List, supported_in_api: true, priority: 10, upgrade: None, base_instructions: Some("Remote instructions".to_string()), supports_reasoning_summaries: true, support_verbosity: true, default_verbosity: Some(Verbosity::High), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), truncation_policy: TruncationPolicyConfig::tokens(2_000), supports_parallel_tool_calls: true, context_window: Some(400_000), experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()], }]); assert_eq!( updated.default_reasoning_effort, Some(ReasoningEffort::High) ); assert!(updated.supports_reasoning_summaries); assert!(updated.support_verbosity); assert_eq!(updated.default_verbosity, Some(Verbosity::High)); assert_eq!(updated.shell_type, ConfigShellToolType::ShellCommand); assert_eq!( updated.apply_patch_tool_type, Some(ApplyPatchToolType::Freeform) ); assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000)); assert!(updated.supports_parallel_tool_calls); assert_eq!(updated.context_window, Some(400_000)); assert_eq!( updated.experimental_supported_tools, vec!["alpha".to_string(), "beta".to_string()] ); assert_eq!(updated.base_instructions, "Remote instructions"); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/models_manager/manager.rs
codex-rs/core/src/models_manager/manager.rs
use chrono::Utc; use codex_api::ModelsClient; use codex_api::ReqwestTransport; use codex_app_server_protocol::AuthMode; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelsResponse; use http::HeaderMap; use std::collections::HashSet; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use tokio::sync::RwLock; use tokio::sync::TryLockError; use tracing::error; use super::cache; use super::cache::ModelsCache; use crate::api_bridge::auth_provider_from_auth; use crate::api_bridge::map_api_error; use crate::auth::AuthManager; use crate::config::Config; use crate::default_client::build_reqwest_client; use crate::error::Result as CoreResult; use crate::features::Feature; use crate::model_provider_info::ModelProviderInfo; use crate::models_manager::model_family::ModelFamily; use crate::models_manager::model_presets::builtin_model_presets; const MODEL_CACHE_FILE: &str = "models_cache.json"; const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300); const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max"; const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "gpt-5.2-codex"; const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced"; /// Coordinates remote model discovery plus cached metadata on disk. #[derive(Debug)] pub struct ModelsManager { // todo(aibrahim) merge available_models and model family creation into one struct local_models: Vec<ModelPreset>, remote_models: RwLock<Vec<ModelInfo>>, auth_manager: Arc<AuthManager>, etag: RwLock<Option<String>>, codex_home: PathBuf, cache_ttl: Duration, provider: ModelProviderInfo, } impl ModelsManager { /// Construct a manager scoped to the provided `AuthManager`. 
pub fn new(auth_manager: Arc<AuthManager>) -> Self { let codex_home = auth_manager.codex_home().to_path_buf(); Self { local_models: builtin_model_presets(auth_manager.get_auth_mode()), remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()), auth_manager, etag: RwLock::new(None), codex_home, cache_ttl: DEFAULT_MODEL_CACHE_TTL, provider: ModelProviderInfo::create_openai_provider(), } } #[cfg(any(test, feature = "test-support"))] /// Construct a manager scoped to the provided `AuthManager` with a specific provider. Used for integration tests. pub fn with_provider(auth_manager: Arc<AuthManager>, provider: ModelProviderInfo) -> Self { let codex_home = auth_manager.codex_home().to_path_buf(); Self { local_models: builtin_model_presets(auth_manager.get_auth_mode()), remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()), auth_manager, etag: RwLock::new(None), codex_home, cache_ttl: DEFAULT_MODEL_CACHE_TTL, provider, } } /// Fetch the latest remote models, using the on-disk cache when still fresh. 
pub async fn refresh_available_models_with_cache(&self, config: &Config) -> CoreResult<()> { if !config.features.enabled(Feature::RemoteModels) || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey) { return Ok(()); } if self.try_load_cache().await { return Ok(()); } self.refresh_available_models_no_cache(config.features.enabled(Feature::RemoteModels)) .await } pub(crate) async fn refresh_available_models_no_cache( &self, remote_models_feature: bool, ) -> CoreResult<()> { if !remote_models_feature || self.auth_manager.get_auth_mode() == Some(AuthMode::ApiKey) { return Ok(()); } let auth = self.auth_manager.auth(); let api_provider = self.provider.to_api_provider(Some(AuthMode::ChatGPT))?; let api_auth = auth_provider_from_auth(auth.clone(), &self.provider).await?; let transport = ReqwestTransport::new(build_reqwest_client()); let client = ModelsClient::new(transport, api_provider, api_auth); let client_version = format_client_version_to_whole(); let (models, etag) = client .list_models(&client_version, HeaderMap::new()) .await .map_err(map_api_error)?; self.apply_remote_models(models.clone()).await; *self.etag.write().await = etag.clone(); self.persist_cache(&models, etag).await; Ok(()) } pub async fn list_models(&self, config: &Config) -> Vec<ModelPreset> { if let Err(err) = self.refresh_available_models_with_cache(config).await { error!("failed to refresh available models: {err}"); } let remote_models = self.remote_models(config).await; self.build_available_models(remote_models) } pub fn try_list_models(&self, config: &Config) -> Result<Vec<ModelPreset>, TryLockError> { let remote_models = self.try_get_remote_models(config)?; Ok(self.build_available_models(remote_models)) } fn find_family_for_model(slug: &str) -> ModelFamily { super::model_family::find_family_for_model(slug) } /// Look up the requested model family while applying remote metadata overrides. 
pub async fn construct_model_family(&self, model: &str, config: &Config) -> ModelFamily { Self::find_family_for_model(model) .with_remote_overrides(self.remote_models(config).await) .with_config_overrides(config) } pub async fn get_model(&self, model: &Option<String>, config: &Config) -> String { if let Some(model) = model.as_ref() { return model.to_string(); } if let Err(err) = self.refresh_available_models_with_cache(config).await { error!("failed to refresh available models: {err}"); } // if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model let auth_mode = self.auth_manager.get_auth_mode(); let remote_models = self.remote_models(config).await; if auth_mode == Some(AuthMode::ChatGPT) && self .build_available_models(remote_models) .iter() .any(|m| m.model == CODEX_AUTO_BALANCED_MODEL) { return CODEX_AUTO_BALANCED_MODEL.to_string(); } else if auth_mode == Some(AuthMode::ChatGPT) { return OPENAI_DEFAULT_CHATGPT_MODEL.to_string(); } OPENAI_DEFAULT_API_MODEL.to_string() } pub async fn refresh_if_new_etag(&self, etag: String, remote_models_feature: bool) { let current_etag = self.get_etag().await; if current_etag.clone().is_some() && current_etag.as_deref() == Some(etag.as_str()) { return; } if let Err(err) = self .refresh_available_models_no_cache(remote_models_feature) .await { error!("failed to refresh available models: {err}"); } } #[cfg(any(test, feature = "test-support"))] pub fn get_model_offline(model: Option<&str>) -> String { model.unwrap_or(OPENAI_DEFAULT_CHATGPT_MODEL).to_string() } #[cfg(any(test, feature = "test-support"))] /// Offline helper that builds a `ModelFamily` without consulting remote state. pub fn construct_model_family_offline(model: &str, config: &Config) -> ModelFamily { Self::find_family_for_model(model).with_config_overrides(config) } async fn get_etag(&self) -> Option<String> { self.etag.read().await.clone() } /// Replace the cached remote models and rebuild the derived presets list. 
async fn apply_remote_models(&self, models: Vec<ModelInfo>) { *self.remote_models.write().await = models; } fn load_remote_models_from_file() -> Result<Vec<ModelInfo>, std::io::Error> { let file_contents = include_str!("../../models.json"); let response: ModelsResponse = serde_json::from_str(file_contents)?; Ok(response.models) } /// Attempt to satisfy the refresh from the cache when it matches the provider and TTL. async fn try_load_cache(&self) -> bool { // todo(aibrahim): think if we should store fetched_at in ModelsManager so we don't always need to read the disk let cache_path = self.cache_path(); let cache = match cache::load_cache(&cache_path).await { Ok(cache) => cache, Err(err) => { error!("failed to load models cache: {err}"); return false; } }; let cache = match cache { Some(cache) => cache, None => return false, }; if !cache.is_fresh(self.cache_ttl) { return false; } let models = cache.models.clone(); *self.etag.write().await = cache.etag.clone(); self.apply_remote_models(models.clone()).await; true } /// Serialize the latest fetch to disk for reuse across future processes. async fn persist_cache(&self, models: &[ModelInfo], etag: Option<String>) { let cache = ModelsCache { fetched_at: Utc::now(), etag, models: models.to_vec(), }; let cache_path = self.cache_path(); if let Err(err) = cache::save_cache(&cache_path, &cache).await { error!("failed to write models cache: {err}"); } } /// Merge remote model metadata into picker-ready presets, preserving existing entries. 
fn build_available_models(&self, mut remote_models: Vec<ModelInfo>) -> Vec<ModelPreset> { remote_models.sort_by(|a, b| a.priority.cmp(&b.priority)); let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect(); let existing_presets = self.local_models.clone(); let mut merged_presets = Self::merge_presets(remote_presets, existing_presets); merged_presets = self.filter_visible_models(merged_presets); let has_default = merged_presets.iter().any(|preset| preset.is_default); if let Some(default) = merged_presets.first_mut() && !has_default { default.is_default = true; } merged_presets } fn filter_visible_models(&self, models: Vec<ModelPreset>) -> Vec<ModelPreset> { let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT); models .into_iter() .filter(|model| model.show_in_picker && (chatgpt_mode || model.supported_in_api)) .collect() } fn merge_presets( remote_presets: Vec<ModelPreset>, existing_presets: Vec<ModelPreset>, ) -> Vec<ModelPreset> { if remote_presets.is_empty() { return existing_presets; } let remote_slugs: HashSet<&str> = remote_presets .iter() .map(|preset| preset.model.as_str()) .collect(); let mut merged_presets = remote_presets.clone(); for mut preset in existing_presets { if remote_slugs.contains(preset.model.as_str()) { continue; } preset.is_default = false; merged_presets.push(preset); } merged_presets } async fn remote_models(&self, config: &Config) -> Vec<ModelInfo> { if config.features.enabled(Feature::RemoteModels) { self.remote_models.read().await.clone() } else { Vec::new() } } fn try_get_remote_models(&self, config: &Config) -> Result<Vec<ModelInfo>, TryLockError> { if config.features.enabled(Feature::RemoteModels) { Ok(self.remote_models.try_read()?.clone()) } else { Ok(Vec::new()) } } fn cache_path(&self) -> PathBuf { self.codex_home.join(MODEL_CACHE_FILE) } } /// Convert a client version string to a whole version string (e.g. 
"1.2.3-alpha.4" -> "1.2.3") fn format_client_version_to_whole() -> String { format!( "{}.{}.{}", env!("CARGO_PKG_VERSION_MAJOR"), env!("CARGO_PKG_VERSION_MINOR"), env!("CARGO_PKG_VERSION_PATCH") ) } #[cfg(test)] mod tests { use super::cache::ModelsCache; use super::*; use crate::CodexAuth; use crate::auth::AuthCredentialsStoreMode; use crate::config::ConfigBuilder; use crate::features::Feature; use crate::model_provider_info::WireApi; use codex_protocol::openai_models::ModelsResponse; use core_test_support::responses::mount_models_once; use pretty_assertions::assert_eq; use serde_json::json; use tempfile::tempdir; use wiremock::MockServer; fn remote_model(slug: &str, display: &str, priority: i32) -> ModelInfo { remote_model_with_visibility(slug, display, priority, "list") } fn remote_model_with_visibility( slug: &str, display: &str, priority: i32, visibility: &str, ) -> ModelInfo { serde_json::from_value(json!({ "slug": slug, "display_name": display, "description": format!("{display} desc"), "default_reasoning_level": "medium", "supported_reasoning_levels": [{"effort": "low", "description": "low"}, {"effort": "medium", "description": "medium"}], "shell_type": "shell_command", "visibility": visibility, "minimal_client_version": [0, 1, 0], "supported_in_api": true, "priority": priority, "upgrade": null, "base_instructions": null, "supports_reasoning_summaries": false, "support_verbosity": false, "default_verbosity": null, "apply_patch_tool_type": null, "truncation_policy": {"mode": "bytes", "limit": 10_000}, "supports_parallel_tool_calls": false, "context_window": null, "experimental_supported_tools": [], })) .expect("valid model") } fn provider_for(base_url: String) -> ModelProviderInfo { ModelProviderInfo { name: "mock".into(), base_url: Some(base_url), env_key: None, env_key_instructions: None, experimental_bearer_token: None, wire_api: WireApi::Responses, query_params: None, http_headers: None, env_http_headers: None, request_max_retries: Some(0), 
stream_max_retries: Some(0), stream_idle_timeout_ms: Some(5_000), requires_openai_auth: false, } } #[tokio::test] async fn refresh_available_models_sorts_and_marks_default() { let server = MockServer::start().await; let remote_models = vec![ remote_model("priority-low", "Low", 1), remote_model("priority-high", "High", 0), ]; let models_mock = mount_models_once( &server, ModelsResponse { models: remote_models.clone(), }, ) .await; let codex_home = tempdir().expect("temp dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load default test config"); config.features.enable(Feature::RemoteModels); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing()); let provider = provider_for(server.uri()); let manager = ModelsManager::with_provider(auth_manager, provider); manager .refresh_available_models_with_cache(&config) .await .expect("refresh succeeds"); let cached_remote = manager.remote_models(&config).await; assert_eq!(cached_remote, remote_models); let available = manager.list_models(&config).await; let high_idx = available .iter() .position(|model| model.model == "priority-high") .expect("priority-high should be listed"); let low_idx = available .iter() .position(|model| model.model == "priority-low") .expect("priority-low should be listed"); assert!( high_idx < low_idx, "higher priority should be listed before lower priority" ); assert!( available[high_idx].is_default, "highest priority should be default" ); assert!(!available[low_idx].is_default); assert_eq!( models_mock.requests().len(), 1, "expected a single /models request" ); } #[tokio::test] async fn refresh_available_models_uses_cache_when_fresh() { let server = MockServer::start().await; let remote_models = vec![remote_model("cached", "Cached", 5)]; let models_mock = mount_models_once( &server, ModelsResponse { models: remote_models.clone(), }, ) .await; let codex_home = tempdir().expect("temp 
dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load default test config"); config.features.enable(Feature::RemoteModels); let auth_manager = Arc::new(AuthManager::new( codex_home.path().to_path_buf(), false, AuthCredentialsStoreMode::File, )); let provider = provider_for(server.uri()); let manager = ModelsManager::with_provider(auth_manager, provider); manager .refresh_available_models_with_cache(&config) .await .expect("first refresh succeeds"); assert_eq!( manager.remote_models(&config).await, remote_models, "remote cache should store fetched models" ); // Second call should read from cache and avoid the network. manager .refresh_available_models_with_cache(&config) .await .expect("cached refresh succeeds"); assert_eq!( manager.remote_models(&config).await, remote_models, "cache path should not mutate stored models" ); assert_eq!( models_mock.requests().len(), 1, "cache hit should avoid a second /models request" ); } #[tokio::test] async fn refresh_available_models_refetches_when_cache_stale() { let server = MockServer::start().await; let initial_models = vec![remote_model("stale", "Stale", 1)]; let initial_mock = mount_models_once( &server, ModelsResponse { models: initial_models.clone(), }, ) .await; let codex_home = tempdir().expect("temp dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load default test config"); config.features.enable(Feature::RemoteModels); let auth_manager = Arc::new(AuthManager::new( codex_home.path().to_path_buf(), false, AuthCredentialsStoreMode::File, )); let provider = provider_for(server.uri()); let manager = ModelsManager::with_provider(auth_manager, provider); manager .refresh_available_models_with_cache(&config) .await .expect("initial refresh succeeds"); // Rewrite cache with an old timestamp so it is treated as stale. 
let cache_path = codex_home.path().join(MODEL_CACHE_FILE); let contents = std::fs::read_to_string(&cache_path).expect("cache file should exist after refresh"); let mut cache: ModelsCache = serde_json::from_str(&contents).expect("cache should deserialize"); cache.fetched_at = Utc::now() - chrono::Duration::hours(1); std::fs::write(&cache_path, serde_json::to_string_pretty(&cache).unwrap()) .expect("cache rewrite succeeds"); let updated_models = vec![remote_model("fresh", "Fresh", 9)]; server.reset().await; let refreshed_mock = mount_models_once( &server, ModelsResponse { models: updated_models.clone(), }, ) .await; manager .refresh_available_models_with_cache(&config) .await .expect("second refresh succeeds"); assert_eq!( manager.remote_models(&config).await, updated_models, "stale cache should trigger refetch" ); assert_eq!( initial_mock.requests().len(), 1, "initial refresh should only hit /models once" ); assert_eq!( refreshed_mock.requests().len(), 1, "stale cache refresh should fetch /models once" ); } #[tokio::test] async fn refresh_available_models_drops_removed_remote_models() { let server = MockServer::start().await; let initial_models = vec![remote_model("remote-old", "Remote Old", 1)]; let initial_mock = mount_models_once( &server, ModelsResponse { models: initial_models, }, ) .await; let codex_home = tempdir().expect("temp dir"); let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("load default test config"); config.features.enable(Feature::RemoteModels); let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::create_dummy_chatgpt_auth_for_testing()); let provider = provider_for(server.uri()); let mut manager = ModelsManager::with_provider(auth_manager, provider); manager.cache_ttl = Duration::ZERO; manager .refresh_available_models_with_cache(&config) .await .expect("initial refresh succeeds"); server.reset().await; let refreshed_models = vec![remote_model("remote-new", "Remote New", 
1)]; let refreshed_mock = mount_models_once( &server, ModelsResponse { models: refreshed_models, }, ) .await; manager .refresh_available_models_with_cache(&config) .await .expect("second refresh succeeds"); let available = manager .try_list_models(&config) .expect("models should be available"); assert!( available.iter().any(|preset| preset.model == "remote-new"), "new remote model should be listed" ); assert!( !available.iter().any(|preset| preset.model == "remote-old"), "removed remote model should not be listed" ); assert_eq!( initial_mock.requests().len(), 1, "initial refresh should only hit /models once" ); assert_eq!( refreshed_mock.requests().len(), 1, "second refresh should only hit /models once" ); } #[test] fn build_available_models_picks_default_after_hiding_hidden_models() { let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let provider = provider_for("http://example.test".to_string()); let mut manager = ModelsManager::with_provider(auth_manager, provider); manager.local_models = Vec::new(); let hidden_model = remote_model_with_visibility("hidden", "Hidden", 0, "hide"); let visible_model = remote_model_with_visibility("visible", "Visible", 1, "list"); let mut expected = ModelPreset::from(visible_model.clone()); expected.is_default = true; let available = manager.build_available_models(vec![hidden_model, visible_model]); assert_eq!(available, vec![expected]); } #[test] fn bundled_models_json_roundtrips() { let file_contents = include_str!("../../models.json"); let response: ModelsResponse = serde_json::from_str(file_contents).expect("bundled models.json should deserialize"); let serialized = serde_json::to_string(&response).expect("bundled models.json should serialize"); let roundtripped: ModelsResponse = serde_json::from_str(&serialized).expect("serialized models.json should deserialize"); assert_eq!( response, roundtripped, "bundled models.json should round trip through serde" ); assert!( 
!response.models.is_empty(), "bundled models.json should contain at least one model" ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/models_manager/mod.rs
codex-rs/core/src/models_manager/mod.rs
pub mod cache; pub mod manager; pub mod model_family; pub mod model_presets;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/models_manager/model_presets.rs
codex-rs/core/src/models_manager/model_presets.rs
use codex_app_server_protocol::AuthMode; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelUpgrade; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::openai_models::ReasoningEffortPreset; use once_cell::sync::Lazy; pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt"; pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt-5.1-codex-max_migration_prompt"; static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| { vec![ ModelPreset { id: "gpt-5.2-codex".to_string(), model: "gpt-5.2-codex".to_string(), display_name: "gpt-5.2-codex".to_string(), description: "Latest frontier agentic coding model.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Fast responses with lighter reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Balances speed and reasoning depth for everyday tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Greater reasoning depth for complex problems".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::XHigh, description: "Extra high reasoning depth for complex problems".to_string(), }, ], is_default: true, upgrade: None, show_in_picker: true, supported_in_api: false, }, ModelPreset { id: "gpt-5.1-codex-max".to_string(), model: "gpt-5.1-codex-max".to_string(), display_name: "gpt-5.1-codex-max".to_string(), description: "Codex-optimized flagship for deep and fast reasoning.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Fast responses with lighter reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Balances speed and reasoning depth for everyday tasks".to_string(), }, 
ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Greater reasoning depth for complex problems".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::XHigh, description: "Extra high reasoning depth for complex problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, ModelPreset { id: "gpt-5.1-codex-mini".to_string(), model: "gpt-5.1-codex-mini".to_string(), display_name: "gpt-5.1-codex-mini".to_string(), description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Dynamically adjusts reasoning based on the task".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems" .to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, ModelPreset { id: "gpt-5.2".to_string(), model: "gpt-5.2".to_string(), display_name: "gpt-5.2".to_string(), description: "Latest frontier model with improvements across knowledge, reasoning and coding".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::XHigh, description: "Extra high reasoning for complex 
problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, ModelPreset { id: "bengalfox".to_string(), model: "bengalfox".to_string(), display_name: "bengalfox".to_string(), description: "bengalfox".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Fast responses with lighter reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Balances speed and reasoning depth for everyday tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Greater reasoning depth for complex problems".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::XHigh, description: "Extra high reasoning depth for complex problems".to_string(), }, ], is_default: false, upgrade: None, show_in_picker: false, supported_in_api: true, }, ModelPreset { id: "boomslang".to_string(), model: "boomslang".to_string(), display_name: "boomslang".to_string(), description: "boomslang".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::XHigh, description: "Extra high reasoning for complex problems".to_string(), }, ], is_default: false, upgrade: None, show_in_picker: false, supported_in_api: true, }, // Deprecated models. 
ModelPreset { id: "gpt-5-codex".to_string(), model: "gpt-5-codex".to_string(), display_name: "gpt-5-codex".to_string(), description: "Optimized for codex.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Fastest responses with limited reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Dynamically adjusts reasoning based on the task".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ModelPreset { id: "gpt-5-codex-mini".to_string(), model: "gpt-5-codex-mini".to_string(), display_name: "gpt-5-codex-mini".to_string(), description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Dynamically adjusts reasoning based on the task".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ModelPreset { id: "gpt-5.1-codex".to_string(), model: "gpt-5.1-codex".to_string(), display_name: "gpt-5.1-codex".to_string(), description: "Optimized for codex.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Fastest responses with limited reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Dynamically adjusts reasoning based on the task".to_string(), }, 
ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems" .to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ModelPreset { id: "gpt-5".to_string(), model: "gpt-5".to_string(), display_name: "gpt-5".to_string(), description: "Broad world knowledge with strong general reasoning.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Minimal, description: "Fastest responses with little reasoning".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or ambiguous problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ModelPreset { id: "gpt-5.1".to_string(), model: "gpt-5.1".to_string(), display_name: "gpt-5.1".to_string(), description: "Broad world knowledge with strong general reasoning.".to_string(), default_reasoning_effort: ReasoningEffort::Medium, supported_reasoning_efforts: vec![ ReasoningEffortPreset { effort: ReasoningEffort::Low, description: "Balances speed with some reasoning; useful for straightforward queries and short explanations".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::Medium, description: "Provides a solid balance of reasoning depth and latency for general-purpose tasks".to_string(), }, ReasoningEffortPreset { effort: ReasoningEffort::High, description: "Maximizes reasoning depth for complex or 
ambiguous problems".to_string(), }, ], is_default: false, upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ] }); fn gpt_52_codex_upgrade() -> ModelUpgrade { ModelUpgrade { id: "gpt-5.2-codex".to_string(), reasoning_effort_mapping: None, migration_config_key: "gpt-5.2-codex".to_string(), model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), upgrade_copy: Some( "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work." .to_string(), ), } } pub(super) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { PRESETS .iter() .filter(|preset| preset.show_in_picker) .cloned() .collect() } #[cfg(any(test, feature = "test-support"))] pub fn all_model_presets() -> &'static Vec<ModelPreset> { &PRESETS } #[cfg(test)] mod tests { use super::*; #[test] fn only_one_default_model_is_configured() { let default_models = PRESETS.iter().filter(|preset| preset.is_default).count(); assert!(default_models == 1); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/models_manager/cache.rs
codex-rs/core/src/models_manager/cache.rs
use chrono::DateTime; use chrono::Utc; use codex_protocol::openai_models::ModelInfo; use serde::Deserialize; use serde::Serialize; use std::io; use std::io::ErrorKind; use std::path::Path; use std::time::Duration; use tokio::fs; /// Serialized snapshot of models and metadata cached on disk. #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct ModelsCache { pub(crate) fetched_at: DateTime<Utc>, #[serde(default, skip_serializing_if = "Option::is_none")] pub(crate) etag: Option<String>, pub(crate) models: Vec<ModelInfo>, } impl ModelsCache { /// Returns `true` when the cache entry has not exceeded the configured TTL. pub(crate) fn is_fresh(&self, ttl: Duration) -> bool { if ttl.is_zero() { return false; } let Ok(ttl_duration) = chrono::Duration::from_std(ttl) else { return false; }; let age = Utc::now().signed_duration_since(self.fetched_at); age <= ttl_duration } } /// Read and deserialize the cache file if it exists. pub(crate) async fn load_cache(path: &Path) -> io::Result<Option<ModelsCache>> { match fs::read(path).await { Ok(contents) => { let cache = serde_json::from_slice(&contents) .map_err(|err| io::Error::new(ErrorKind::InvalidData, err.to_string()))?; Ok(Some(cache)) } Err(err) if err.kind() == ErrorKind::NotFound => Ok(None), Err(err) => Err(err), } } /// Persist the cache contents to disk, creating parent directories as needed. pub(crate) async fn save_cache(path: &Path, cache: &ModelsCache) -> io::Result<()> { if let Some(parent) = path.parent() { fs::create_dir_all(parent).await?; } let json = serde_json::to_vec_pretty(cache) .map_err(|err| io::Error::new(ErrorKind::InvalidData, err.to_string()))?; fs::write(path, json).await }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/auth/storage.rs
codex-rs/core/src/auth/storage.rs
//! Persistence backends for CLI auth credentials: plain file, OS keyring,
//! and an "auto" backend that prefers the keyring with file fallback.

use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use sha2::Digest;
use sha2::Sha256;
use std::fmt::Debug;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Read;
use std::io::Write;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::warn;

use crate::token_data::TokenData;
use codex_keyring_store::DefaultKeyringStore;
use codex_keyring_store::KeyringStore;

/// Determine where Codex should store CLI auth credentials.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthCredentialsStoreMode {
    #[default]
    /// Persist credentials in CODEX_HOME/auth.json.
    File,
    /// Persist credentials in the keyring. Fail if unavailable.
    Keyring,
    /// Use keyring when available; otherwise, fall back to a file in CODEX_HOME.
    Auto,
}

/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct AuthDotJson {
    #[serde(rename = "OPENAI_API_KEY")]
    pub openai_api_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tokens: Option<TokenData>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub last_refresh: Option<DateTime<Utc>>,
}

// Canonical location of the fallback credentials file inside CODEX_HOME.
pub(super) fn get_auth_file(codex_home: &Path) -> PathBuf {
    codex_home.join("auth.json")
}

// Remove auth.json if present; returns whether a file was actually deleted.
// A missing file is not an error (idempotent delete).
pub(super) fn delete_file_if_exists(codex_home: &Path) -> std::io::Result<bool> {
    let auth_file = get_auth_file(codex_home);
    match std::fs::remove_file(&auth_file) {
        Ok(()) => Ok(true),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
        Err(err) => Err(err),
    }
}

// Common interface implemented by the file, keyring, and auto backends.
pub(super) trait AuthStorageBackend: Debug + Send + Sync {
    /// Read stored credentials; `Ok(None)` when nothing is stored yet.
    fn load(&self) -> std::io::Result<Option<AuthDotJson>>;
    /// Persist credentials, replacing any previous value.
    fn save(&self, auth: &AuthDotJson) -> std::io::Result<()>;
    /// Remove stored credentials; returns whether anything was removed.
    fn delete(&self) -> std::io::Result<bool>;
}

#[derive(Clone, Debug)]
pub(super) struct FileAuthStorage {
    codex_home: PathBuf,
}

impl FileAuthStorage {
    pub(super) fn new(codex_home: PathBuf) -> Self {
        Self { codex_home }
    }

    /// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
    /// Returns the full AuthDotJson structure after refreshing if necessary.
    pub(super) fn try_read_auth_json(&self, auth_file: &Path) -> std::io::Result<AuthDotJson> {
        let mut file = File::open(auth_file)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?;
        Ok(auth_dot_json)
    }
}

impl AuthStorageBackend for FileAuthStorage {
    fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
        let auth_file = get_auth_file(&self.codex_home);
        let auth_dot_json = match self.try_read_auth_json(&auth_file) {
            Ok(auth) => auth,
            // No auth.json yet: report "not logged in" rather than an error.
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None),
            Err(err) => return Err(err),
        };
        Ok(Some(auth_dot_json))
    }

    fn save(&self, auth_dot_json: &AuthDotJson) -> std::io::Result<()> {
        let auth_file = get_auth_file(&self.codex_home);
        if let Some(parent) = auth_file.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let json_data = serde_json::to_string_pretty(auth_dot_json)?;
        let mut options = OpenOptions::new();
        options.truncate(true).write(true).create(true);
        #[cfg(unix)]
        {
            // 0o600: owner read/write only, since the file holds secrets.
            options.mode(0o600);
        }
        let mut file = options.open(auth_file)?;
        file.write_all(json_data.as_bytes())?;
        file.flush()?;
        Ok(())
    }

    fn delete(&self) -> std::io::Result<bool> {
        delete_file_if_exists(&self.codex_home)
    }
}

const KEYRING_SERVICE: &str = "Codex Auth";

// turns codex_home path into a stable, short key string
// (first 16 hex chars of SHA-256 over the canonicalized path, "cli|"-prefixed;
// the exact format is pinned by a unit test below)
fn compute_store_key(codex_home: &Path) -> std::io::Result<String> {
    let canonical = codex_home
        .canonicalize()
        .unwrap_or_else(|_| codex_home.to_path_buf());
    let path_str = canonical.to_string_lossy();
    let mut hasher = Sha256::new();
    hasher.update(path_str.as_bytes());
    let digest = hasher.finalize();
    let hex = format!("{digest:x}");
    let truncated = hex.get(..16).unwrap_or(&hex);
    Ok(format!("cli|{truncated}"))
}

#[derive(Clone, Debug)]
struct KeyringAuthStorage {
    codex_home: PathBuf,
    keyring_store: Arc<dyn KeyringStore>,
}

impl KeyringAuthStorage {
    fn new(codex_home: PathBuf, keyring_store: Arc<dyn KeyringStore>) -> Self {
        Self {
            codex_home,
            keyring_store,
        }
    }

    // Fetch and deserialize the JSON blob stored under `key`, if any.
    fn load_from_keyring(&self, key: &str) -> std::io::Result<Option<AuthDotJson>> {
        match self.keyring_store.load(KEYRING_SERVICE, key) {
            Ok(Some(serialized)) => serde_json::from_str(&serialized).map(Some).map_err(|err| {
                std::io::Error::other(format!(
                    "failed to deserialize CLI auth from keyring: {err}"
                ))
            }),
            Ok(None) => Ok(None),
            Err(error) => Err(std::io::Error::other(format!(
                "failed to load CLI auth from keyring: {}",
                error.message()
            ))),
        }
    }

    // Write the serialized credentials under `key`, logging on failure.
    fn save_to_keyring(&self, key: &str, value: &str) -> std::io::Result<()> {
        match self.keyring_store.save(KEYRING_SERVICE, key, value) {
            Ok(()) => Ok(()),
            Err(error) => {
                let message = format!(
                    "failed to write OAuth tokens to keyring: {}",
                    error.message()
                );
                warn!("{message}");
                Err(std::io::Error::other(message))
            }
        }
    }
}

impl AuthStorageBackend for KeyringAuthStorage {
    fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
        let key = compute_store_key(&self.codex_home)?;
        self.load_from_keyring(&key)
    }

    fn save(&self, auth: &AuthDotJson) -> std::io::Result<()> {
        let key = compute_store_key(&self.codex_home)?;
        // Simpler error mapping per style: prefer method reference over closure
        let serialized = serde_json::to_string(auth).map_err(std::io::Error::other)?;
        self.save_to_keyring(&key, &serialized)?;
        // Keyring write succeeded: best-effort removal of the stale file copy
        // so secrets do not linger on disk. Failure here is only logged.
        if let Err(err) = delete_file_if_exists(&self.codex_home) {
            warn!("failed to remove CLI auth fallback file: {err}");
        }
        Ok(())
    }

    fn delete(&self) -> std::io::Result<bool> {
        let key = compute_store_key(&self.codex_home)?;
        let keyring_removed = self
            .keyring_store
            .delete(KEYRING_SERVICE, &key)
            .map_err(|err| {
                std::io::Error::other(format!("failed to delete auth from keyring: {err}"))
            })?;
        // Also clear the on-disk fallback so neither copy survives.
        let file_removed = delete_file_if_exists(&self.codex_home)?;
        Ok(keyring_removed || file_removed)
    }
}

// Prefers the keyring; silently falls back to file storage on keyring errors.
#[derive(Clone, Debug)]
struct AutoAuthStorage {
    keyring_storage: Arc<KeyringAuthStorage>,
    file_storage: Arc<FileAuthStorage>,
}

impl AutoAuthStorage {
    fn new(codex_home: PathBuf, keyring_store: Arc<dyn KeyringStore>) -> Self {
        Self {
            keyring_storage: Arc::new(KeyringAuthStorage::new(codex_home.clone(), keyring_store)),
            file_storage: Arc::new(FileAuthStorage::new(codex_home)),
        }
    }
}

impl AuthStorageBackend for AutoAuthStorage {
    fn load(&self) -> std::io::Result<Option<AuthDotJson>> {
        match self.keyring_storage.load() {
            Ok(Some(auth)) => Ok(Some(auth)),
            // Keyring empty or unusable: consult the file copy instead.
            Ok(None) => self.file_storage.load(),
            Err(err) => {
                warn!("failed to load CLI auth from keyring, falling back to file storage: {err}");
                self.file_storage.load()
            }
        }
    }

    fn save(&self, auth: &AuthDotJson) -> std::io::Result<()> {
        match self.keyring_storage.save(auth) {
            Ok(()) => Ok(()),
            Err(err) => {
                warn!("failed to save auth to keyring, falling back to file storage: {err}");
                self.file_storage.save(auth)
            }
        }
    }

    fn delete(&self) -> std::io::Result<bool> {
        // Keyring storage will delete from disk as well
        self.keyring_storage.delete()
    }
}

/// Build the backend for `mode` using the platform's default keyring.
pub(super) fn create_auth_storage(
    codex_home: PathBuf,
    mode: AuthCredentialsStoreMode,
) -> Arc<dyn AuthStorageBackend> {
    let keyring_store: Arc<dyn KeyringStore> = Arc::new(DefaultKeyringStore);
    create_auth_storage_with_keyring_store(codex_home, mode, keyring_store)
}

// Injection point for tests: same as `create_auth_storage` but with an
// explicit keyring implementation.
fn create_auth_storage_with_keyring_store(
    codex_home: PathBuf,
    mode: AuthCredentialsStoreMode,
    keyring_store: Arc<dyn KeyringStore>,
) -> Arc<dyn AuthStorageBackend> {
    match mode {
        AuthCredentialsStoreMode::File => Arc::new(FileAuthStorage::new(codex_home)),
        AuthCredentialsStoreMode::Keyring => {
            Arc::new(KeyringAuthStorage::new(codex_home, keyring_store))
        }
        AuthCredentialsStoreMode::Auto => Arc::new(AutoAuthStorage::new(codex_home, keyring_store)),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::token_data::IdTokenInfo;
    use anyhow::Context;
    use base64::Engine;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tempfile::tempdir;

    use codex_keyring_store::tests::MockKeyringStore;
    use keyring::Error as KeyringError;

    #[tokio::test]
    async fn file_storage_load_returns_auth_dot_json() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
        let auth_dot_json = AuthDotJson {
            openai_api_key: Some("test-key".to_string()),
            tokens: None,
            last_refresh: Some(Utc::now()),
        };
        storage
            .save(&auth_dot_json)
            .context("failed to save auth file")?;
        let loaded = storage.load().context("failed to load auth file")?;
        assert_eq!(Some(auth_dot_json), loaded);
        Ok(())
    }

    #[tokio::test]
    async fn file_storage_save_persists_auth_dot_json() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let storage = FileAuthStorage::new(codex_home.path().to_path_buf());
        let auth_dot_json = AuthDotJson {
            openai_api_key: Some("test-key".to_string()),
            tokens: None,
            last_refresh: Some(Utc::now()),
        };
        let file = get_auth_file(codex_home.path());
        storage
            .save(&auth_dot_json)
            .context("failed to save auth file")?;
        let same_auth_dot_json = storage
            .try_read_auth_json(&file)
            .context("failed to read auth file after save")?;
        assert_eq!(auth_dot_json, same_auth_dot_json);
        Ok(())
    }

    #[test]
    fn file_storage_delete_removes_auth_file() -> anyhow::Result<()> {
        let dir = tempdir()?;
        let auth_dot_json = AuthDotJson {
            openai_api_key: Some("sk-test-key".to_string()),
            tokens: None,
            last_refresh: None,
        };
        let storage = create_auth_storage(dir.path().to_path_buf(), AuthCredentialsStoreMode::File);
        storage.save(&auth_dot_json)?;
        assert!(dir.path().join("auth.json").exists());
        let storage = FileAuthStorage::new(dir.path().to_path_buf());
        let removed = storage.delete()?;
        assert!(removed);
        assert!(!dir.path().join("auth.json").exists());
        Ok(())
    }

    // Seeds both the mock keyring and a stale fallback file, so delete tests
    // can verify both copies are removed.
    fn seed_keyring_and_fallback_auth_file_for_delete<F>(
        mock_keyring: &MockKeyringStore,
        codex_home: &Path,
        compute_key: F,
    ) -> anyhow::Result<(String, PathBuf)>
    where
        F: FnOnce() -> std::io::Result<String>,
    {
        let key = compute_key()?;
        mock_keyring.save(KEYRING_SERVICE, &key, "{}")?;
        let auth_file = get_auth_file(codex_home);
        std::fs::write(&auth_file, "stale")?;
        Ok((key, auth_file))
    }

    fn seed_keyring_with_auth<F>(
        mock_keyring: &MockKeyringStore,
        compute_key: F,
        auth: &AuthDotJson,
    ) -> anyhow::Result<()>
    where
        F: FnOnce() -> std::io::Result<String>,
    {
        let key = compute_key()?;
        let serialized = serde_json::to_string(auth)?;
        mock_keyring.save(KEYRING_SERVICE, &key, &serialized)?;
        Ok(())
    }

    fn assert_keyring_saved_auth_and_removed_fallback(
        mock_keyring: &MockKeyringStore,
        key: &str,
        codex_home: &Path,
        expected: &AuthDotJson,
    ) {
        let saved_value = mock_keyring
            .saved_value(key)
            .expect("keyring entry should exist");
        let expected_serialized = serde_json::to_string(expected).expect("serialize expected auth");
        assert_eq!(saved_value, expected_serialized);
        let auth_file = get_auth_file(codex_home);
        assert!(
            !auth_file.exists(),
            "fallback auth.json should be removed after keyring save"
        );
    }

    // Builds an unsigned ("alg": "none") fake JWT whose claims embed `prefix`,
    // then parses it into an IdTokenInfo.
    fn id_token_with_prefix(prefix: &str) -> IdTokenInfo {
        #[derive(Serialize)]
        struct Header {
            alg: &'static str,
            typ: &'static str,
        }
        let header = Header {
            alg: "none",
            typ: "JWT",
        };
        let payload = json!({
            "email": format!("{prefix}@example.com"),
            "https://api.openai.com/auth": {
                "chatgpt_account_id": format!("{prefix}-account"),
            },
        });
        let encode = |bytes: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes);
        let header_b64 = encode(&serde_json::to_vec(&header).expect("serialize header"));
        let payload_b64 = encode(&serde_json::to_vec(&payload).expect("serialize payload"));
        let signature_b64 = encode(b"sig");
        let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}");
        crate::token_data::parse_id_token(&fake_jwt).expect("fake JWT should parse")
    }

    // Full AuthDotJson fixture with every field derived from `prefix`.
    fn auth_with_prefix(prefix: &str) -> AuthDotJson {
        AuthDotJson {
            openai_api_key: Some(format!("{prefix}-api-key")),
            tokens: Some(TokenData {
                id_token: id_token_with_prefix(prefix),
                access_token: format!("{prefix}-access"),
                refresh_token: format!("{prefix}-refresh"),
                account_id: Some(format!("{prefix}-account-id")),
            }),
            last_refresh: None,
        }
    }

    #[test]
    fn keyring_auth_storage_load_returns_deserialized_auth() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = KeyringAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let expected = AuthDotJson {
            openai_api_key: Some("sk-test".to_string()),
            tokens: None,
            last_refresh: None,
        };
        seed_keyring_with_auth(
            &mock_keyring,
            || compute_store_key(codex_home.path()),
            &expected,
        )?;
        let loaded = storage.load()?;
        assert_eq!(Some(expected), loaded);
        Ok(())
    }

    #[test]
    fn keyring_auth_storage_compute_store_key_for_home_directory() -> anyhow::Result<()> {
        // Pins the key-derivation format; changing it would orphan stored creds.
        let codex_home = PathBuf::from("~/.codex");
        let key = compute_store_key(codex_home.as_path())?;
        assert_eq!(key, "cli|940db7b1d0e4eb40");
        Ok(())
    }

    #[test]
    fn keyring_auth_storage_save_persists_and_removes_fallback_file() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = KeyringAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let auth_file = get_auth_file(codex_home.path());
        std::fs::write(&auth_file, "stale")?;
        let auth = AuthDotJson {
            openai_api_key: None,
            tokens: Some(TokenData {
                id_token: Default::default(),
                access_token: "access".to_string(),
                refresh_token: "refresh".to_string(),
                account_id: Some("account".to_string()),
            }),
            last_refresh: Some(Utc::now()),
        };
        storage.save(&auth)?;
        let key = compute_store_key(codex_home.path())?;
        assert_keyring_saved_auth_and_removed_fallback(
            &mock_keyring,
            &key,
            codex_home.path(),
            &auth,
        );
        Ok(())
    }

    #[test]
    fn keyring_auth_storage_delete_removes_keyring_and_file() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = KeyringAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let (key, auth_file) = seed_keyring_and_fallback_auth_file_for_delete(
            &mock_keyring,
            codex_home.path(),
            || compute_store_key(codex_home.path()),
        )?;
        let removed = storage.delete()?;
        assert!(removed, "delete should report removal");
        assert!(
            !mock_keyring.contains(&key),
            "keyring entry should be removed"
        );
        assert!(
            !auth_file.exists(),
            "fallback auth.json should be removed after keyring delete"
        );
        Ok(())
    }

    #[test]
    fn auto_auth_storage_load_prefers_keyring_value() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let keyring_auth = auth_with_prefix("keyring");
        seed_keyring_with_auth(
            &mock_keyring,
            || compute_store_key(codex_home.path()),
            &keyring_auth,
        )?;
        let file_auth = auth_with_prefix("file");
        storage.file_storage.save(&file_auth)?;
        let loaded = storage.load()?;
        assert_eq!(loaded, Some(keyring_auth));
        Ok(())
    }

    #[test]
    fn auto_auth_storage_load_uses_file_when_keyring_empty() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(codex_home.path().to_path_buf(), Arc::new(mock_keyring));
        let expected = auth_with_prefix("file-only");
        storage.file_storage.save(&expected)?;
        let loaded = storage.load()?;
        assert_eq!(loaded, Some(expected));
        Ok(())
    }

    #[test]
    fn auto_auth_storage_load_falls_back_when_keyring_errors() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let key = compute_store_key(codex_home.path())?;
        mock_keyring.set_error(&key, KeyringError::Invalid("error".into(), "load".into()));
        let expected = auth_with_prefix("fallback");
        storage.file_storage.save(&expected)?;
        let loaded = storage.load()?;
        assert_eq!(loaded, Some(expected));
        Ok(())
    }

    #[test]
    fn auto_auth_storage_save_prefers_keyring() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let key = compute_store_key(codex_home.path())?;
        let stale = auth_with_prefix("stale");
        storage.file_storage.save(&stale)?;
        let expected = auth_with_prefix("to-save");
        storage.save(&expected)?;
        assert_keyring_saved_auth_and_removed_fallback(
            &mock_keyring,
            &key,
            codex_home.path(),
            &expected,
        );
        Ok(())
    }

    #[test]
    fn auto_auth_storage_save_falls_back_when_keyring_errors() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let key = compute_store_key(codex_home.path())?;
        mock_keyring.set_error(&key, KeyringError::Invalid("error".into(), "save".into()));
        let auth = auth_with_prefix("fallback");
        storage.save(&auth)?;
        let auth_file = get_auth_file(codex_home.path());
        assert!(
            auth_file.exists(),
            "fallback auth.json should be created when keyring save fails"
        );
        let saved = storage
            .file_storage
            .load()?
            .context("fallback auth should exist")?;
        assert_eq!(saved, auth);
        assert!(
            mock_keyring.saved_value(&key).is_none(),
            "keyring should not contain value when save fails"
        );
        Ok(())
    }

    #[test]
    fn auto_auth_storage_delete_removes_keyring_and_file() -> anyhow::Result<()> {
        let codex_home = tempdir()?;
        let mock_keyring = MockKeyringStore::default();
        let storage = AutoAuthStorage::new(
            codex_home.path().to_path_buf(),
            Arc::new(mock_keyring.clone()),
        );
        let (key, auth_file) = seed_keyring_and_fallback_auth_file_for_delete(
            &mock_keyring,
            codex_home.path(),
            || compute_store_key(codex_home.path()),
        )?;
        let removed = storage.delete()?;
        assert!(removed, "delete should report removal");
        assert!(
            !mock_keyring.contains(&key),
            "keyring entry should be removed"
        );
        assert!(
            !auth_file.exists(),
            "fallback auth.json should be removed after delete"
        );
        Ok(())
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/command_safety/windows_safe_commands.rs
codex-rs/core/src/command_safety/windows_safe_commands.rs
use base64::Engine; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use serde::Deserialize; use std::path::Path; use std::process::Command; use std::sync::LazyLock; const POWERSHELL_PARSER_SCRIPT: &str = include_str!("powershell_parser.ps1"); /// On Windows, we conservatively allow only clearly read-only PowerShell invocations /// that match a small safelist. Anything else (including direct CMD commands) is unsafe. pub fn is_safe_command_windows(command: &[String]) -> bool { if let Some(commands) = try_parse_powershell_command_sequence(command) { commands .iter() .all(|cmd| is_safe_powershell_command(cmd.as_slice())) } else { // Only PowerShell invocations are allowed on Windows for now; anything else is unsafe. false } } /// Returns each command sequence if the invocation starts with a PowerShell binary. /// For example, the tokens from `pwsh Get-ChildItem | Measure-Object` become two sequences. fn try_parse_powershell_command_sequence(command: &[String]) -> Option<Vec<Vec<String>>> { let (exe, rest) = command.split_first()?; if is_powershell_executable(exe) { parse_powershell_invocation(exe, rest) } else { None } } /// Parses a PowerShell invocation into discrete command vectors, rejecting unsafe patterns. fn parse_powershell_invocation(executable: &str, args: &[String]) -> Option<Vec<Vec<String>>> { if args.is_empty() { // Examples rejected here: "pwsh" and "powershell.exe" with no additional arguments. return None; } let mut idx = 0; while idx < args.len() { let arg = &args[idx]; let lower = arg.to_ascii_lowercase(); match lower.as_str() { "-command" | "/command" | "-c" => { let script = args.get(idx + 1)?; if idx + 2 != args.len() { // Reject if there is more than one token representing the actual command. // Examples rejected here: "pwsh -Command foo bar" and "powershell -c ls extra". 
return None; } return parse_powershell_script(executable, script); } _ if lower.starts_with("-command:") || lower.starts_with("/command:") => { if idx + 1 != args.len() { // Reject if there are more tokens after the command itself. // Examples rejected here: "pwsh -Command:dir C:\\" and "powershell /Command:dir C:\\" with trailing args. return None; } let script = arg.split_once(':')?.1; return parse_powershell_script(executable, script); } // Benign, no-arg flags we tolerate. "-nologo" | "-noprofile" | "-noninteractive" | "-mta" | "-sta" => { idx += 1; continue; } // Explicitly forbidden/opaque or unnecessary for read-only operations. "-encodedcommand" | "-ec" | "-file" | "/file" | "-windowstyle" | "-executionpolicy" | "-workingdirectory" => { // Examples rejected here: "pwsh -EncodedCommand ..." and "powershell -File script.ps1". return None; } // Unknown switch → bail conservatively. _ if lower.starts_with('-') => { // Examples rejected here: "pwsh -UnknownFlag" and "powershell -foo bar". return None; } // If we hit non-flag tokens, treat the remainder as a command sequence. // This happens if powershell is invoked without -Command, e.g. // ["pwsh", "-NoLogo", "git", "-c", "core.pager=cat", "status"] _ => { let script = join_arguments_as_script(&args[idx..]); return parse_powershell_script(executable, &script); } } } // Examples rejected here: "pwsh" and "powershell.exe -NoLogo" without a script. None } /// Tokenizes an inline PowerShell script and delegates to the command splitter. /// Examples of when this is called: pwsh.exe -Command '<script>' or pwsh.exe -Command:<script> fn parse_powershell_script(executable: &str, script: &str) -> Option<Vec<Vec<String>>> { if let PowershellParseOutcome::Commands(commands) = parse_with_powershell_ast(executable, script) { Some(commands) } else { None } } /// Returns true when the executable name is one of the supported PowerShell binaries. 
fn is_powershell_executable(exe: &str) -> bool { let executable_name = Path::new(exe) .file_name() .and_then(|osstr| osstr.to_str()) .unwrap_or(exe) .to_ascii_lowercase(); matches!( executable_name.as_str(), "powershell" | "powershell.exe" | "pwsh" | "pwsh.exe" ) } /// Attempts to parse PowerShell using the real PowerShell parser, returning every pipeline element /// as a flat argv vector when possible. If parsing fails or the AST includes unsupported constructs, /// we conservatively reject the command instead of trying to split it manually. fn parse_with_powershell_ast(executable: &str, script: &str) -> PowershellParseOutcome { let encoded_script = encode_powershell_base64(script); let encoded_parser_script = encoded_parser_script(); match Command::new(executable) .args([ "-NoLogo", "-NoProfile", "-NonInteractive", "-EncodedCommand", encoded_parser_script, ]) .env("CODEX_POWERSHELL_PAYLOAD", &encoded_script) .output() { Ok(output) if output.status.success() => { if let Ok(result) = serde_json::from_slice::<PowershellParserOutput>(output.stdout.as_slice()) { result.into_outcome() } else { PowershellParseOutcome::Failed } } _ => PowershellParseOutcome::Failed, } } fn encode_powershell_base64(script: &str) -> String { let mut utf16 = Vec::with_capacity(script.len() * 2); for unit in script.encode_utf16() { utf16.extend_from_slice(&unit.to_le_bytes()); } BASE64_STANDARD.encode(utf16) } fn encoded_parser_script() -> &'static str { static ENCODED: LazyLock<String> = LazyLock::new(|| encode_powershell_base64(POWERSHELL_PARSER_SCRIPT)); &ENCODED } #[derive(Deserialize)] #[serde(deny_unknown_fields)] struct PowershellParserOutput { status: String, commands: Option<Vec<Vec<String>>>, } impl PowershellParserOutput { fn into_outcome(self) -> PowershellParseOutcome { match self.status.as_str() { "ok" => self .commands .filter(|commands| { !commands.is_empty() && commands .iter() .all(|cmd| !cmd.is_empty() && cmd.iter().all(|word| !word.is_empty())) }) 
.map(PowershellParseOutcome::Commands) .unwrap_or(PowershellParseOutcome::Unsupported), "unsupported" => PowershellParseOutcome::Unsupported, _ => PowershellParseOutcome::Failed, } } } enum PowershellParseOutcome { Commands(Vec<Vec<String>>), Unsupported, Failed, } fn join_arguments_as_script(args: &[String]) -> String { let mut words = Vec::with_capacity(args.len()); if let Some((first, rest)) = args.split_first() { words.push(first.clone()); for arg in rest { words.push(quote_argument(arg)); } } words.join(" ") } fn quote_argument(arg: &str) -> String { if arg.is_empty() { return "''".to_string(); } if arg.chars().all(|ch| !ch.is_whitespace()) { return arg.to_string(); } format!("'{}'", arg.replace('\'', "''")) } /// Validates that a parsed PowerShell command stays within our read-only safelist. /// Everything before this is parsing, and rejecting things that make us feel uncomfortable. fn is_safe_powershell_command(words: &[String]) -> bool { if words.is_empty() { // Examples rejected here: "pwsh -Command ''" and "pwsh -Command \"\"". return false; } // Reject nested unsafe cmdlets inside parentheses or arguments for w in words.iter() { let inner = w .trim_matches(|c| c == '(' || c == ')') .trim_start_matches('-') .to_ascii_lowercase(); if matches!( inner.as_str(), "set-content" | "add-content" | "out-file" | "new-item" | "remove-item" | "move-item" | "copy-item" | "rename-item" | "start-process" | "stop-process" ) { // Examples rejected here: "Write-Output (Set-Content foo6.txt 'abc')" and "Get-Content (New-Item bar.txt)". 
return false; } } let command = words[0] .trim_matches(|c| c == '(' || c == ')') .trim_start_matches('-') .to_ascii_lowercase(); match command.as_str() { "echo" | "write-output" | "write-host" => true, // (no redirection allowed) "dir" | "ls" | "get-childitem" | "gci" => true, "cat" | "type" | "gc" | "get-content" => true, "select-string" | "sls" | "findstr" => true, "measure-object" | "measure" => true, "get-location" | "gl" | "pwd" => true, "test-path" | "tp" => true, "resolve-path" | "rvpa" => true, "select-object" | "select" => true, "get-item" => true, "git" => is_safe_git_command(words), "rg" => is_safe_ripgrep(words), // Extra safety: explicitly prohibit common side-effecting cmdlets regardless of args. "set-content" | "add-content" | "out-file" | "new-item" | "remove-item" | "move-item" | "copy-item" | "rename-item" | "start-process" | "stop-process" => { // Examples rejected here: "pwsh -Command 'Set-Content notes.txt data'" and "pwsh -Command 'Remove-Item temp.log'". false } _ => { // Examples rejected here: "pwsh -Command 'Invoke-WebRequest https://example.com'" and "pwsh -Command 'Start-Service Spooler'". false } } } /// Checks that an `rg` invocation avoids options that can spawn arbitrary executables. fn is_safe_ripgrep(words: &[String]) -> bool { const UNSAFE_RIPGREP_OPTIONS_WITH_ARGS: &[&str] = &["--pre", "--hostname-bin"]; const UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS: &[&str] = &["--search-zip", "-z"]; !words.iter().skip(1).any(|arg| { let arg_lc = arg.to_ascii_lowercase(); // Examples rejected here: "pwsh -Command 'rg --pre cat pattern'" and "pwsh -Command 'rg --search-zip pattern'". UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS.contains(&arg_lc.as_str()) || UNSAFE_RIPGREP_OPTIONS_WITH_ARGS .iter() .any(|opt| arg_lc == *opt || arg_lc.starts_with(&format!("{opt}="))) }) } /// Ensures a Git command sticks to whitelisted read-only subcommands and flags. 
fn is_safe_git_command(words: &[String]) -> bool { const SAFE_SUBCOMMANDS: &[&str] = &["status", "log", "show", "diff", "cat-file"]; let mut iter = words.iter().skip(1); while let Some(arg) = iter.next() { let arg_lc = arg.to_ascii_lowercase(); if arg.starts_with('-') { if arg.eq_ignore_ascii_case("-c") || arg.eq_ignore_ascii_case("--config") { if iter.next().is_none() { // Examples rejected here: "pwsh -Command 'git -c'" and "pwsh -Command 'git --config'". return false; } continue; } if arg_lc.starts_with("-c=") || arg_lc.starts_with("--config=") || arg_lc.starts_with("--git-dir=") || arg_lc.starts_with("--work-tree=") { continue; } if arg.eq_ignore_ascii_case("--git-dir") || arg.eq_ignore_ascii_case("--work-tree") { if iter.next().is_none() { // Examples rejected here: "pwsh -Command 'git --git-dir'" and "pwsh -Command 'git --work-tree'". return false; } continue; } continue; } return SAFE_SUBCOMMANDS.contains(&arg_lc.as_str()); } // Examples rejected here: "pwsh -Command 'git'" and "pwsh -Command 'git status --short | Remove-Item foo'". false } #[cfg(all(test, windows))] mod tests { use super::*; use crate::powershell::try_find_pwsh_executable_blocking; use std::string::ToString; /// Converts a slice of string literals into owned `String`s for the tests. 
fn vec_str(args: &[&str]) -> Vec<String> { args.iter().map(ToString::to_string).collect() } #[test] fn recognizes_safe_powershell_wrappers() { assert!(is_safe_command_windows(&vec_str(&[ "powershell.exe", "-NoLogo", "-Command", "Get-ChildItem -Path .", ]))); assert!(is_safe_command_windows(&vec_str(&[ "powershell.exe", "-NoProfile", "-Command", "git status", ]))); assert!(is_safe_command_windows(&vec_str(&[ "powershell.exe", "Get-Content", "Cargo.toml", ]))); // pwsh parity if let Some(pwsh) = try_find_pwsh_executable_blocking() { assert!(is_safe_command_windows(&[ pwsh.as_path().to_str().unwrap().into(), "-NoProfile".to_string(), "-Command".to_string(), "Get-ChildItem".to_string(), ])); } } #[test] fn accepts_full_path_powershell_invocations() { if !cfg!(windows) { // Windows only because on Linux path splitting doesn't handle `/` separators properly return; } if let Some(pwsh) = try_find_pwsh_executable_blocking() { assert!(is_safe_command_windows(&[ pwsh.as_path().to_str().unwrap().into(), "-NoProfile".to_string(), "-Command".to_string(), "Get-ChildItem -Path .".to_string(), ])); } assert!(is_safe_command_windows(&vec_str(&[ r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe", "-Command", "Get-Content Cargo.toml", ]))); } #[test] fn allows_read_only_pipelines_and_git_usage() { let Some(pwsh) = try_find_pwsh_executable_blocking() else { return; }; let pwsh: String = pwsh.as_path().to_str().unwrap().into(); assert!(is_safe_command_windows(&[ pwsh.clone(), "-NoLogo".to_string(), "-NoProfile".to_string(), "-Command".to_string(), "rg --files-with-matches foo | Measure-Object | Select-Object -ExpandProperty Count" .to_string() ])); assert!(is_safe_command_windows(&[ pwsh.clone(), "-NoLogo".to_string(), "-NoProfile".to_string(), "-Command".to_string(), "Get-Content foo.rs | Select-Object -Skip 200".to_string() ])); assert!(is_safe_command_windows(&[ pwsh.clone(), "-NoLogo".to_string(), "-NoProfile".to_string(), "-Command".to_string(), "git -c core.pager=cat 
show HEAD:foo.rs".to_string() ])); assert!(is_safe_command_windows(&[ pwsh.clone(), "-Command".to_string(), "-git cat-file -p HEAD:foo.rs".to_string() ])); assert!(is_safe_command_windows(&[ pwsh.clone(), "-Command".to_string(), "(Get-Content foo.rs -Raw)".to_string() ])); assert!(is_safe_command_windows(&[ pwsh, "-Command".to_string(), "Get-Item foo.rs | Select-Object Length".to_string() ])); } #[test] fn rejects_powershell_commands_with_side_effects() { assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-NoLogo", "-Command", "Remove-Item foo.txt", ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-NoProfile", "-Command", "rg --pre cat", ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Set-Content foo.txt 'hello'", ]))); // Redirections are blocked assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "echo hi > out.txt", ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-Content x | Out-File y", ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Write-Output foo 2> err.txt", ]))); // Call operator is blocked assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "& Remove-Item foo", ]))); // Chained safe + unsafe must fail assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-ChildItem; Remove-Item foo", ]))); // Nested unsafe cmdlet inside safe command must fail assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Write-Output (Set-Content foo6.txt 'abc')", ]))); // Additional nested unsafe cmdlet examples must fail assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Write-Host (Remove-Item foo.txt)", ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-Content (New-Item bar.txt)", ]))); // Unsafe @ expansion. 
assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "ls @(calc.exe)" ]))); // Unsupported constructs that the AST parser refuses (no fallback to manual splitting). assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "ls && pwd" ]))); // Sub-expressions are rejected even if they contain otherwise safe commands. assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Write-Output $(Get-Content foo)" ]))); // Empty words from the parser (e.g. '') are rejected. assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "''" ]))); } #[test] fn accepts_constant_expression_arguments() { assert!(is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-Content 'foo bar'" ]))); assert!(is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-Content \"foo bar\"" ]))); } #[test] fn rejects_dynamic_arguments() { assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Get-Content $foo" ]))); assert!(!is_safe_command_windows(&vec_str(&[ "powershell.exe", "-Command", "Write-Output \"foo $bar\"" ]))); } #[test] fn uses_invoked_powershell_variant_for_parsing() { if !cfg!(windows) { return; } let chain = "pwd && ls"; assert!( !is_safe_command_windows(&vec_str(&[ "powershell.exe", "-NoProfile", "-Command", chain, ])), "`{chain}` is not recognized by powershell.exe" ); if let Some(pwsh) = try_find_pwsh_executable_blocking() { assert!( is_safe_command_windows(&[ pwsh.as_path().to_str().unwrap().into(), "-NoProfile".to_string(), "-Command".to_string(), chain.to_string(), ]), "`{chain}` should be considered safe to pwsh.exe" ); } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/command_safety/windows_dangerous_commands.rs
codex-rs/core/src/command_safety/windows_dangerous_commands.rs
use std::path::Path; use once_cell::sync::Lazy; use regex::Regex; use shlex::split as shlex_split; use url::Url; pub fn is_dangerous_command_windows(command: &[String]) -> bool { // Prefer structured parsing for PowerShell/CMD so we can spot URL-bearing // invocations of ShellExecute-style entry points before falling back to // simple argv heuristics. if is_dangerous_powershell(command) { return true; } if is_dangerous_cmd(command) { return true; } is_direct_gui_launch(command) } fn is_dangerous_powershell(command: &[String]) -> bool { let Some((exe, rest)) = command.split_first() else { return false; }; if !is_powershell_executable(exe) { return false; } // Parse the PowerShell invocation to get a flat token list we can scan for // dangerous cmdlets/COM calls plus any URL-looking arguments. This is a // best-effort shlex split of the script text, not a full PS parser. let Some(parsed) = parse_powershell_invocation(rest) else { return false; }; let tokens_lc: Vec<String> = parsed .tokens .iter() .map(|t| t.trim_matches('\'').trim_matches('"').to_ascii_lowercase()) .collect(); let has_url = args_have_url(&parsed.tokens); if has_url && tokens_lc.iter().any(|t| { matches!( t.as_str(), "start-process" | "start" | "saps" | "invoke-item" | "ii" ) || t.contains("start-process") || t.contains("invoke-item") }) { return true; } if has_url && tokens_lc .iter() .any(|t| t.contains("shellexecute") || t.contains("shell.application")) { return true; } if let Some(first) = tokens_lc.first() { // Legacy ShellExecute path via url.dll if first == "rundll32" && tokens_lc .iter() .any(|t| t.contains("url.dll,fileprotocolhandler")) && has_url { return true; } if first == "mshta" && has_url { return true; } if is_browser_executable(first) && has_url { return true; } if matches!(first.as_str(), "explorer" | "explorer.exe") && has_url { return true; } } false } fn is_dangerous_cmd(command: &[String]) -> bool { let Some((exe, rest)) = command.split_first() else { return false; }; let 
Some(base) = executable_basename(exe) else { return false; }; if base != "cmd" && base != "cmd.exe" { return false; } let mut iter = rest.iter(); for arg in iter.by_ref() { let lower = arg.to_ascii_lowercase(); match lower.as_str() { "/c" | "/r" | "-c" => break, _ if lower.starts_with('/') => continue, // Unknown tokens before the command body => bail. _ => return false, } } let Some(first_cmd) = iter.next() else { return false; }; // Classic `cmd /c start https://...` ShellExecute path. if !first_cmd.eq_ignore_ascii_case("start") { return false; } let remaining: Vec<String> = iter.cloned().collect(); args_have_url(&remaining) } fn is_direct_gui_launch(command: &[String]) -> bool { let Some((exe, rest)) = command.split_first() else { return false; }; let Some(base) = executable_basename(exe) else { return false; }; // Explorer/rundll32/mshta or direct browser exe with a URL anywhere in args. if matches!(base.as_str(), "explorer" | "explorer.exe") && args_have_url(rest) { return true; } if matches!(base.as_str(), "mshta" | "mshta.exe") && args_have_url(rest) { return true; } if (base == "rundll32" || base == "rundll32.exe") && rest.iter().any(|t| { t.to_ascii_lowercase() .contains("url.dll,fileprotocolhandler") }) && args_have_url(rest) { return true; } if is_browser_executable(&base) && args_have_url(rest) { return true; } false } fn args_have_url(args: &[String]) -> bool { args.iter().any(|arg| looks_like_url(arg)) } fn looks_like_url(token: &str) -> bool { // Strip common PowerShell punctuation around inline URLs (quotes, parens, trailing semicolons). // Capture the middle token after trimming leading quotes/parens/whitespace and trailing semicolons/closing parens. static RE: Lazy<Option<Regex>> = Lazy::new(|| Regex::new(r#"^[ "'\(\s]*([^\s"'\);]+)[\s;\)]*$"#).ok()); // If the token embeds a URL alongside other text (e.g., Start-Process('https://...')) // as a single shlex token, grab the substring starting at the first URL prefix. 
let urlish = token .find("https://") .or_else(|| token.find("http://")) .map(|idx| &token[idx..]) .unwrap_or(token); let candidate = RE .as_ref() .and_then(|re| re.captures(urlish)) .and_then(|caps| caps.get(1)) .map(|m| m.as_str()) .unwrap_or(urlish); let Ok(url) = Url::parse(candidate) else { return false; }; matches!(url.scheme(), "http" | "https") } fn executable_basename(exe: &str) -> Option<String> { Path::new(exe) .file_name() .and_then(|osstr| osstr.to_str()) .map(str::to_ascii_lowercase) } fn is_powershell_executable(exe: &str) -> bool { matches!( executable_basename(exe).as_deref(), Some("powershell") | Some("powershell.exe") | Some("pwsh") | Some("pwsh.exe") ) } fn is_browser_executable(name: &str) -> bool { matches!( name, "chrome" | "chrome.exe" | "msedge" | "msedge.exe" | "firefox" | "firefox.exe" | "iexplore" | "iexplore.exe" ) } struct ParsedPowershell { tokens: Vec<String>, } fn parse_powershell_invocation(args: &[String]) -> Option<ParsedPowershell> { if args.is_empty() { return None; } let mut idx = 0; while idx < args.len() { let arg = &args[idx]; let lower = arg.to_ascii_lowercase(); match lower.as_str() { "-command" | "/command" | "-c" => { let script = args.get(idx + 1)?; if idx + 2 != args.len() { return None; } let tokens = shlex_split(script)?; return Some(ParsedPowershell { tokens }); } _ if lower.starts_with("-command:") || lower.starts_with("/command:") => { if idx + 1 != args.len() { return None; } let (_, script) = arg.split_once(':')?; let tokens = shlex_split(script)?; return Some(ParsedPowershell { tokens }); } "-nologo" | "-noprofile" | "-noninteractive" | "-mta" | "-sta" => { idx += 1; } _ if lower.starts_with('-') => { idx += 1; } _ => { let rest = args[idx..].to_vec(); return Some(ParsedPowershell { tokens: rest }); } } } None } #[cfg(test)] mod tests { use super::is_dangerous_command_windows; fn vec_str(items: &[&str]) -> Vec<String> { items.iter().map(std::string::ToString::to_string).collect() } #[test] fn 
powershell_start_process_url_is_dangerous() { assert!(is_dangerous_command_windows(&vec_str(&[ "powershell", "-NoLogo", "-Command", "Start-Process 'https://example.com'" ]))); } #[test] fn powershell_start_process_url_with_trailing_semicolon_is_dangerous() { assert!(is_dangerous_command_windows(&vec_str(&[ "powershell", "-Command", "Start-Process('https://example.com');" ]))); } #[test] fn powershell_start_process_local_is_not_flagged() { assert!(!is_dangerous_command_windows(&vec_str(&[ "powershell", "-Command", "Start-Process notepad.exe" ]))); } #[test] fn cmd_start_with_url_is_dangerous() { assert!(is_dangerous_command_windows(&vec_str(&[ "cmd", "/c", "start", "https://example.com" ]))); } #[test] fn msedge_with_url_is_dangerous() { assert!(is_dangerous_command_windows(&vec_str(&[ "msedge.exe", "https://example.com" ]))); } #[test] fn explorer_with_directory_is_not_flagged() { assert!(!is_dangerous_command_windows(&vec_str(&[ "explorer.exe", "." ]))); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/command_safety/mod.rs
codex-rs/core/src/command_safety/mod.rs
pub mod is_dangerous_command; pub mod is_safe_command; pub mod windows_safe_commands;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/command_safety/is_dangerous_command.rs
codex-rs/core/src/command_safety/is_dangerous_command.rs
use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; use crate::sandboxing::SandboxPermissions; use crate::bash::parse_shell_lc_plain_commands; use crate::is_safe_command::is_known_safe_command; #[cfg(windows)] #[path = "windows_dangerous_commands.rs"] mod windows_dangerous_commands; pub fn requires_initial_appoval( policy: AskForApproval, sandbox_policy: &SandboxPolicy, command: &[String], sandbox_permissions: SandboxPermissions, ) -> bool { if is_known_safe_command(command) { return false; } match policy { AskForApproval::Never | AskForApproval::OnFailure => false, AskForApproval::OnRequest => { // In DangerFullAccess or ExternalSandbox, only prompt if the command looks dangerous. if matches!( sandbox_policy, SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } ) { return command_might_be_dangerous(command); } // In restricted sandboxes (ReadOnly/WorkspaceWrite), do not prompt for // non‑escalated, non‑dangerous commands — let the sandbox enforce // restrictions (e.g., block network/write) without a user prompt. if sandbox_permissions.requires_escalated_permissions() { return true; } command_might_be_dangerous(command) } AskForApproval::UnlessTrusted => !is_known_safe_command(command), } } pub fn command_might_be_dangerous(command: &[String]) -> bool { #[cfg(windows)] { if windows_dangerous_commands::is_dangerous_command_windows(command) { return true; } } if is_dangerous_to_call_with_exec(command) { return true; } // Support `bash -lc "<script>"` where the any part of the script might contain a dangerous command. 
if let Some(all_commands) = parse_shell_lc_plain_commands(command) && all_commands .iter() .any(|cmd| is_dangerous_to_call_with_exec(cmd)) { return true; } false } fn is_dangerous_to_call_with_exec(command: &[String]) -> bool { let cmd0 = command.first().map(String::as_str); match cmd0 { Some(cmd) if cmd.ends_with("git") || cmd.ends_with("/git") => { matches!(command.get(1).map(String::as_str), Some("reset" | "rm")) } Some("rm") => matches!(command.get(1).map(String::as_str), Some("-f" | "-rf")), // for sudo <cmd> simply do the check for <cmd> Some("sudo") => is_dangerous_to_call_with_exec(&command[1..]), // ── anything else ───────────────────────────────────────────────── _ => false, } } #[cfg(test)] mod tests { use super::*; use codex_protocol::protocol::NetworkAccess; fn vec_str(items: &[&str]) -> Vec<String> { items.iter().map(std::string::ToString::to_string).collect() } #[test] fn git_reset_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&["git", "reset"]))); } #[test] fn bash_git_reset_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&[ "bash", "-lc", "git reset --hard" ]))); } #[test] fn zsh_git_reset_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&[ "zsh", "-lc", "git reset --hard" ]))); } #[test] fn git_status_is_not_dangerous() { assert!(!command_might_be_dangerous(&vec_str(&["git", "status"]))); } #[test] fn bash_git_status_is_not_dangerous() { assert!(!command_might_be_dangerous(&vec_str(&[ "bash", "-lc", "git status" ]))); } #[test] fn sudo_git_reset_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&[ "sudo", "git", "reset", "--hard" ]))); } #[test] fn usr_bin_git_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&[ "/usr/bin/git", "reset", "--hard" ]))); } #[test] fn rm_rf_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&["rm", "-rf", "/"]))); } #[test] fn rm_f_is_dangerous() { assert!(command_might_be_dangerous(&vec_str(&["rm", "-f", "/"]))); } #[test] fn 
external_sandbox_only_prompts_for_dangerous_commands() { let external_policy = SandboxPolicy::ExternalSandbox { network_access: NetworkAccess::Restricted, }; assert!(!requires_initial_appoval( AskForApproval::OnRequest, &external_policy, &vec_str(&["ls"]), SandboxPermissions::UseDefault, )); assert!(requires_initial_appoval( AskForApproval::OnRequest, &external_policy, &vec_str(&["rm", "-rf", "/"]), SandboxPermissions::UseDefault, )); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/command_safety/is_safe_command.rs
codex-rs/core/src/command_safety/is_safe_command.rs
use crate::bash::parse_shell_lc_plain_commands; use crate::command_safety::windows_safe_commands::is_safe_command_windows; pub fn is_known_safe_command(command: &[String]) -> bool { let command: Vec<String> = command .iter() .map(|s| { if s == "zsh" { "bash".to_string() } else { s.clone() } }) .collect(); if is_safe_command_windows(&command) { return true; } if is_safe_to_call_with_exec(&command) { return true; } // Support `bash -lc "..."` where the script consists solely of one or // more "plain" commands (only bare words / quoted strings) combined with // a conservative allow‑list of shell operators that themselves do not // introduce side effects ( "&&", "||", ";", and "|" ). If every // individual command in the script is itself a known‑safe command, then // the composite expression is considered safe. if let Some(all_commands) = parse_shell_lc_plain_commands(&command) && !all_commands.is_empty() && all_commands .iter() .all(|cmd| is_safe_to_call_with_exec(cmd)) { return true; } false } fn is_safe_to_call_with_exec(command: &[String]) -> bool { let Some(cmd0) = command.first().map(String::as_str) else { return false; }; match std::path::Path::new(&cmd0) .file_name() .and_then(|osstr| osstr.to_str()) { Some(cmd) if cfg!(target_os = "linux") && matches!(cmd, "numfmt" | "tac") => true, #[rustfmt::skip] Some( "cat" | "cd" | "cut" | "echo" | "expr" | "false" | "grep" | "head" | "id" | "ls" | "nl" | "paste" | "pwd" | "rev" | "seq" | "stat" | "tail" | "tr" | "true" | "uname" | "uniq" | "wc" | "which" | "whoami") => { true }, Some("base64") => { const UNSAFE_BASE64_OPTIONS: &[&str] = &["-o", "--output"]; !command.iter().skip(1).any(|arg| { UNSAFE_BASE64_OPTIONS.contains(&arg.as_str()) || arg.starts_with("--output=") || (arg.starts_with("-o") && arg != "-o") }) } Some("find") => { // Certain options to `find` can delete files, write to files, or // execute arbitrary commands, so we cannot auto-approve the // invocation of `find` in such cases. 
#[rustfmt::skip] const UNSAFE_FIND_OPTIONS: &[&str] = &[ // Options that can execute arbitrary commands. "-exec", "-execdir", "-ok", "-okdir", // Option that deletes matching files. "-delete", // Options that write pathnames to a file. "-fls", "-fprint", "-fprint0", "-fprintf", ]; !command .iter() .any(|arg| UNSAFE_FIND_OPTIONS.contains(&arg.as_str())) } // Ripgrep Some("rg") => { const UNSAFE_RIPGREP_OPTIONS_WITH_ARGS: &[&str] = &[ // Takes an arbitrary command that is executed for each match. "--pre", // Takes a command that can be used to obtain the local hostname. "--hostname-bin", ]; const UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS: &[&str] = &[ // Calls out to other decompression tools, so do not auto-approve // out of an abundance of caution. "--search-zip", "-z", ]; !command.iter().any(|arg| { UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS.contains(&arg.as_str()) || UNSAFE_RIPGREP_OPTIONS_WITH_ARGS .iter() .any(|&opt| arg == opt || arg.starts_with(&format!("{opt}="))) }) } // Git Some("git") => matches!( command.get(1).map(String::as_str), Some("branch" | "status" | "log" | "diff" | "show") ), // Rust Some("cargo") if command.get(1).map(String::as_str) == Some("check") => true, // Special-case `sed -n {N|M,N}p` Some("sed") if { command.len() <= 4 && command.get(1).map(String::as_str) == Some("-n") && is_valid_sed_n_arg(command.get(2).map(String::as_str)) } => { true } // ── anything else ───────────────────────────────────────────────── _ => false, } } // (bash parsing helpers implemented in crate::bash) /* ---------------------------------------------------------- Example ---------------------------------------------------------- */ /// Returns true if `arg` matches /^(\d+,)?\d+p$/ fn is_valid_sed_n_arg(arg: Option<&str>) -> bool { // unwrap or bail let s = match arg { Some(s) => s, None => return false, }; // must end with 'p', strip it let core = match s.strip_suffix('p') { Some(rest) => rest, None => return false, }; // split on ',' and ensure 1 or 2 numeric parts let 
parts: Vec<&str> = core.split(',').collect(); match parts.as_slice() { // single number, e.g. "10" [num] => !num.is_empty() && num.chars().all(|c| c.is_ascii_digit()), // two numbers, e.g. "1,5" [a, b] => { !a.is_empty() && !b.is_empty() && a.chars().all(|c| c.is_ascii_digit()) && b.chars().all(|c| c.is_ascii_digit()) } // anything else (more than one comma) is invalid _ => false, } } #[cfg(test)] mod tests { use super::*; use std::string::ToString; fn vec_str(args: &[&str]) -> Vec<String> { args.iter().map(ToString::to_string).collect() } #[test] fn known_safe_examples() { assert!(is_safe_to_call_with_exec(&vec_str(&["ls"]))); assert!(is_safe_to_call_with_exec(&vec_str(&["git", "status"]))); assert!(is_safe_to_call_with_exec(&vec_str(&["base64"]))); assert!(is_safe_to_call_with_exec(&vec_str(&[ "sed", "-n", "1,5p", "file.txt" ]))); assert!(is_safe_to_call_with_exec(&vec_str(&[ "nl", "-nrz", "Cargo.toml" ]))); // Safe `find` command (no unsafe options). assert!(is_safe_to_call_with_exec(&vec_str(&[ "find", ".", "-name", "file.txt" ]))); if cfg!(target_os = "linux") { assert!(is_safe_to_call_with_exec(&vec_str(&["numfmt", "1000"]))); assert!(is_safe_to_call_with_exec(&vec_str(&["tac", "Cargo.toml"]))); } else { assert!(!is_safe_to_call_with_exec(&vec_str(&["numfmt", "1000"]))); assert!(!is_safe_to_call_with_exec(&vec_str(&["tac", "Cargo.toml"]))); } } #[test] fn zsh_lc_safe_command_sequence() { assert!(is_known_safe_command(&vec_str(&["zsh", "-lc", "ls"]))); } #[test] fn unknown_or_partial() { assert!(!is_safe_to_call_with_exec(&vec_str(&["foo"]))); assert!(!is_safe_to_call_with_exec(&vec_str(&["git", "fetch"]))); assert!(!is_safe_to_call_with_exec(&vec_str(&[ "sed", "-n", "xp", "file.txt" ]))); // Unsafe `find` commands. 
for args in [ vec_str(&["find", ".", "-name", "file.txt", "-exec", "rm", "{}", ";"]), vec_str(&[ "find", ".", "-name", "*.py", "-execdir", "python3", "{}", ";", ]), vec_str(&["find", ".", "-name", "file.txt", "-ok", "rm", "{}", ";"]), vec_str(&["find", ".", "-name", "*.py", "-okdir", "python3", "{}", ";"]), vec_str(&["find", ".", "-delete", "-name", "file.txt"]), vec_str(&["find", ".", "-fls", "/etc/passwd"]), vec_str(&["find", ".", "-fprint", "/etc/passwd"]), vec_str(&["find", ".", "-fprint0", "/etc/passwd"]), vec_str(&["find", ".", "-fprintf", "/root/suid.txt", "%#m %u %p\n"]), ] { assert!( !is_safe_to_call_with_exec(&args), "expected {args:?} to be unsafe" ); } } #[test] fn base64_output_options_are_unsafe() { for args in [ vec_str(&["base64", "-o", "out.bin"]), vec_str(&["base64", "--output", "out.bin"]), vec_str(&["base64", "--output=out.bin"]), vec_str(&["base64", "-ob64.txt"]), ] { assert!( !is_safe_to_call_with_exec(&args), "expected {args:?} to be considered unsafe due to output option" ); } } #[test] fn ripgrep_rules() { // Safe ripgrep invocations – none of the unsafe flags are present. assert!(is_safe_to_call_with_exec(&vec_str(&[ "rg", "Cargo.toml", "-n" ]))); // Unsafe flags that do not take an argument (present verbatim). for args in [ vec_str(&["rg", "--search-zip", "files"]), vec_str(&["rg", "-z", "files"]), ] { assert!( !is_safe_to_call_with_exec(&args), "expected {args:?} to be considered unsafe due to zip-search flag", ); } // Unsafe flags that expect a value, provided in both split and = forms. 
for args in [ vec_str(&["rg", "--pre", "pwned", "files"]), vec_str(&["rg", "--pre=pwned", "files"]), vec_str(&["rg", "--hostname-bin", "pwned", "files"]), vec_str(&["rg", "--hostname-bin=pwned", "files"]), ] { assert!( !is_safe_to_call_with_exec(&args), "expected {args:?} to be considered unsafe due to external-command flag", ); } } #[test] fn windows_powershell_full_path_is_safe() { if !cfg!(windows) { // Windows only because on Linux path splitting doesn't handle `/` separators properly return; } assert!(is_known_safe_command(&vec_str(&[ r"C:\Program Files\PowerShell\7\pwsh.exe", "-Command", "Get-Location", ]))); } #[test] fn bash_lc_safe_examples() { assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls"]))); assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls -1"]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "git status" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "grep -R \"Cargo.toml\" -n" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "sed -n 1,5p file.txt" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "sed -n '1,5p' file.txt" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "find . -name file.txt" ]))); } #[test] fn bash_lc_safe_examples_with_operators() { assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "grep -R \"Cargo.toml\" -n || true" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "ls && pwd" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "echo 'hi' ; ls" ]))); assert!(is_known_safe_command(&vec_str(&[ "bash", "-lc", "ls | wc -l" ]))); } #[test] fn bash_lc_unsafe_examples() { assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "git", "status"])), "Four arg version is not known to be safe." ); assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "'git status'"])), "The extra quoting around 'git status' makes it a program named 'git status' and is therefore unsafe." 
); assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "find . -name file.txt -delete"])), "Unsafe find option should not be auto-approved." ); // Disallowed because of unsafe command in sequence. assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "ls && rm -rf /"])), "Sequence containing unsafe command must be rejected" ); // Disallowed because of parentheses / subshell. assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "(ls)"])), "Parentheses (subshell) are not provably safe with the current parser" ); assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "ls || (pwd && echo hi)"])), "Nested parentheses are not provably safe with the current parser" ); // Disallowed redirection. assert!( !is_known_safe_command(&vec_str(&["bash", "-lc", "ls > out.txt"])), "> redirection should be rejected" ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/state/session.rs
codex-rs/core/src/state/session.rs
//! Session-wide mutable state. use codex_protocol::models::ResponseItem; use crate::codex::SessionConfiguration; use crate::context_manager::ContextManager; use crate::protocol::RateLimitSnapshot; use crate::protocol::TokenUsage; use crate::protocol::TokenUsageInfo; use crate::truncate::TruncationPolicy; /// Persistent, session-scoped state previously stored directly on `Session`. pub(crate) struct SessionState { pub(crate) session_configuration: SessionConfiguration, pub(crate) history: ContextManager, pub(crate) latest_rate_limits: Option<RateLimitSnapshot>, } impl SessionState { /// Create a new session state mirroring previous `State::default()` semantics. pub(crate) fn new(session_configuration: SessionConfiguration) -> Self { let history = ContextManager::new(); Self { session_configuration, history, latest_rate_limits: None, } } // History helpers pub(crate) fn record_items<I>(&mut self, items: I, policy: TruncationPolicy) where I: IntoIterator, I::Item: std::ops::Deref<Target = ResponseItem>, { self.history.record_items(items, policy); } pub(crate) fn clone_history(&self) -> ContextManager { self.history.clone() } pub(crate) fn replace_history(&mut self, items: Vec<ResponseItem>) { self.history.replace(items); } pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) { self.history.set_token_info(info); } // Token/rate limit helpers pub(crate) fn update_token_info_from_usage( &mut self, usage: &TokenUsage, model_context_window: Option<i64>, ) { self.history.update_token_info(usage, model_context_window); } pub(crate) fn token_info(&self) -> Option<TokenUsageInfo> { self.history.token_info() } pub(crate) fn set_rate_limits(&mut self, snapshot: RateLimitSnapshot) { self.latest_rate_limits = Some(merge_rate_limit_fields( self.latest_rate_limits.as_ref(), snapshot, )); } pub(crate) fn token_info_and_rate_limits( &self, ) -> (Option<TokenUsageInfo>, Option<RateLimitSnapshot>) { (self.token_info(), self.latest_rate_limits.clone()) } pub(crate) fn 
set_token_usage_full(&mut self, context_window: i64) { self.history.set_token_usage_full(context_window); } pub(crate) fn get_total_token_usage(&self) -> i64 { self.history.get_total_token_usage() } } // Sometimes new snapshots don't include credits or plan information. fn merge_rate_limit_fields( previous: Option<&RateLimitSnapshot>, mut snapshot: RateLimitSnapshot, ) -> RateLimitSnapshot { if snapshot.credits.is_none() { snapshot.credits = previous.and_then(|prior| prior.credits.clone()); } if snapshot.plan_type.is_none() { snapshot.plan_type = previous.and_then(|prior| prior.plan_type); } snapshot }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/state/service.rs
codex-rs/core/src/state/service.rs
use std::sync::Arc; use crate::AuthManager; use crate::RolloutRecorder; use crate::exec_policy::ExecPolicyManager; use crate::mcp_connection_manager::McpConnectionManager; use crate::models_manager::manager::ModelsManager; use crate::skills::SkillsManager; use crate::tools::sandboxing::ApprovalStore; use crate::unified_exec::UnifiedExecSessionManager; use crate::user_notification::UserNotifier; use codex_otel::otel_manager::OtelManager; use tokio::sync::Mutex; use tokio::sync::RwLock; use tokio_util::sync::CancellationToken; pub(crate) struct SessionServices { pub(crate) mcp_connection_manager: Arc<RwLock<McpConnectionManager>>, pub(crate) mcp_startup_cancellation_token: CancellationToken, pub(crate) unified_exec_manager: UnifiedExecSessionManager, pub(crate) notifier: UserNotifier, pub(crate) rollout: Mutex<Option<RolloutRecorder>>, pub(crate) user_shell: Arc<crate::shell::Shell>, pub(crate) show_raw_agent_reasoning: bool, pub(crate) exec_policy: ExecPolicyManager, pub(crate) auth_manager: Arc<AuthManager>, pub(crate) models_manager: Arc<ModelsManager>, pub(crate) otel_manager: OtelManager, pub(crate) tool_approvals: Mutex<ApprovalStore>, pub(crate) skills_manager: Arc<SkillsManager>, }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/state/mod.rs
codex-rs/core/src/state/mod.rs
mod service; mod session; mod turn; pub(crate) use service::SessionServices; pub(crate) use session::SessionState; pub(crate) use turn::ActiveTurn; pub(crate) use turn::RunningTask; pub(crate) use turn::TaskKind;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/state/turn.rs
codex-rs/core/src/state/turn.rs
//! Turn-scoped state and active turn metadata scaffolding. use indexmap::IndexMap; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::Mutex; use tokio::sync::Notify; use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use codex_protocol::models::ResponseInputItem; use tokio::sync::oneshot; use crate::codex::TurnContext; use crate::protocol::ReviewDecision; use crate::tasks::SessionTask; /// Metadata about the currently running turn. pub(crate) struct ActiveTurn { pub(crate) tasks: IndexMap<String, RunningTask>, pub(crate) turn_state: Arc<Mutex<TurnState>>, } impl Default for ActiveTurn { fn default() -> Self { Self { tasks: IndexMap::new(), turn_state: Arc::new(Mutex::new(TurnState::default())), } } } #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum TaskKind { Regular, Review, Compact, } #[derive(Clone)] pub(crate) struct RunningTask { pub(crate) done: Arc<Notify>, pub(crate) kind: TaskKind, pub(crate) task: Arc<dyn SessionTask>, pub(crate) cancellation_token: CancellationToken, pub(crate) handle: Arc<AbortOnDropHandle<()>>, pub(crate) turn_context: Arc<TurnContext>, } impl ActiveTurn { pub(crate) fn add_task(&mut self, task: RunningTask) { let sub_id = task.turn_context.sub_id.clone(); self.tasks.insert(sub_id, task); } pub(crate) fn remove_task(&mut self, sub_id: &str) -> bool { self.tasks.swap_remove(sub_id); self.tasks.is_empty() } pub(crate) fn drain_tasks(&mut self) -> Vec<RunningTask> { self.tasks.drain(..).map(|(_, task)| task).collect() } } /// Mutable state for a single turn. 
#[derive(Default)] pub(crate) struct TurnState { pending_approvals: HashMap<String, oneshot::Sender<ReviewDecision>>, pending_input: Vec<ResponseInputItem>, } impl TurnState { pub(crate) fn insert_pending_approval( &mut self, key: String, tx: oneshot::Sender<ReviewDecision>, ) -> Option<oneshot::Sender<ReviewDecision>> { self.pending_approvals.insert(key, tx) } pub(crate) fn remove_pending_approval( &mut self, key: &str, ) -> Option<oneshot::Sender<ReviewDecision>> { self.pending_approvals.remove(key) } pub(crate) fn clear_pending(&mut self) { self.pending_approvals.clear(); self.pending_input.clear(); } pub(crate) fn push_pending_input(&mut self, input: ResponseInputItem) { self.pending_input.push(input); } pub(crate) fn take_pending_input(&mut self) -> Vec<ResponseInputItem> { if self.pending_input.is_empty() { Vec::with_capacity(0) } else { let mut ret = Vec::new(); std::mem::swap(&mut ret, &mut self.pending_input); ret } } } impl ActiveTurn { /// Clear any pending approvals and input buffered for the current turn. pub(crate) async fn clear_pending(&self) { let mut ts = self.turn_state.lock().await; ts.clear_pending(); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/sandboxing/mod.rs
codex-rs/core/src/sandboxing/mod.rs
/* Module: sandboxing Build platform wrappers and produce ExecEnv for execution. Owns low‑level sandbox placement and transformation of portable CommandSpec into a ready‑to‑spawn environment. */ use crate::exec::ExecExpiration; use crate::exec::ExecToolCallOutput; use crate::exec::SandboxType; use crate::exec::StdoutStream; use crate::exec::execute_exec_env; use crate::landlock::create_linux_sandbox_command_args; use crate::protocol::SandboxPolicy; #[cfg(target_os = "macos")] use crate::seatbelt::MACOS_PATH_TO_SEATBELT_EXECUTABLE; #[cfg(target_os = "macos")] use crate::seatbelt::create_seatbelt_command_args; #[cfg(target_os = "macos")] use crate::spawn::CODEX_SANDBOX_ENV_VAR; use crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR; use crate::tools::sandboxing::SandboxablePreference; pub use codex_protocol::models::SandboxPermissions; use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; #[derive(Debug)] pub struct CommandSpec { pub program: String, pub args: Vec<String>, pub cwd: PathBuf, pub env: HashMap<String, String>, pub expiration: ExecExpiration, pub sandbox_permissions: SandboxPermissions, pub justification: Option<String>, } #[derive(Debug)] pub struct ExecEnv { pub command: Vec<String>, pub cwd: PathBuf, pub env: HashMap<String, String>, pub expiration: ExecExpiration, pub sandbox: SandboxType, pub sandbox_permissions: SandboxPermissions, pub justification: Option<String>, pub arg0: Option<String>, } pub enum SandboxPreference { Auto, Require, Forbid, } #[derive(Debug, thiserror::Error)] pub(crate) enum SandboxTransformError { #[error("missing codex-linux-sandbox executable path")] MissingLinuxSandboxExecutable, #[cfg(not(target_os = "macos"))] #[error("seatbelt sandbox is only available on macOS")] SeatbeltUnavailable, } #[derive(Default)] pub struct SandboxManager; impl SandboxManager { pub fn new() -> Self { Self } pub(crate) fn select_initial( &self, policy: &SandboxPolicy, pref: SandboxablePreference, ) -> SandboxType { match 
pref { SandboxablePreference::Forbid => SandboxType::None, SandboxablePreference::Require => { // Require a platform sandbox when available; on Windows this // respects the experimental_windows_sandbox feature. crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None) } SandboxablePreference::Auto => match policy { SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => { SandboxType::None } _ => crate::safety::get_platform_sandbox().unwrap_or(SandboxType::None), }, } } pub(crate) fn transform( &self, mut spec: CommandSpec, policy: &SandboxPolicy, sandbox: SandboxType, sandbox_policy_cwd: &Path, codex_linux_sandbox_exe: Option<&PathBuf>, ) -> Result<ExecEnv, SandboxTransformError> { let mut env = spec.env; if !policy.has_full_network_access() { env.insert( CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR.to_string(), "1".to_string(), ); } let mut command = Vec::with_capacity(1 + spec.args.len()); command.push(spec.program); command.append(&mut spec.args); let (command, sandbox_env, arg0_override) = match sandbox { SandboxType::None => (command, HashMap::new(), None), #[cfg(target_os = "macos")] SandboxType::MacosSeatbelt => { let mut seatbelt_env = HashMap::new(); seatbelt_env.insert(CODEX_SANDBOX_ENV_VAR.to_string(), "seatbelt".to_string()); let mut args = create_seatbelt_command_args(command.clone(), policy, sandbox_policy_cwd); let mut full_command = Vec::with_capacity(1 + args.len()); full_command.push(MACOS_PATH_TO_SEATBELT_EXECUTABLE.to_string()); full_command.append(&mut args); (full_command, seatbelt_env, None) } #[cfg(not(target_os = "macos"))] SandboxType::MacosSeatbelt => return Err(SandboxTransformError::SeatbeltUnavailable), SandboxType::LinuxSeccomp => { let exe = codex_linux_sandbox_exe .ok_or(SandboxTransformError::MissingLinuxSandboxExecutable)?; let mut args = create_linux_sandbox_command_args(command.clone(), policy, sandbox_policy_cwd); let mut full_command = Vec::with_capacity(1 + args.len()); 
full_command.push(exe.to_string_lossy().to_string()); full_command.append(&mut args); ( full_command, HashMap::new(), Some("codex-linux-sandbox".to_string()), ) } // On Windows, the restricted token sandbox executes in-process via the // codex-windows-sandbox crate. We leave the command unchanged here and // branch during execution based on the sandbox type. #[cfg(target_os = "windows")] SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None), // When building for non-Windows targets, this variant is never constructed. #[cfg(not(target_os = "windows"))] SandboxType::WindowsRestrictedToken => (command, HashMap::new(), None), }; env.extend(sandbox_env); Ok(ExecEnv { command, cwd: spec.cwd, env, expiration: spec.expiration, sandbox, sandbox_permissions: spec.sandbox_permissions, justification: spec.justification, arg0: arg0_override, }) } pub fn denied(&self, sandbox: SandboxType, out: &ExecToolCallOutput) -> bool { crate::exec::is_likely_sandbox_denied(sandbox, out) } } pub async fn execute_env( env: ExecEnv, policy: &SandboxPolicy, stdout_stream: Option<StdoutStream>, ) -> crate::error::Result<ExecToolCallOutput> { execute_exec_env(env, policy, stdout_stream).await }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/router.rs
codex-rs/core/src/tools/router.rs
use crate::client_common::tools::ToolSpec; use crate::codex::Session; use crate::codex::TurnContext; use crate::function_tool::FunctionCallError; use crate::sandboxing::SandboxPermissions; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolPayload; use crate::tools::registry::ConfiguredToolSpec; use crate::tools::registry::ToolRegistry; use crate::tools::spec::ToolsConfig; use crate::tools::spec::build_specs; use codex_protocol::models::LocalShellAction; use codex_protocol::models::ResponseInputItem; use codex_protocol::models::ResponseItem; use codex_protocol::models::ShellToolCallParams; use std::collections::HashMap; use std::sync::Arc; use tracing::instrument; #[derive(Clone, Debug)] pub struct ToolCall { pub tool_name: String, pub call_id: String, pub payload: ToolPayload, } pub struct ToolRouter { registry: ToolRegistry, specs: Vec<ConfiguredToolSpec>, } impl ToolRouter { pub fn from_config( config: &ToolsConfig, mcp_tools: Option<HashMap<String, mcp_types::Tool>>, ) -> Self { let builder = build_specs(config, mcp_tools); let (specs, registry) = builder.build(); Self { registry, specs } } pub fn specs(&self) -> Vec<ToolSpec> { self.specs .iter() .map(|config| config.spec.clone()) .collect() } pub fn tool_supports_parallel(&self, tool_name: &str) -> bool { self.specs .iter() .filter(|config| config.supports_parallel_tool_calls) .any(|config| config.spec.name() == tool_name) } #[instrument(level = "trace", skip_all, err)] pub async fn build_tool_call( session: &Session, item: ResponseItem, ) -> Result<Option<ToolCall>, FunctionCallError> { match item { ResponseItem::FunctionCall { name, arguments, call_id, .. 
} => { if let Some((server, tool)) = session.parse_mcp_tool_name(&name).await { Ok(Some(ToolCall { tool_name: name, call_id, payload: ToolPayload::Mcp { server, tool, raw_arguments: arguments, }, })) } else { Ok(Some(ToolCall { tool_name: name, call_id, payload: ToolPayload::Function { arguments }, })) } } ResponseItem::CustomToolCall { name, input, call_id, .. } => Ok(Some(ToolCall { tool_name: name, call_id, payload: ToolPayload::Custom { input }, })), ResponseItem::LocalShellCall { id, call_id, action, .. } => { let call_id = call_id .or(id) .ok_or(FunctionCallError::MissingLocalShellCallId)?; match action { LocalShellAction::Exec(exec) => { let params = ShellToolCallParams { command: exec.command, workdir: exec.working_directory, timeout_ms: exec.timeout_ms, sandbox_permissions: Some(SandboxPermissions::UseDefault), justification: None, }; Ok(Some(ToolCall { tool_name: "local_shell".to_string(), call_id, payload: ToolPayload::LocalShell { params }, })) } } } _ => Ok(None), } } #[instrument(level = "trace", skip_all, err)] pub async fn dispatch_tool_call( &self, session: Arc<Session>, turn: Arc<TurnContext>, tracker: SharedTurnDiffTracker, call: ToolCall, ) -> Result<ResponseInputItem, FunctionCallError> { let ToolCall { tool_name, call_id, payload, } = call; let payload_outputs_custom = matches!(payload, ToolPayload::Custom { .. 
}); let failure_call_id = call_id.clone(); let invocation = ToolInvocation { session, turn, tracker, call_id, tool_name, payload, }; match self.registry.dispatch(invocation).await { Ok(response) => Ok(response), Err(FunctionCallError::Fatal(message)) => Err(FunctionCallError::Fatal(message)), Err(err) => Ok(Self::failure_response( failure_call_id, payload_outputs_custom, err, )), } } fn failure_response( call_id: String, payload_outputs_custom: bool, err: FunctionCallError, ) -> ResponseInputItem { let message = err.to_string(); if payload_outputs_custom { ResponseInputItem::CustomToolCallOutput { call_id, output: message, } } else { ResponseInputItem::FunctionCallOutput { call_id, output: codex_protocol::models::FunctionCallOutputPayload { content: message, success: Some(false), ..Default::default() }, } } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/parallel.rs
codex-rs/core/src/tools/parallel.rs
use std::sync::Arc; use std::time::Instant; use tokio::sync::RwLock; use tokio_util::either::Either; use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use tracing::Instrument; use tracing::instrument; use tracing::trace_span; use crate::codex::Session; use crate::codex::TurnContext; use crate::error::CodexErr; use crate::function_tool::FunctionCallError; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::context::ToolPayload; use crate::tools::router::ToolCall; use crate::tools::router::ToolRouter; use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::ResponseInputItem; #[derive(Clone)] pub(crate) struct ToolCallRuntime { router: Arc<ToolRouter>, session: Arc<Session>, turn_context: Arc<TurnContext>, tracker: SharedTurnDiffTracker, parallel_execution: Arc<RwLock<()>>, } impl ToolCallRuntime { pub(crate) fn new( router: Arc<ToolRouter>, session: Arc<Session>, turn_context: Arc<TurnContext>, tracker: SharedTurnDiffTracker, ) -> Self { Self { router, session, turn_context, tracker, parallel_execution: Arc::new(RwLock::new(())), } } #[instrument(level = "trace", skip_all, fields(call = ?call))] pub(crate) fn handle_tool_call( self, call: ToolCall, cancellation_token: CancellationToken, ) -> impl std::future::Future<Output = Result<ResponseInputItem, CodexErr>> { let supports_parallel = self.router.tool_supports_parallel(&call.tool_name); let router = Arc::clone(&self.router); let session = Arc::clone(&self.session); let turn = Arc::clone(&self.turn_context); let tracker = Arc::clone(&self.tracker); let lock = Arc::clone(&self.parallel_execution); let started = Instant::now(); let dispatch_span = trace_span!( "dispatch_tool_call", otel.name = call.tool_name.as_str(), tool_name = call.tool_name.as_str(), call_id = call.call_id.as_str(), aborted = false, ); let handle: AbortOnDropHandle<Result<ResponseInputItem, FunctionCallError>> = AbortOnDropHandle::new(tokio::spawn(async move { 
tokio::select! { _ = cancellation_token.cancelled() => { let secs = started.elapsed().as_secs_f32().max(0.1); dispatch_span.record("aborted", true); Ok(Self::aborted_response(&call, secs)) }, res = async { let _guard = if supports_parallel { Either::Left(lock.read().await) } else { Either::Right(lock.write().await) }; router .dispatch_tool_call(session, turn, tracker, call.clone()) .instrument(dispatch_span.clone()) .await } => res, } })); async move { match handle.await { Ok(Ok(response)) => Ok(response), Ok(Err(FunctionCallError::Fatal(message))) => Err(CodexErr::Fatal(message)), Ok(Err(other)) => Err(CodexErr::Fatal(other.to_string())), Err(err) => Err(CodexErr::Fatal(format!( "tool task failed to receive: {err:?}" ))), } } .in_current_span() } } impl ToolCallRuntime { fn aborted_response(call: &ToolCall, secs: f32) -> ResponseInputItem { match &call.payload { ToolPayload::Custom { .. } => ResponseInputItem::CustomToolCallOutput { call_id: call.call_id.clone(), output: Self::abort_message(call, secs), }, ToolPayload::Mcp { .. } => ResponseInputItem::McpToolCallOutput { call_id: call.call_id.clone(), result: Err(Self::abort_message(call, secs)), }, _ => ResponseInputItem::FunctionCallOutput { call_id: call.call_id.clone(), output: FunctionCallOutputPayload { content: Self::abort_message(call, secs), ..Default::default() }, }, } } fn abort_message(call: &ToolCall, secs: f32) -> String { match call.tool_name.as_str() { "shell" | "container.exec" | "local_shell" | "shell_command" | "unified_exec" => { format!("Wall time: {secs:.1} seconds\naborted by user") } _ => format!("aborted by user after {secs:.1}s"), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/registry.rs
codex-rs/core/src/tools/registry.rs
use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use crate::client_common::tools::ToolSpec; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use async_trait::async_trait; use codex_protocol::models::ResponseInputItem; use codex_utils_readiness::Readiness; use tracing::warn; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum ToolKind { Function, Mcp, } #[async_trait] pub trait ToolHandler: Send + Sync { fn kind(&self) -> ToolKind; fn matches_kind(&self, payload: &ToolPayload) -> bool { matches!( (self.kind(), payload), (ToolKind::Function, ToolPayload::Function { .. }) | (ToolKind::Mcp, ToolPayload::Mcp { .. }) ) } async fn is_mutating(&self, _invocation: &ToolInvocation) -> bool { false } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError>; } pub struct ToolRegistry { handlers: HashMap<String, Arc<dyn ToolHandler>>, } impl ToolRegistry { pub fn new(handlers: HashMap<String, Arc<dyn ToolHandler>>) -> Self { Self { handlers } } pub fn handler(&self, name: &str) -> Option<Arc<dyn ToolHandler>> { self.handlers.get(name).map(Arc::clone) } // TODO(jif) for dynamic tools. 
// pub fn register(&mut self, name: impl Into<String>, handler: Arc<dyn ToolHandler>) { // let name = name.into(); // if self.handlers.insert(name.clone(), handler).is_some() { // warn!("overwriting handler for tool {name}"); // } // } pub async fn dispatch( &self, invocation: ToolInvocation, ) -> Result<ResponseInputItem, FunctionCallError> { let tool_name = invocation.tool_name.clone(); let call_id_owned = invocation.call_id.clone(); let otel = invocation.turn.client.get_otel_manager(); let payload_for_response = invocation.payload.clone(); let log_payload = payload_for_response.log_payload(); let handler = match self.handler(tool_name.as_ref()) { Some(handler) => handler, None => { let message = unsupported_tool_call_message(&invocation.payload, tool_name.as_ref()); otel.tool_result( tool_name.as_ref(), &call_id_owned, log_payload.as_ref(), Duration::ZERO, false, &message, ); return Err(FunctionCallError::RespondToModel(message)); } }; if !handler.matches_kind(&invocation.payload) { let message = format!("tool {tool_name} invoked with incompatible payload"); otel.tool_result( tool_name.as_ref(), &call_id_owned, log_payload.as_ref(), Duration::ZERO, false, &message, ); return Err(FunctionCallError::Fatal(message)); } let output_cell = tokio::sync::Mutex::new(None); let result = otel .log_tool_result( tool_name.as_ref(), &call_id_owned, log_payload.as_ref(), || { let handler = handler.clone(); let output_cell = &output_cell; let invocation = invocation; async move { if handler.is_mutating(&invocation).await { tracing::trace!("waiting for tool gate"); invocation.turn.tool_call_gate.wait_ready().await; tracing::trace!("tool gate released"); } match handler.handle(invocation).await { Ok(output) => { let preview = output.log_preview(); let success = output.success_for_logging(); let mut guard = output_cell.lock().await; *guard = Some(output); Ok((preview, success)) } Err(err) => Err(err), } } }, ) .await; match result { Ok(_) => { let mut guard = 
output_cell.lock().await; let output = guard.take().ok_or_else(|| { FunctionCallError::Fatal("tool produced no output".to_string()) })?; Ok(output.into_response(&call_id_owned, &payload_for_response)) } Err(err) => Err(err), } } } #[derive(Debug, Clone)] pub struct ConfiguredToolSpec { pub spec: ToolSpec, pub supports_parallel_tool_calls: bool, } impl ConfiguredToolSpec { pub fn new(spec: ToolSpec, supports_parallel_tool_calls: bool) -> Self { Self { spec, supports_parallel_tool_calls, } } } pub struct ToolRegistryBuilder { handlers: HashMap<String, Arc<dyn ToolHandler>>, specs: Vec<ConfiguredToolSpec>, } impl ToolRegistryBuilder { pub fn new() -> Self { Self { handlers: HashMap::new(), specs: Vec::new(), } } pub fn push_spec(&mut self, spec: ToolSpec) { self.push_spec_with_parallel_support(spec, false); } pub fn push_spec_with_parallel_support( &mut self, spec: ToolSpec, supports_parallel_tool_calls: bool, ) { self.specs .push(ConfiguredToolSpec::new(spec, supports_parallel_tool_calls)); } pub fn register_handler(&mut self, name: impl Into<String>, handler: Arc<dyn ToolHandler>) { let name = name.into(); if self .handlers .insert(name.clone(), handler.clone()) .is_some() { warn!("overwriting handler for tool {name}"); } } // TODO(jif) for dynamic tools. // pub fn register_many<I>(&mut self, names: I, handler: Arc<dyn ToolHandler>) // where // I: IntoIterator, // I::Item: Into<String>, // { // for name in names { // let name = name.into(); // if self // .handlers // .insert(name.clone(), handler.clone()) // .is_some() // { // warn!("overwriting handler for tool {name}"); // } // } // } pub fn build(self) -> (Vec<ConfiguredToolSpec>, ToolRegistry) { let registry = ToolRegistry::new(self.handlers); (self.specs, registry) } } fn unsupported_tool_call_message(payload: &ToolPayload, tool_name: &str) -> String { match payload { ToolPayload::Custom { .. } => format!("unsupported custom tool call: {tool_name}"), _ => format!("unsupported call: {tool_name}"), } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/sandboxing.rs
codex-rs/core/src/tools/sandboxing.rs
//! Shared approvals and sandboxing traits used by tool runtimes. //! //! Consolidates the approval flow primitives (`ApprovalDecision`, `ApprovalStore`, //! `ApprovalCtx`, `Approvable`) together with the sandbox orchestration traits //! and helpers (`Sandboxable`, `ToolRuntime`, `SandboxAttempt`, etc.). use crate::codex::Session; use crate::codex::TurnContext; use crate::error::CodexErr; use crate::protocol::SandboxPolicy; use crate::sandboxing::CommandSpec; use crate::sandboxing::SandboxManager; use crate::sandboxing::SandboxTransformError; use crate::state::SessionServices; use codex_protocol::approvals::ExecPolicyAmendment; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::ReviewDecision; use std::collections::HashMap; use std::fmt::Debug; use std::hash::Hash; use std::path::Path; use futures::Future; use futures::future::BoxFuture; use serde::Serialize; #[derive(Clone, Default, Debug)] pub(crate) struct ApprovalStore { // Store serialized keys for generic caching across requests. 
map: HashMap<String, ReviewDecision>, } impl ApprovalStore { pub fn get<K>(&self, key: &K) -> Option<ReviewDecision> where K: Serialize, { let s = serde_json::to_string(key).ok()?; self.map.get(&s).cloned() } pub fn put<K>(&mut self, key: K, value: ReviewDecision) where K: Serialize, { if let Ok(s) = serde_json::to_string(&key) { self.map.insert(s, value); } } } pub(crate) async fn with_cached_approval<K, F, Fut>( services: &SessionServices, key: K, fetch: F, ) -> ReviewDecision where K: Serialize + Clone, F: FnOnce() -> Fut, Fut: Future<Output = ReviewDecision>, { { let store = services.tool_approvals.lock().await; if let Some(decision) = store.get(&key) { return decision; } } let decision = fetch().await; if matches!(decision, ReviewDecision::ApprovedForSession) { let mut store = services.tool_approvals.lock().await; store.put(key, ReviewDecision::ApprovedForSession); } decision } #[derive(Clone)] pub(crate) struct ApprovalCtx<'a> { pub session: &'a Session, pub turn: &'a TurnContext, pub call_id: &'a str, pub retry_reason: Option<String>, } // Specifies what tool orchestrator should do with a given tool call. #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum ExecApprovalRequirement { /// No approval required for this tool call. Skip { /// The first attempt should skip sandboxing (e.g., when explicitly /// greenlit by policy). bypass_sandbox: bool, /// Proposed execpolicy amendment to skip future approvals for similar commands /// Only applies if the command fails to run in sandbox and codex prompts the user to run outside the sandbox. proposed_execpolicy_amendment: Option<ExecPolicyAmendment>, }, /// Approval required for this tool call. NeedsApproval { reason: Option<String>, /// Proposed execpolicy amendment to skip future approvals for similar commands /// See core/src/exec_policy.rs for more details on how proposed_execpolicy_amendment is determined. 
proposed_execpolicy_amendment: Option<ExecPolicyAmendment>, }, /// Execution forbidden for this tool call. Forbidden { reason: String }, } impl ExecApprovalRequirement { pub fn proposed_execpolicy_amendment(&self) -> Option<&ExecPolicyAmendment> { match self { Self::NeedsApproval { proposed_execpolicy_amendment: Some(prefix), .. } => Some(prefix), Self::Skip { proposed_execpolicy_amendment: Some(prefix), .. } => Some(prefix), _ => None, } } } /// - Never, OnFailure: do not ask /// - OnRequest: ask unless sandbox policy is DangerFullAccess /// - UnlessTrusted: always ask pub(crate) fn default_exec_approval_requirement( policy: AskForApproval, sandbox_policy: &SandboxPolicy, ) -> ExecApprovalRequirement { let needs_approval = match policy { AskForApproval::Never | AskForApproval::OnFailure => false, AskForApproval::OnRequest => !matches!( sandbox_policy, SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } ), AskForApproval::UnlessTrusted => true, }; if needs_approval { ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: None, } } else { ExecApprovalRequirement::Skip { bypass_sandbox: false, proposed_execpolicy_amendment: None, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum SandboxOverride { NoOverride, BypassSandboxFirstAttempt, } pub(crate) trait Approvable<Req> { type ApprovalKey: Hash + Eq + Clone + Debug + Serialize; fn approval_key(&self, req: &Req) -> Self::ApprovalKey; /// Some tools may request to skip the sandbox on the first attempt /// (e.g., when the request explicitly asks for escalated permissions). /// Defaults to `NoOverride`. 
fn sandbox_mode_for_first_attempt(&self, _req: &Req) -> SandboxOverride { SandboxOverride::NoOverride } fn should_bypass_approval(&self, policy: AskForApproval, already_approved: bool) -> bool { if already_approved { // We do not ask one more time return true; } matches!(policy, AskForApproval::Never) } /// Return `Some(_)` to specify a custom exec approval requirement, or `None` /// to fall back to policy-based default. fn exec_approval_requirement(&self, _req: &Req) -> Option<ExecApprovalRequirement> { None } /// Decide we can request an approval for no-sandbox execution. fn wants_no_sandbox_approval(&self, policy: AskForApproval) -> bool { !matches!(policy, AskForApproval::Never | AskForApproval::OnRequest) } fn start_approval_async<'a>( &'a mut self, req: &'a Req, ctx: ApprovalCtx<'a>, ) -> BoxFuture<'a, ReviewDecision>; } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum SandboxablePreference { Auto, #[allow(dead_code)] // Will be used by later tools. Require, #[allow(dead_code)] // Will be used by later tools. 
Forbid, } pub(crate) trait Sandboxable { fn sandbox_preference(&self) -> SandboxablePreference; fn escalate_on_failure(&self) -> bool { true } } pub(crate) struct ToolCtx<'a> { pub session: &'a Session, pub turn: &'a TurnContext, pub call_id: String, pub tool_name: String, } #[derive(Debug)] pub(crate) enum ToolError { Rejected(String), Codex(CodexErr), } pub(crate) trait ToolRuntime<Req, Out>: Approvable<Req> + Sandboxable { async fn run( &mut self, req: &Req, attempt: &SandboxAttempt<'_>, ctx: &ToolCtx, ) -> Result<Out, ToolError>; } pub(crate) struct SandboxAttempt<'a> { pub sandbox: crate::exec::SandboxType, pub policy: &'a crate::protocol::SandboxPolicy, pub(crate) manager: &'a SandboxManager, pub(crate) sandbox_cwd: &'a Path, pub codex_linux_sandbox_exe: Option<&'a std::path::PathBuf>, } impl<'a> SandboxAttempt<'a> { pub fn env_for( &self, spec: CommandSpec, ) -> Result<crate::sandboxing::ExecEnv, SandboxTransformError> { self.manager.transform( spec, self.policy, self.sandbox, self.sandbox_cwd, self.codex_linux_sandbox_exe, ) } } #[cfg(test)] mod tests { use super::*; use codex_protocol::protocol::NetworkAccess; use pretty_assertions::assert_eq; #[test] fn external_sandbox_skips_exec_approval_on_request() { assert_eq!( default_exec_approval_requirement( AskForApproval::OnRequest, &SandboxPolicy::ExternalSandbox { network_access: NetworkAccess::Restricted, }, ), ExecApprovalRequirement::Skip { bypass_sandbox: false, proposed_execpolicy_amendment: None, } ); } #[test] fn restricted_sandbox_requires_exec_approval_on_request() { assert_eq!( default_exec_approval_requirement(AskForApproval::OnRequest, &SandboxPolicy::ReadOnly), ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: None, } ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/mod.rs
codex-rs/core/src/tools/mod.rs
pub mod context; pub mod events; pub(crate) mod handlers; pub mod orchestrator; pub mod parallel; pub mod registry; pub mod router; pub mod runtimes; pub mod sandboxing; pub mod spec; use crate::exec::ExecToolCallOutput; use crate::truncate::TruncationPolicy; use crate::truncate::formatted_truncate_text; use crate::truncate::truncate_text; pub use router::ToolRouter; use serde::Serialize; // Telemetry preview limits: keep log events smaller than model budgets. pub(crate) const TELEMETRY_PREVIEW_MAX_BYTES: usize = 2 * 1024; // 2 KiB pub(crate) const TELEMETRY_PREVIEW_MAX_LINES: usize = 64; // lines pub(crate) const TELEMETRY_PREVIEW_TRUNCATION_NOTICE: &str = "[... telemetry preview truncated ...]"; /// Format the combined exec output for sending back to the model. /// Includes exit code and duration metadata; truncates large bodies safely. pub fn format_exec_output_for_model_structured( exec_output: &ExecToolCallOutput, truncation_policy: TruncationPolicy, ) -> String { let ExecToolCallOutput { exit_code, duration, .. 
} = exec_output; #[derive(Serialize)] struct ExecMetadata { exit_code: i32, duration_seconds: f32, } #[derive(Serialize)] struct ExecOutput<'a> { output: &'a str, metadata: ExecMetadata, } // round to 1 decimal place let duration_seconds = ((duration.as_secs_f32()) * 10.0).round() / 10.0; let formatted_output = format_exec_output_str(exec_output, truncation_policy); let payload = ExecOutput { output: &formatted_output, metadata: ExecMetadata { exit_code: *exit_code, duration_seconds, }, }; #[expect(clippy::expect_used)] serde_json::to_string(&payload).expect("serialize ExecOutput") } pub fn format_exec_output_for_model_freeform( exec_output: &ExecToolCallOutput, truncation_policy: TruncationPolicy, ) -> String { // round to 1 decimal place let duration_seconds = ((exec_output.duration.as_secs_f32()) * 10.0).round() / 10.0; let content = build_content_with_timeout(exec_output); let total_lines = content.lines().count(); let formatted_output = truncate_text(&content, truncation_policy); let mut sections = Vec::new(); sections.push(format!("Exit code: {}", exec_output.exit_code)); sections.push(format!("Wall time: {duration_seconds} seconds")); if total_lines != formatted_output.lines().count() { sections.push(format!("Total output lines: {total_lines}")); } sections.push("Output:".to_string()); sections.push(formatted_output); sections.join("\n") } pub fn format_exec_output_str( exec_output: &ExecToolCallOutput, truncation_policy: TruncationPolicy, ) -> String { let content = build_content_with_timeout(exec_output); // Truncate for model consumption before serialization. formatted_truncate_text(&content, truncation_policy) } /// Extracts exec output content and prepends a timeout message if the command timed out. 
fn build_content_with_timeout(exec_output: &ExecToolCallOutput) -> String {
    let text = &exec_output.aggregated_output.text;
    match exec_output.timed_out {
        // Timed-out commands get a preamble stating the elapsed time so the
        // model can distinguish a kill from a normal exit.
        true => format!(
            "command timed out after {} milliseconds\n{}",
            exec_output.duration.as_millis(),
            text
        ),
        false => text.clone(),
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/orchestrator.rs
codex-rs/core/src/tools/orchestrator.rs
/*
Module: orchestrator

Central place for approvals + sandbox selection + retry semantics.

Drives a simple sequence for any ToolRuntime:
approval → select sandbox → attempt → retry without sandbox on denial
(no re‑approval thanks to caching).
*/
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecToolCallOutput;
use crate::sandboxing::SandboxManager;
use crate::tools::sandboxing::ApprovalCtx;
use crate::tools::sandboxing::ExecApprovalRequirement;
use crate::tools::sandboxing::SandboxAttempt;
use crate::tools::sandboxing::SandboxOverride;
use crate::tools::sandboxing::ToolCtx;
use crate::tools::sandboxing::ToolError;
use crate::tools::sandboxing::ToolRuntime;
use crate::tools::sandboxing::default_exec_approval_requirement;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::ReviewDecision;

/// Owns the [`SandboxManager`] and runs every tool call through the shared
/// approval → sandbox → attempt → escalate sequence.
pub(crate) struct ToolOrchestrator {
    sandbox: SandboxManager,
}

impl ToolOrchestrator {
    /// Create an orchestrator with a fresh sandbox manager.
    pub fn new() -> Self {
        Self {
            sandbox: SandboxManager::new(),
        }
    }

    /// Run `tool` with request `req` through approval, sandbox selection,
    /// the first attempt, and (on sandbox denial) an optional re-approved
    /// second attempt without a sandbox.
    ///
    /// Errors: `ToolError::Rejected` when the user or policy forbids the
    /// call; otherwise whatever the runtime's `run` returns.
    pub async fn run<Rq, Out, T>(
        &mut self,
        tool: &mut T,
        req: &Rq,
        tool_ctx: &ToolCtx<'_>,
        turn_ctx: &crate::codex::TurnContext,
        approval_policy: AskForApproval,
    ) -> Result<Out, ToolError>
    where
        T: ToolRuntime<Rq, Out>,
    {
        // Telemetry handles captured up front so every decision below is
        // reported with the same tool name / call id.
        let otel = turn_ctx.client.get_otel_manager();
        let otel_tn = &tool_ctx.tool_name;
        let otel_ci = &tool_ctx.call_id;
        let otel_user = codex_otel::otel_manager::ToolDecisionSource::User;
        let otel_cfg = codex_otel::otel_manager::ToolDecisionSource::Config;

        // 1) Approval
        // The tool may override the default requirement derived from the
        // approval policy + sandbox policy.
        let mut already_approved = false;
        let requirement = tool.exec_approval_requirement(req).unwrap_or_else(|| {
            default_exec_approval_requirement(approval_policy, &turn_ctx.sandbox_policy)
        });
        match requirement {
            ExecApprovalRequirement::Skip { .. } => {
                // Config said no approval needed — record an implicit approval.
                otel.tool_decision(otel_tn, otel_ci, &ReviewDecision::Approved, otel_cfg);
            }
            ExecApprovalRequirement::Forbidden { reason } => {
                return Err(ToolError::Rejected(reason));
            }
            ExecApprovalRequirement::NeedsApproval { reason, .. } => {
                let approval_ctx = ApprovalCtx {
                    session: tool_ctx.session,
                    turn: turn_ctx,
                    call_id: &tool_ctx.call_id,
                    retry_reason: reason,
                };
                let decision = tool.start_approval_async(req, approval_ctx).await;
                otel.tool_decision(otel_tn, otel_ci, &decision, otel_user.clone());
                match decision {
                    ReviewDecision::Denied | ReviewDecision::Abort => {
                        return Err(ToolError::Rejected("rejected by user".to_string()));
                    }
                    ReviewDecision::Approved
                    | ReviewDecision::ApprovedExecpolicyAmendment { .. }
                    | ReviewDecision::ApprovedForSession => {}
                }
                // Remembered so the no-sandbox retry can skip re-prompting
                // when the tool allows it.
                already_approved = true;
            }
        }

        // 2) First attempt under the selected sandbox.
        let initial_sandbox = match tool.sandbox_mode_for_first_attempt(req) {
            SandboxOverride::BypassSandboxFirstAttempt => crate::exec::SandboxType::None,
            SandboxOverride::NoOverride => self
                .sandbox
                .select_initial(&turn_ctx.sandbox_policy, tool.sandbox_preference()),
        };
        // Platform-specific flag gating is handled by SandboxManager::select_initial
        // via crate::safety::get_platform_sandbox().
        let initial_attempt = SandboxAttempt {
            sandbox: initial_sandbox,
            policy: &turn_ctx.sandbox_policy,
            manager: &self.sandbox,
            sandbox_cwd: &turn_ctx.cwd,
            codex_linux_sandbox_exe: turn_ctx.codex_linux_sandbox_exe.as_ref(),
        };
        match tool.run(req, &initial_attempt, tool_ctx).await {
            Ok(out) => {
                // We have a successful initial result
                Ok(out)
            }
            Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied { output }))) => {
                // Sandbox denial: decide whether to escalate to a retry
                // without a sandbox. Tools can opt out entirely.
                if !tool.escalate_on_failure() {
                    return Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied {
                        output,
                    })));
                }

                // Under `Never` or `OnRequest`, do not retry without sandbox; surface a concise
                // sandbox denial that preserves the original output.
                if !tool.wants_no_sandbox_approval(approval_policy) {
                    return Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied {
                        output,
                    })));
                }

                // Ask for approval before retrying without sandbox.
                // `already_approved` lets a prior user approval bypass the
                // second prompt when the tool's policy allows.
                if !tool.should_bypass_approval(approval_policy, already_approved) {
                    let reason_msg = build_denial_reason_from_output(output.as_ref());
                    let approval_ctx = ApprovalCtx {
                        session: tool_ctx.session,
                        turn: turn_ctx,
                        call_id: &tool_ctx.call_id,
                        retry_reason: Some(reason_msg),
                    };
                    let decision = tool.start_approval_async(req, approval_ctx).await;
                    otel.tool_decision(otel_tn, otel_ci, &decision, otel_user);
                    match decision {
                        ReviewDecision::Denied | ReviewDecision::Abort => {
                            return Err(ToolError::Rejected("rejected by user".to_string()));
                        }
                        ReviewDecision::Approved
                        | ReviewDecision::ApprovedExecpolicyAmendment { .. }
                        | ReviewDecision::ApprovedForSession => {}
                    }
                }

                // Escalated attempt runs with no sandbox at all.
                let escalated_attempt = SandboxAttempt {
                    sandbox: crate::exec::SandboxType::None,
                    policy: &turn_ctx.sandbox_policy,
                    manager: &self.sandbox,
                    sandbox_cwd: &turn_ctx.cwd,
                    codex_linux_sandbox_exe: None,
                };
                // Second attempt.
                (*tool).run(req, &escalated_attempt, tool_ctx).await
            }
            // Any other success/error passes through untouched.
            other => other,
        }
    }
}

/// Build the retry-approval reason shown to the user after a sandbox denial.
fn build_denial_reason_from_output(_output: &ExecToolCallOutput) -> String {
    // Keep approval reason terse and stable for UX/tests, but accept the
    // output so we can evolve heuristics later without touching call sites.
    "command failed; retry without sandbox?".to_string()
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/events.rs
codex-rs/core/src/tools/events.rs
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::error::CodexErr;
use crate::error::SandboxErr;
use crate::exec::ExecToolCallOutput;
use crate::function_tool::FunctionCallError;
use crate::parse_command::parse_command;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandBeginEvent;
use crate::protocol::ExecCommandEndEvent;
use crate::protocol::ExecCommandSource;
use crate::protocol::FileChange;
use crate::protocol::PatchApplyBeginEvent;
use crate::protocol::PatchApplyEndEvent;
use crate::protocol::TurnDiffEvent;
use crate::tools::context::SharedTurnDiffTracker;
use crate::tools::sandboxing::ToolError;
use codex_protocol::parse_command::ParsedCommand;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::time::Duration;

use super::format_exec_output_str;

/// Borrowed context threaded through every event emission: the session to
/// send on, the current turn, the tool call id, and (for patch tools) the
/// shared turn-diff tracker.
#[derive(Clone, Copy)]
pub(crate) struct ToolEventCtx<'a> {
    pub session: &'a Session,
    pub turn: &'a TurnContext,
    pub call_id: &'a str,
    pub turn_diff_tracker: Option<&'a SharedTurnDiffTracker>,
}

impl<'a> ToolEventCtx<'a> {
    pub fn new(
        session: &'a Session,
        turn: &'a TurnContext,
        call_id: &'a str,
        turn_diff_tracker: Option<&'a SharedTurnDiffTracker>,
    ) -> Self {
        Self {
            session,
            turn,
            call_id,
            turn_diff_tracker,
        }
    }
}

/// Lifecycle stage of a tool call being reported to the UI.
pub(crate) enum ToolEventStage {
    Begin,
    Success(ExecToolCallOutput),
    Failure(ToolEventFailure),
}

/// A failure either carries full exec output (sandbox denial/timeout) or
/// just a message (all other errors).
pub(crate) enum ToolEventFailure {
    Output(ExecToolCallOutput),
    Message(String),
}

/// Send an `ExecCommandBegin` event for the given command invocation.
pub(crate) async fn emit_exec_command_begin(
    ctx: ToolEventCtx<'_>,
    command: &[String],
    cwd: &Path,
    parsed_cmd: &[ParsedCommand],
    source: ExecCommandSource,
    interaction_input: Option<String>,
    process_id: Option<&str>,
) {
    ctx.session
        .send_event(
            ctx.turn,
            EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
                call_id: ctx.call_id.to_string(),
                process_id: process_id.map(str::to_owned),
                turn_id: ctx.turn.sub_id.clone(),
                command: command.to_vec(),
                cwd: cwd.to_path_buf(),
                parsed_cmd: parsed_cmd.to_vec(),
                source,
                interaction_input,
            }),
        )
        .await;
}

// Concrete, allocation-free emitter: avoid trait objects and boxed futures.
pub(crate) enum ToolEmitter {
    /// Plain shell command; `freeform` selects the human-readable output
    /// format over the structured JSON one.
    Shell {
        command: Vec<String>,
        cwd: PathBuf,
        source: ExecCommandSource,
        parsed_cmd: Vec<ParsedCommand>,
        freeform: bool,
    },
    /// apply_patch invocation; emits PatchApplyBegin/End + turn diffs.
    ApplyPatch {
        changes: HashMap<PathBuf, FileChange>,
        auto_approved: bool,
    },
    /// unified_exec session command, optionally tied to a running process.
    UnifiedExec {
        command: Vec<String>,
        cwd: PathBuf,
        source: ExecCommandSource,
        interaction_input: Option<String>,
        parsed_cmd: Vec<ParsedCommand>,
        process_id: Option<String>,
    },
}

impl ToolEmitter {
    /// Build a shell emitter; parses the command for display up front.
    pub fn shell(
        command: Vec<String>,
        cwd: PathBuf,
        source: ExecCommandSource,
        freeform: bool,
    ) -> Self {
        let parsed_cmd = parse_command(&command);
        Self::Shell {
            command,
            cwd,
            source,
            parsed_cmd,
            freeform,
        }
    }

    /// Build an apply_patch emitter from the planned file changes.
    pub fn apply_patch(changes: HashMap<PathBuf, FileChange>, auto_approved: bool) -> Self {
        Self::ApplyPatch {
            changes,
            auto_approved,
        }
    }

    /// Build a unified_exec emitter; parses the command for display up front.
    pub fn unified_exec(
        command: &[String],
        cwd: PathBuf,
        source: ExecCommandSource,
        process_id: Option<String>,
    ) -> Self {
        let parsed_cmd = parse_command(command);
        Self::UnifiedExec {
            command: command.to_vec(),
            cwd,
            source,
            interaction_input: None, // TODO(jif) drop this field in the protocol.
            parsed_cmd,
            process_id,
        }
    }

    /// Emit the event(s) corresponding to `stage` for this tool kind.
    pub async fn emit(&self, ctx: ToolEventCtx<'_>, stage: ToolEventStage) {
        match (self, stage) {
            (
                Self::Shell {
                    command,
                    cwd,
                    source,
                    parsed_cmd,
                    ..
                },
                stage,
            ) => {
                emit_exec_stage(
                    ctx,
                    ExecCommandInput::new(command, cwd.as_path(), parsed_cmd, *source, None, None),
                    stage,
                )
                .await;
            }
            (
                Self::ApplyPatch {
                    changes,
                    auto_approved,
                },
                ToolEventStage::Begin,
            ) => {
                // Record the planned changes in the turn diff tracker before
                // announcing the patch to the UI.
                if let Some(tracker) = ctx.turn_diff_tracker {
                    let mut guard = tracker.lock().await;
                    guard.on_patch_begin(changes);
                }

                ctx.session
                    .send_event(
                        ctx.turn,
                        EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
                            call_id: ctx.call_id.to_string(),
                            turn_id: ctx.turn.sub_id.clone(),
                            auto_approved: *auto_approved,
                            changes: changes.clone(),
                        }),
                    )
                    .await;
            }
            (Self::ApplyPatch { changes, .. }, ToolEventStage::Success(output)) => {
                emit_patch_end(
                    ctx,
                    changes.clone(),
                    output.stdout.text.clone(),
                    output.stderr.text.clone(),
                    output.exit_code == 0,
                )
                .await;
            }
            (
                Self::ApplyPatch { changes, .. },
                ToolEventStage::Failure(ToolEventFailure::Output(output)),
            ) => {
                // Failure with full output: success flag still derives from
                // the exit code carried in the output.
                emit_patch_end(
                    ctx,
                    changes.clone(),
                    output.stdout.text.clone(),
                    output.stderr.text.clone(),
                    output.exit_code == 0,
                )
                .await;
            }
            (
                Self::ApplyPatch { changes, .. },
                ToolEventStage::Failure(ToolEventFailure::Message(message)),
            ) => {
                // Message-only failure: report the message as stderr.
                emit_patch_end(
                    ctx,
                    changes.clone(),
                    String::new(),
                    (*message).to_string(),
                    false,
                )
                .await;
            }
            (
                Self::UnifiedExec {
                    command,
                    cwd,
                    source,
                    interaction_input,
                    parsed_cmd,
                    process_id,
                },
                stage,
            ) => {
                emit_exec_stage(
                    ctx,
                    ExecCommandInput::new(
                        command,
                        cwd.as_path(),
                        parsed_cmd,
                        *source,
                        interaction_input.as_deref(),
                        process_id.as_deref(),
                    ),
                    stage,
                )
                .await;
            }
        }
    }

    /// Convenience wrapper for the Begin stage.
    pub async fn begin(&self, ctx: ToolEventCtx<'_>) {
        self.emit(ctx, ToolEventStage::Begin).await;
    }

    /// Choose the model-facing output format: freeform shell commands get
    /// the human-readable sections; everything else gets structured JSON.
    fn format_exec_output_for_model(
        &self,
        output: &ExecToolCallOutput,
        ctx: ToolEventCtx<'_>,
    ) -> String {
        match self {
            Self::Shell { freeform: true, .. } => {
                super::format_exec_output_for_model_freeform(output, ctx.turn.truncation_policy)
            }
            _ => super::format_exec_output_for_model_structured(output, ctx.turn.truncation_policy),
        }
    }

    /// Emit the end-of-call event for `out` and convert it into the string
    /// result (or `FunctionCallError`) returned to the model.
    pub async fn finish(
        &self,
        ctx: ToolEventCtx<'_>,
        out: Result<ExecToolCallOutput, ToolError>,
    ) -> Result<String, FunctionCallError> {
        let (event, result) = match out {
            Ok(output) => {
                let content = self.format_exec_output_for_model(&output, ctx);
                let exit_code = output.exit_code;
                let event = ToolEventStage::Success(output);
                // A non-zero exit still produces formatted content, but it is
                // surfaced to the model as an error response.
                let result = if exit_code == 0 {
                    Ok(content)
                } else {
                    Err(FunctionCallError::RespondToModel(content))
                };
                (event, result)
            }
            Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Timeout { output })))
            | Err(ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied { output }))) => {
                // Sandbox timeout/denial carry real output: format it for the
                // model and report the failure with that output attached.
                let response = self.format_exec_output_for_model(&output, ctx);
                let event = ToolEventStage::Failure(ToolEventFailure::Output(*output));
                let result = Err(FunctionCallError::RespondToModel(response));
                (event, result)
            }
            Err(ToolError::Codex(err)) => {
                let message = format!("execution error: {err:?}");
                let event = ToolEventStage::Failure(ToolEventFailure::Message(message.clone()));
                let result = Err(FunctionCallError::RespondToModel(message));
                (event, result)
            }
            Err(ToolError::Rejected(msg)) => {
                // Normalize common rejection messages for exec tools so tests and
                // users see a clear, consistent phrase.
                let normalized = if msg == "rejected by user" {
                    "exec command rejected by user".to_string()
                } else {
                    msg
                };
                let event = ToolEventStage::Failure(ToolEventFailure::Message(normalized.clone()));
                let result = Err(FunctionCallError::RespondToModel(normalized));
                (event, result)
            }
        };
        self.emit(ctx, event).await;
        result
    }
}

/// Borrowed description of an exec invocation shared by begin/end emitters.
struct ExecCommandInput<'a> {
    command: &'a [String],
    cwd: &'a Path,
    parsed_cmd: &'a [ParsedCommand],
    source: ExecCommandSource,
    interaction_input: Option<&'a str>,
    process_id: Option<&'a str>,
}

impl<'a> ExecCommandInput<'a> {
    fn new(
        command: &'a [String],
        cwd: &'a Path,
        parsed_cmd: &'a [ParsedCommand],
        source: ExecCommandSource,
        interaction_input: Option<&'a str>,
        process_id: Option<&'a str>,
    ) -> Self {
        Self {
            command,
            cwd,
            parsed_cmd,
            source,
            interaction_input,
            process_id,
        }
    }
}

/// Owned result fields copied into the `ExecCommandEnd` event.
struct ExecCommandResult {
    stdout: String,
    stderr: String,
    aggregated_output: String,
    exit_code: i32,
    duration: Duration,
    formatted_output: String,
}

/// Translate a lifecycle stage into the matching exec begin/end event.
async fn emit_exec_stage(
    ctx: ToolEventCtx<'_>,
    exec_input: ExecCommandInput<'_>,
    stage: ToolEventStage,
) {
    match stage {
        ToolEventStage::Begin => {
            emit_exec_command_begin(
                ctx,
                exec_input.command,
                exec_input.cwd,
                exec_input.parsed_cmd,
                exec_input.source,
                exec_input.interaction_input.map(str::to_owned),
                exec_input.process_id,
            )
            .await;
        }
        // Success and output-carrying failure are reported identically; the
        // exit code inside the output distinguishes them for consumers.
        ToolEventStage::Success(output)
        | ToolEventStage::Failure(ToolEventFailure::Output(output)) => {
            let exec_result = ExecCommandResult {
                stdout: output.stdout.text.clone(),
                stderr: output.stderr.text.clone(),
                aggregated_output: output.aggregated_output.text.clone(),
                exit_code: output.exit_code,
                duration: output.duration,
                formatted_output: format_exec_output_str(&output, ctx.turn.truncation_policy),
            };
            emit_exec_end(ctx, exec_input, exec_result).await;
        }
        ToolEventStage::Failure(ToolEventFailure::Message(message)) => {
            // Message-only failure: synthesize an end event with exit code -1
            // and the message as stderr/aggregated output.
            let text = message.to_string();
            let exec_result = ExecCommandResult {
                stdout: String::new(),
                stderr: text.clone(),
                aggregated_output: text.clone(),
                exit_code: -1,
                duration: Duration::ZERO,
                formatted_output: text,
            };
            emit_exec_end(ctx, exec_input, exec_result).await;
        }
    }
}

/// Send the `ExecCommandEnd` event built from the input + result pair.
async fn emit_exec_end(
    ctx: ToolEventCtx<'_>,
    exec_input: ExecCommandInput<'_>,
    exec_result: ExecCommandResult,
) {
    ctx.session
        .send_event(
            ctx.turn,
            EventMsg::ExecCommandEnd(ExecCommandEndEvent {
                call_id: ctx.call_id.to_string(),
                process_id: exec_input.process_id.map(str::to_owned),
                turn_id: ctx.turn.sub_id.clone(),
                command: exec_input.command.to_vec(),
                cwd: exec_input.cwd.to_path_buf(),
                parsed_cmd: exec_input.parsed_cmd.to_vec(),
                source: exec_input.source,
                interaction_input: exec_input.interaction_input.map(str::to_owned),
                stdout: exec_result.stdout,
                stderr: exec_result.stderr,
                aggregated_output: exec_result.aggregated_output,
                exit_code: exec_result.exit_code,
                duration: exec_result.duration,
                formatted_output: exec_result.formatted_output,
            }),
        )
        .await;
}

/// Send `PatchApplyEnd`, then (if a tracker is present) a `TurnDiff` event
/// with the accumulated unified diff for the turn.
async fn emit_patch_end(
    ctx: ToolEventCtx<'_>,
    changes: HashMap<PathBuf, FileChange>,
    stdout: String,
    stderr: String,
    success: bool,
) {
    ctx.session
        .send_event(
            ctx.turn,
            EventMsg::PatchApplyEnd(PatchApplyEndEvent {
                call_id: ctx.call_id.to_string(),
                turn_id: ctx.turn.sub_id.clone(),
                stdout,
                stderr,
                success,
                changes,
            }),
        )
        .await;

    if let Some(tracker) = ctx.turn_diff_tracker {
        // Take the lock only long enough to compute the diff.
        let unified_diff = {
            let mut guard = tracker.lock().await;
            guard.get_unified_diff()
        };
        if let Ok(Some(unified_diff)) = unified_diff {
            ctx.session
                .send_event(ctx.turn, EventMsg::TurnDiff(TurnDiffEvent { unified_diff }))
                .await;
        }
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/context.rs
codex-rs/core/src/tools/context.rs
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::tools::TELEMETRY_PREVIEW_MAX_BYTES;
use crate::tools::TELEMETRY_PREVIEW_MAX_LINES;
use crate::tools::TELEMETRY_PREVIEW_TRUNCATION_NOTICE;
use crate::turn_diff_tracker::TurnDiffTracker;
use codex_protocol::models::FunctionCallOutputContentItem;
use codex_protocol::models::FunctionCallOutputPayload;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ShellToolCallParams;
use codex_utils_string::take_bytes_at_char_boundary;
use mcp_types::CallToolResult;
use std::borrow::Cow;
use std::sync::Arc;
use tokio::sync::Mutex;

/// Turn-diff tracker shared (async mutex) across the tools of one turn.
pub type SharedTurnDiffTracker = Arc<Mutex<TurnDiffTracker>>;

/// Everything a tool handler needs to service one call: session, turn,
/// diff tracker, identifiers, and the call's payload.
#[derive(Clone)]
pub struct ToolInvocation {
    pub session: Arc<Session>,
    pub turn: Arc<TurnContext>,
    pub tracker: SharedTurnDiffTracker,
    pub call_id: String,
    pub tool_name: String,
    pub payload: ToolPayload,
}

/// The argument shape a tool call arrived with.
#[derive(Clone, Debug)]
pub enum ToolPayload {
    /// Standard function call with a JSON-arguments string.
    Function {
        arguments: String,
    },
    /// Custom (freeform) tool call carrying raw input text.
    Custom {
        input: String,
    },
    /// Local shell tool call with already-parsed parameters.
    LocalShell {
        params: ShellToolCallParams,
    },
    /// MCP tool call routed to `server`/`tool` with raw JSON arguments.
    Mcp {
        server: String,
        tool: String,
        raw_arguments: String,
    },
}

impl ToolPayload {
    /// Borrowed (or, for LocalShell, joined) representation of the payload
    /// for logging; avoids cloning in the common cases.
    pub fn log_payload(&self) -> Cow<'_, str> {
        match self {
            ToolPayload::Function { arguments } => Cow::Borrowed(arguments),
            ToolPayload::Custom { input } => Cow::Borrowed(input),
            ToolPayload::LocalShell { params } => Cow::Owned(params.command.join(" ")),
            ToolPayload::Mcp { raw_arguments, .. } => Cow::Borrowed(raw_arguments),
        }
    }
}

/// The result a tool handler produced, prior to protocol conversion.
#[derive(Clone)]
pub enum ToolOutput {
    Function {
        // Plain text representation of the tool output.
        content: String,
        // Some tool calls such as MCP calls may return structured content that can get parsed into an array of polymorphic content items.
        content_items: Option<Vec<FunctionCallOutputContentItem>>,
        success: Option<bool>,
    },
    Mcp {
        result: Result<CallToolResult, String>,
    },
}

impl ToolOutput {
    /// Size-bounded preview of the output for telemetry/log events.
    pub fn log_preview(&self) -> String {
        match self {
            ToolOutput::Function { content, .. } => telemetry_preview(content),
            ToolOutput::Mcp { result } => format!("{result:?}"),
        }
    }

    /// Success flag for logging; an unset Function `success` counts as true.
    pub fn success_for_logging(&self) -> bool {
        match self {
            ToolOutput::Function { success, .. } => success.unwrap_or(true),
            ToolOutput::Mcp { result } => result.is_ok(),
        }
    }

    /// Convert into the protocol response item for `call_id`. The original
    /// payload decides the variant: Custom payloads must round-trip as
    /// `CustomToolCallOutput` even though the handler produced `Function`.
    pub fn into_response(self, call_id: &str, payload: &ToolPayload) -> ResponseInputItem {
        match self {
            ToolOutput::Function {
                content,
                content_items,
                success,
            } => {
                if matches!(payload, ToolPayload::Custom { .. }) {
                    ResponseInputItem::CustomToolCallOutput {
                        call_id: call_id.to_string(),
                        output: content,
                    }
                } else {
                    ResponseInputItem::FunctionCallOutput {
                        call_id: call_id.to_string(),
                        output: FunctionCallOutputPayload {
                            content,
                            content_items,
                            success,
                        },
                    }
                }
            }
            ToolOutput::Mcp { result } => ResponseInputItem::McpToolCallOutput {
                call_id: call_id.to_string(),
                result,
            },
        }
    }
}

/// Truncate `content` to at most TELEMETRY_PREVIEW_MAX_BYTES bytes (cut at a
/// char boundary) and TELEMETRY_PREVIEW_MAX_LINES lines, appending the
/// truncation notice when anything was dropped; returns the original string
/// unchanged when it fits both limits.
fn telemetry_preview(content: &str) -> String {
    let truncated_slice = take_bytes_at_char_boundary(content, TELEMETRY_PREVIEW_MAX_BYTES);
    let truncated_by_bytes = truncated_slice.len() < content.len();

    // Rebuild up to MAX_LINES lines from the byte-truncated slice.
    let mut preview = String::new();
    let mut lines_iter = truncated_slice.lines();
    for idx in 0..TELEMETRY_PREVIEW_MAX_LINES {
        match lines_iter.next() {
            Some(line) => {
                if idx > 0 {
                    preview.push('\n');
                }
                preview.push_str(line);
            }
            None => break,
        }
    }
    let truncated_by_lines = lines_iter.next().is_some();

    if !truncated_by_bytes && !truncated_by_lines {
        return content.to_string();
    }

    // Preserve a newline that immediately follows the kept text so the
    // notice does not get glued onto a complete line.
    if preview.len() < truncated_slice.len()
        && truncated_slice
            .as_bytes()
            .get(preview.len())
            .is_some_and(|byte| *byte == b'\n')
    {
        preview.push('\n');
    }

    if !preview.is_empty() && !preview.ends_with('\n') {
        preview.push('\n');
    }
    preview.push_str(TELEMETRY_PREVIEW_TRUNCATION_NOTICE);
    preview
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn custom_tool_calls_should_roundtrip_as_custom_outputs() {
        let payload = ToolPayload::Custom {
            input: "patch".to_string(),
        };
        let response = ToolOutput::Function {
            content: "patched".to_string(),
            content_items: None,
            success: Some(true),
        }
        .into_response("call-42", &payload);

        match response {
            ResponseInputItem::CustomToolCallOutput { call_id, output } => {
                assert_eq!(call_id, "call-42");
                assert_eq!(output, "patched");
            }
            other => panic!("expected CustomToolCallOutput, got {other:?}"),
        }
    }

    #[test]
    fn function_payloads_remain_function_outputs() {
        let payload = ToolPayload::Function {
            arguments: "{}".to_string(),
        };
        let response = ToolOutput::Function {
            content: "ok".to_string(),
            content_items: None,
            success: Some(true),
        }
        .into_response("fn-1", &payload);

        match response {
            ResponseInputItem::FunctionCallOutput { call_id, output } => {
                assert_eq!(call_id, "fn-1");
                assert_eq!(output.content, "ok");
                assert!(output.content_items.is_none());
                assert_eq!(output.success, Some(true));
            }
            other => panic!("expected FunctionCallOutput, got {other:?}"),
        }
    }

    #[test]
    fn telemetry_preview_returns_original_within_limits() {
        let content = "short output";
        assert_eq!(telemetry_preview(content), content);
    }

    #[test]
    fn telemetry_preview_truncates_by_bytes() {
        let content = "x".repeat(TELEMETRY_PREVIEW_MAX_BYTES + 8);
        let preview = telemetry_preview(&content);
        assert!(preview.contains(TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
        assert!(
            preview.len()
                <= TELEMETRY_PREVIEW_MAX_BYTES + TELEMETRY_PREVIEW_TRUNCATION_NOTICE.len() + 1
        );
    }

    #[test]
    fn telemetry_preview_truncates_by_lines() {
        let content = (0..(TELEMETRY_PREVIEW_MAX_LINES + 5))
            .map(|idx| format!("line {idx}"))
            .collect::<Vec<_>>()
            .join("\n");
        let preview = telemetry_preview(&content);
        let lines: Vec<&str> = preview.lines().collect();
        assert!(lines.len() <= TELEMETRY_PREVIEW_MAX_LINES + 1);
        assert_eq!(lines.last(), Some(&TELEMETRY_PREVIEW_TRUNCATION_NOTICE));
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/spec.rs
codex-rs/core/src/tools/spec.rs
use crate::client_common::tools::ResponsesApiTool; use crate::client_common::tools::ToolSpec; use crate::features::Feature; use crate::features::Features; use crate::models_manager::model_family::ModelFamily; use crate::tools::handlers::PLAN_TOOL; use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool; use crate::tools::handlers::apply_patch::create_apply_patch_json_tool; use crate::tools::registry::ToolRegistryBuilder; use codex_protocol::openai_models::ApplyPatchToolType; use codex_protocol::openai_models::ConfigShellToolType; use serde::Deserialize; use serde::Serialize; use serde_json::Value as JsonValue; use serde_json::json; use std::collections::BTreeMap; use std::collections::HashMap; #[derive(Debug, Clone)] pub(crate) struct ToolsConfig { pub shell_type: ConfigShellToolType, pub apply_patch_tool_type: Option<ApplyPatchToolType>, pub web_search_request: bool, pub include_view_image_tool: bool, pub experimental_supported_tools: Vec<String>, } pub(crate) struct ToolsConfigParams<'a> { pub(crate) model_family: &'a ModelFamily, pub(crate) features: &'a Features, } impl ToolsConfig { pub fn new(params: &ToolsConfigParams) -> Self { let ToolsConfigParams { model_family, features, } = params; let include_apply_patch_tool = features.enabled(Feature::ApplyPatchFreeform); let include_web_search_request = features.enabled(Feature::WebSearchRequest); let include_view_image_tool = features.enabled(Feature::ViewImageTool); let shell_type = if !features.enabled(Feature::ShellTool) { ConfigShellToolType::Disabled } else if features.enabled(Feature::UnifiedExec) { // If ConPTY not supported (for old Windows versions), fallback on ShellCommand. 
if codex_utils_pty::conpty_supported() { ConfigShellToolType::UnifiedExec } else { ConfigShellToolType::ShellCommand } } else { model_family.shell_type }; let apply_patch_tool_type = match model_family.apply_patch_tool_type { Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform), Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function), None => { if include_apply_patch_tool { Some(ApplyPatchToolType::Freeform) } else { None } } }; Self { shell_type, apply_patch_tool_type, web_search_request: include_web_search_request, include_view_image_tool, experimental_supported_tools: model_family.experimental_supported_tools.clone(), } } } /// Generic JSON‑Schema subset needed for our tool definitions #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(tag = "type", rename_all = "lowercase")] pub(crate) enum JsonSchema { Boolean { #[serde(skip_serializing_if = "Option::is_none")] description: Option<String>, }, String { #[serde(skip_serializing_if = "Option::is_none")] description: Option<String>, }, /// MCP schema allows "number" | "integer" for Number #[serde(alias = "integer")] Number { #[serde(skip_serializing_if = "Option::is_none")] description: Option<String>, }, Array { items: Box<JsonSchema>, #[serde(skip_serializing_if = "Option::is_none")] description: Option<String>, }, Object { properties: BTreeMap<String, JsonSchema>, #[serde(skip_serializing_if = "Option::is_none")] required: Option<Vec<String>>, #[serde( rename = "additionalProperties", skip_serializing_if = "Option::is_none" )] additional_properties: Option<AdditionalProperties>, }, } /// Whether additional properties are allowed, and if so, any required schema #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(untagged)] pub(crate) enum AdditionalProperties { Boolean(bool), Schema(Box<JsonSchema>), } impl From<bool> for AdditionalProperties { fn from(b: bool) -> Self { Self::Boolean(b) } } impl From<JsonSchema> for AdditionalProperties { fn 
from(s: JsonSchema) -> Self { Self::Schema(Box::new(s)) } } fn create_exec_command_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "cmd".to_string(), JsonSchema::String { description: Some("Shell command to execute.".to_string()), }, ); properties.insert( "workdir".to_string(), JsonSchema::String { description: Some( "Optional working directory to run the command in; defaults to the turn cwd." .to_string(), ), }, ); properties.insert( "shell".to_string(), JsonSchema::String { description: Some("Shell binary to launch. Defaults to /bin/bash.".to_string()), }, ); properties.insert( "login".to_string(), JsonSchema::Boolean { description: Some( "Whether to run the shell with -l/-i semantics. Defaults to true.".to_string(), ), }, ); properties.insert( "yield_time_ms".to_string(), JsonSchema::Number { description: Some( "How long to wait (in milliseconds) for output before yielding.".to_string(), ), }, ); properties.insert( "max_output_tokens".to_string(), JsonSchema::Number { description: Some( "Maximum number of tokens to return. Excess output will be truncated.".to_string(), ), }, ); properties.insert( "sandbox_permissions".to_string(), JsonSchema::String { description: Some( "Sandbox permissions for the command. Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\"." .to_string(), ), }, ); properties.insert( "justification".to_string(), JsonSchema::String { description: Some( "Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command." .to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "exec_command".to_string(), description: "Runs a command in a PTY, returning output or a session ID for ongoing interaction." 
.to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["cmd".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_write_stdin_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "session_id".to_string(), JsonSchema::Number { description: Some("Identifier of the running unified exec session.".to_string()), }, ); properties.insert( "chars".to_string(), JsonSchema::String { description: Some("Bytes to write to stdin (may be empty to poll).".to_string()), }, ); properties.insert( "yield_time_ms".to_string(), JsonSchema::Number { description: Some( "How long to wait (in milliseconds) for output before yielding.".to_string(), ), }, ); properties.insert( "max_output_tokens".to_string(), JsonSchema::Number { description: Some( "Maximum number of tokens to return. Excess output will be truncated.".to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "write_stdin".to_string(), description: "Writes characters to an existing unified exec session and returns recent output." .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["session_id".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_shell_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "command".to_string(), JsonSchema::Array { items: Box::new(JsonSchema::String { description: None }), description: Some("The command to execute".to_string()), }, ); properties.insert( "workdir".to_string(), JsonSchema::String { description: Some("The working directory to execute the command in".to_string()), }, ); properties.insert( "timeout_ms".to_string(), JsonSchema::Number { description: Some("The timeout for the command in milliseconds".to_string()), }, ); properties.insert( "sandbox_permissions".to_string(), JsonSchema::String { description: Some("Sandbox permissions for the command. 
Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\".".to_string()), }, ); properties.insert( "justification".to_string(), JsonSchema::String { description: Some("Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command.".to_string()), }, ); let description = if cfg!(windows) { r#"Runs a Powershell command (Windows) and returns its output. Arguments to `shell` will be passed to CreateProcessW(). Most commands should be prefixed with ["powershell.exe", "-Command"]. Examples of valid command strings: - ls -a (show hidden): ["powershell.exe", "-Command", "Get-ChildItem -Force"] - recursive find by name: ["powershell.exe", "-Command", "Get-ChildItem -Recurse -Filter *.py"] - recursive grep: ["powershell.exe", "-Command", "Get-ChildItem -Path C:\\myrepo -Recurse | Select-String -Pattern 'TODO' -CaseSensitive"] - ps aux | grep python: ["powershell.exe", "-Command", "Get-Process | Where-Object { $_.ProcessName -like '*python*' }"] - setting an env var: ["powershell.exe", "-Command", "$env:FOO='bar'; echo $env:FOO"] - running an inline Python script: ["powershell.exe", "-Command", "@'\\nprint('Hello, world!')\\n'@ | python -"]"# } else { r#"Runs a shell command and returns its output. - The arguments to `shell` will be passed to execvp(). Most terminal commands should be prefixed with ["bash", "-lc"]. - Always set the `workdir` param when using the shell function. 
Do not use `cd` unless absolutely necessary."# }.to_string(); ToolSpec::Function(ResponsesApiTool { name: "shell".to_string(), description, strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["command".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_shell_command_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "command".to_string(), JsonSchema::String { description: Some( "The shell script to execute in the user's default shell".to_string(), ), }, ); properties.insert( "workdir".to_string(), JsonSchema::String { description: Some("The working directory to execute the command in".to_string()), }, ); properties.insert( "login".to_string(), JsonSchema::Boolean { description: Some( "Whether to run the shell with login shell semantics. Defaults to true." .to_string(), ), }, ); properties.insert( "timeout_ms".to_string(), JsonSchema::Number { description: Some("The timeout for the command in milliseconds".to_string()), }, ); properties.insert( "sandbox_permissions".to_string(), JsonSchema::String { description: Some("Sandbox permissions for the command. Set to \"require_escalated\" to request running without sandbox restrictions; defaults to \"use_default\".".to_string()), }, ); properties.insert( "justification".to_string(), JsonSchema::String { description: Some("Only set if sandbox_permissions is \"require_escalated\". 1-sentence explanation of why we want to run this command.".to_string()), }, ); let description = if cfg!(windows) { r#"Runs a Powershell command (Windows) and returns its output. 
Examples of valid command strings: - ls -a (show hidden): "Get-ChildItem -Force" - recursive find by name: "Get-ChildItem -Recurse -Filter *.py" - recursive grep: "Get-ChildItem -Path C:\\myrepo -Recurse | Select-String -Pattern 'TODO' -CaseSensitive" - ps aux | grep python: "Get-Process | Where-Object { $_.ProcessName -like '*python*' }" - setting an env var: "$env:FOO='bar'; echo $env:FOO" - running an inline Python script: "@'\\nprint('Hello, world!')\\n'@ | python -"# } else { r#"Runs a shell command and returns its output. - Always set the `workdir` param when using the shell_command function. Do not use `cd` unless absolutely necessary."# }.to_string(); ToolSpec::Function(ResponsesApiTool { name: "shell_command".to_string(), description, strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["command".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_view_image_tool() -> ToolSpec { // Support only local filesystem path. let mut properties = BTreeMap::new(); properties.insert( "path".to_string(), JsonSchema::String { description: Some("Local filesystem path to an image file".to_string()), }, ); ToolSpec::Function(ResponsesApiTool { name: "view_image".to_string(), description: "Attach a local image (by filesystem path) to the conversation context for this turn." 
.to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["path".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_test_sync_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "sleep_before_ms".to_string(), JsonSchema::Number { description: Some("Optional delay in milliseconds before any other action".to_string()), }, ); properties.insert( "sleep_after_ms".to_string(), JsonSchema::Number { description: Some( "Optional delay in milliseconds after completing the barrier".to_string(), ), }, ); let mut barrier_properties = BTreeMap::new(); barrier_properties.insert( "id".to_string(), JsonSchema::String { description: Some( "Identifier shared by concurrent calls that should rendezvous".to_string(), ), }, ); barrier_properties.insert( "participants".to_string(), JsonSchema::Number { description: Some( "Number of tool calls that must arrive before the barrier opens".to_string(), ), }, ); barrier_properties.insert( "timeout_ms".to_string(), JsonSchema::Number { description: Some("Maximum time in milliseconds to wait at the barrier".to_string()), }, ); properties.insert( "barrier".to_string(), JsonSchema::Object { properties: barrier_properties, required: Some(vec!["id".to_string(), "participants".to_string()]), additional_properties: Some(false.into()), }, ); ToolSpec::Function(ResponsesApiTool { name: "test_sync_tool".to_string(), description: "Internal synchronization helper used by Codex integration tests.".to_string(), strict: false, parameters: JsonSchema::Object { properties, required: None, additional_properties: Some(false.into()), }, }) } fn create_grep_files_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "pattern".to_string(), JsonSchema::String { description: Some("Regular expression pattern to search for.".to_string()), }, ); properties.insert( "include".to_string(), JsonSchema::String { description: Some( "Optional glob that limits which 
files are searched (e.g. \"*.rs\" or \ \"*.{ts,tsx}\")." .to_string(), ), }, ); properties.insert( "path".to_string(), JsonSchema::String { description: Some( "Directory or file path to search. Defaults to the session's working directory." .to_string(), ), }, ); properties.insert( "limit".to_string(), JsonSchema::Number { description: Some( "Maximum number of file paths to return (defaults to 100).".to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "grep_files".to_string(), description: "Finds files whose contents match the pattern and lists them by modification \ time." .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["pattern".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_read_file_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "file_path".to_string(), JsonSchema::String { description: Some("Absolute path to the file".to_string()), }, ); properties.insert( "offset".to_string(), JsonSchema::Number { description: Some( "The line number to start reading from. Must be 1 or greater.".to_string(), ), }, ); properties.insert( "limit".to_string(), JsonSchema::Number { description: Some("The maximum number of lines to return.".to_string()), }, ); properties.insert( "mode".to_string(), JsonSchema::String { description: Some( "Optional mode selector: \"slice\" for simple ranges (default) or \"indentation\" \ to expand around an anchor line." 
.to_string(), ), }, ); let mut indentation_properties = BTreeMap::new(); indentation_properties.insert( "anchor_line".to_string(), JsonSchema::Number { description: Some( "Anchor line to center the indentation lookup on (defaults to offset).".to_string(), ), }, ); indentation_properties.insert( "max_levels".to_string(), JsonSchema::Number { description: Some( "How many parent indentation levels (smaller indents) to include.".to_string(), ), }, ); indentation_properties.insert( "include_siblings".to_string(), JsonSchema::Boolean { description: Some( "When true, include additional blocks that share the anchor indentation." .to_string(), ), }, ); indentation_properties.insert( "include_header".to_string(), JsonSchema::Boolean { description: Some( "Include doc comments or attributes directly above the selected block.".to_string(), ), }, ); indentation_properties.insert( "max_lines".to_string(), JsonSchema::Number { description: Some( "Hard cap on the number of lines returned when using indentation mode.".to_string(), ), }, ); properties.insert( "indentation".to_string(), JsonSchema::Object { properties: indentation_properties, required: None, additional_properties: Some(false.into()), }, ); ToolSpec::Function(ResponsesApiTool { name: "read_file".to_string(), description: "Reads a local file with 1-indexed line numbers, supporting slice and indentation-aware block modes." .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["file_path".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_list_dir_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "dir_path".to_string(), JsonSchema::String { description: Some("Absolute path to the directory to list.".to_string()), }, ); properties.insert( "offset".to_string(), JsonSchema::Number { description: Some( "The entry number to start listing from. 
Must be 1 or greater.".to_string(), ), }, ); properties.insert( "limit".to_string(), JsonSchema::Number { description: Some("The maximum number of entries to return.".to_string()), }, ); properties.insert( "depth".to_string(), JsonSchema::Number { description: Some( "The maximum directory depth to traverse. Must be 1 or greater.".to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "list_dir".to_string(), description: "Lists entries in a local directory with 1-indexed entry numbers and simple type labels." .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["dir_path".to_string()]), additional_properties: Some(false.into()), }, }) } fn create_list_mcp_resources_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "server".to_string(), JsonSchema::String { description: Some( "Optional MCP server name. When omitted, lists resources from every configured server." .to_string(), ), }, ); properties.insert( "cursor".to_string(), JsonSchema::String { description: Some( "Opaque cursor returned by a previous list_mcp_resources call for the same server." .to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "list_mcp_resources".to_string(), description: "Lists resources provided by MCP servers. Resources allow servers to share data that provides context to language models, such as files, database schemas, or application-specific information. Prefer resources over web search when possible.".to_string(), strict: false, parameters: JsonSchema::Object { properties, required: None, additional_properties: Some(false.into()), }, }) } fn create_list_mcp_resource_templates_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "server".to_string(), JsonSchema::String { description: Some( "Optional MCP server name. When omitted, lists resource templates from all configured servers." 
.to_string(), ), }, ); properties.insert( "cursor".to_string(), JsonSchema::String { description: Some( "Opaque cursor returned by a previous list_mcp_resource_templates call for the same server." .to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "list_mcp_resource_templates".to_string(), description: "Lists resource templates provided by MCP servers. Parameterized resource templates allow servers to share data that takes parameters and provides context to language models, such as files, database schemas, or application-specific information. Prefer resource templates over web search when possible.".to_string(), strict: false, parameters: JsonSchema::Object { properties, required: None, additional_properties: Some(false.into()), }, }) } fn create_read_mcp_resource_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "server".to_string(), JsonSchema::String { description: Some( "MCP server name exactly as configured. Must match the 'server' field returned by list_mcp_resources." .to_string(), ), }, ); properties.insert( "uri".to_string(), JsonSchema::String { description: Some( "Resource URI to read. Must be one of the URIs returned by list_mcp_resources." .to_string(), ), }, ); ToolSpec::Function(ResponsesApiTool { name: "read_mcp_resource".to_string(), description: "Read a specific resource from an MCP server given the server name and resource URI." 
.to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["server".to_string(), "uri".to_string()]), additional_properties: Some(false.into()), }, }) } /// TODO(dylan): deprecate once we get rid of json tool #[derive(Serialize, Deserialize)] pub(crate) struct ApplyPatchToolArgs { pub(crate) input: String, } /// Returns JSON values that are compatible with Function Calling in the /// Responses API: /// https://platform.openai.com/docs/guides/function-calling?api-mode=responses pub fn create_tools_json_for_responses_api( tools: &[ToolSpec], ) -> crate::error::Result<Vec<serde_json::Value>> { let mut tools_json = Vec::new(); for tool in tools { let json = serde_json::to_value(tool)?; tools_json.push(json); } Ok(tools_json) } /// Returns JSON values that are compatible with Function Calling in the /// Chat Completions API: /// https://platform.openai.com/docs/guides/function-calling?api-mode=chat pub(crate) fn create_tools_json_for_chat_completions_api( tools: &[ToolSpec], ) -> crate::error::Result<Vec<serde_json::Value>> { // We start with the JSON for the Responses API and than rewrite it to match // the chat completions tool call format. let responses_api_tools_json = create_tools_json_for_responses_api(tools)?; let tools_json = responses_api_tools_json .into_iter() .filter_map(|mut tool| { if tool.get("type") != Some(&serde_json::Value::String("function".to_string())) { return None; } if let Some(map) = tool.as_object_mut() { let name = map .get("name") .and_then(|v| v.as_str()) .unwrap_or_default() .to_string(); // Remove "type" field as it is not needed in chat completions. map.remove("type"); Some(json!({ "type": "function", "name": name, "function": map, })) } else { None } }) .collect::<Vec<serde_json::Value>>(); Ok(tools_json) } pub(crate) fn mcp_tool_to_openai_tool( fully_qualified_name: String, tool: mcp_types::Tool, ) -> Result<ResponsesApiTool, serde_json::Error> { let mcp_types::Tool { description, mut input_schema, .. 
} = tool; // OpenAI models mandate the "properties" field in the schema. The Agents // SDK fixed this by inserting an empty object for "properties" if it is not // already present https://github.com/openai/openai-agents-python/issues/449 // so here we do the same. if input_schema.properties.is_none() { input_schema.properties = Some(serde_json::Value::Object(serde_json::Map::new())); } // Serialize to a raw JSON value so we can sanitize schemas coming from MCP // servers. Some servers omit the top-level or nested `type` in JSON // Schemas (e.g. using enum/anyOf), or use unsupported variants like // `integer`. Our internal JsonSchema is a small subset and requires // `type`, so we coerce/sanitize here for compatibility. let mut serialized_input_schema = serde_json::to_value(input_schema)?; sanitize_json_schema(&mut serialized_input_schema); let input_schema = serde_json::from_value::<JsonSchema>(serialized_input_schema)?; Ok(ResponsesApiTool { name: fully_qualified_name, description: description.unwrap_or_default(), strict: false, parameters: input_schema, }) } /// Sanitize a JSON Schema (as serde_json::Value) so it can fit our limited /// JsonSchema enum. This function: /// - Ensures every schema object has a "type". If missing, infers it from /// common keywords (properties => object, items => array, enum/const/format => string) /// and otherwise defaults to "string". /// - Fills required child fields (e.g. array items, object properties) with /// permissive defaults when absent. fn sanitize_json_schema(value: &mut JsonValue) { match value { JsonValue::Bool(_) => { // JSON Schema boolean form: true/false. Coerce to an accept-all string. 
*value = json!({ "type": "string" }); } JsonValue::Array(arr) => { for v in arr.iter_mut() { sanitize_json_schema(v); } } JsonValue::Object(map) => { // First, recursively sanitize known nested schema holders if let Some(props) = map.get_mut("properties") && let Some(props_map) = props.as_object_mut() { for (_k, v) in props_map.iter_mut() { sanitize_json_schema(v); } } if let Some(items) = map.get_mut("items") { sanitize_json_schema(items); } // Some schemas use oneOf/anyOf/allOf - sanitize their entries for combiner in ["oneOf", "anyOf", "allOf", "prefixItems"] { if let Some(v) = map.get_mut(combiner) { sanitize_json_schema(v); } } // Normalize/ensure type let mut ty = map.get("type").and_then(|v| v.as_str()).map(str::to_string); // If type is an array (union), pick first supported; else leave to inference if ty.is_none() && let Some(JsonValue::Array(types)) = map.get("type") { for t in types { if let Some(tt) = t.as_str() && matches!( tt, "object" | "array" | "string" | "number" | "integer" | "boolean" ) { ty = Some(tt.to_string()); break; } } } // Infer type if still missing if ty.is_none() { if map.contains_key("properties") || map.contains_key("required") || map.contains_key("additionalProperties") { ty = Some("object".to_string()); } else if map.contains_key("items") || map.contains_key("prefixItems") { ty = Some("array".to_string()); } else if map.contains_key("enum")
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
true
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/runtimes/unified_exec.rs
codex-rs/core/src/tools/runtimes/unified_exec.rs
/* Runtime: unified exec Handles approval + sandbox orchestration for unified exec requests, delegating to the session manager to spawn PTYs once an ExecEnv is prepared. */ use crate::error::CodexErr; use crate::error::SandboxErr; use crate::exec::ExecExpiration; use crate::features::Feature; use crate::powershell::prefix_powershell_script_with_utf8; use crate::sandboxing::SandboxPermissions; use crate::shell::ShellType; use crate::tools::runtimes::build_command_spec; use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot; use crate::tools::sandboxing::Approvable; use crate::tools::sandboxing::ApprovalCtx; use crate::tools::sandboxing::ExecApprovalRequirement; use crate::tools::sandboxing::SandboxAttempt; use crate::tools::sandboxing::SandboxOverride; use crate::tools::sandboxing::Sandboxable; use crate::tools::sandboxing::SandboxablePreference; use crate::tools::sandboxing::ToolCtx; use crate::tools::sandboxing::ToolError; use crate::tools::sandboxing::ToolRuntime; use crate::tools::sandboxing::with_cached_approval; use crate::unified_exec::UnifiedExecError; use crate::unified_exec::UnifiedExecSession; use crate::unified_exec::UnifiedExecSessionManager; use codex_protocol::protocol::ReviewDecision; use futures::future::BoxFuture; use std::collections::HashMap; use std::path::PathBuf; #[derive(Clone, Debug)] pub struct UnifiedExecRequest { pub command: Vec<String>, pub cwd: PathBuf, pub env: HashMap<String, String>, pub sandbox_permissions: SandboxPermissions, pub justification: Option<String>, pub exec_approval_requirement: ExecApprovalRequirement, } #[derive(serde::Serialize, Clone, Debug, Eq, PartialEq, Hash)] pub struct UnifiedExecApprovalKey { pub command: Vec<String>, pub cwd: PathBuf, pub sandbox_permissions: SandboxPermissions, } pub struct UnifiedExecRuntime<'a> { manager: &'a UnifiedExecSessionManager, } impl UnifiedExecRequest { pub fn new( command: Vec<String>, cwd: PathBuf, env: HashMap<String, String>, sandbox_permissions: SandboxPermissions, 
justification: Option<String>, exec_approval_requirement: ExecApprovalRequirement, ) -> Self { Self { command, cwd, env, sandbox_permissions, justification, exec_approval_requirement, } } } impl<'a> UnifiedExecRuntime<'a> { pub fn new(manager: &'a UnifiedExecSessionManager) -> Self { Self { manager } } } impl Sandboxable for UnifiedExecRuntime<'_> { fn sandbox_preference(&self) -> SandboxablePreference { SandboxablePreference::Auto } fn escalate_on_failure(&self) -> bool { true } } impl Approvable<UnifiedExecRequest> for UnifiedExecRuntime<'_> { type ApprovalKey = UnifiedExecApprovalKey; fn approval_key(&self, req: &UnifiedExecRequest) -> Self::ApprovalKey { UnifiedExecApprovalKey { command: req.command.clone(), cwd: req.cwd.clone(), sandbox_permissions: req.sandbox_permissions, } } fn start_approval_async<'b>( &'b mut self, req: &'b UnifiedExecRequest, ctx: ApprovalCtx<'b>, ) -> BoxFuture<'b, ReviewDecision> { let key = self.approval_key(req); let session = ctx.session; let turn = ctx.turn; let call_id = ctx.call_id.to_string(); let command = req.command.clone(); let cwd = req.cwd.clone(); let reason = ctx .retry_reason .clone() .or_else(|| req.justification.clone()); Box::pin(async move { with_cached_approval(&session.services, key, || async move { session .request_command_approval( turn, call_id, command, cwd, reason, req.exec_approval_requirement .proposed_execpolicy_amendment() .cloned(), ) .await }) .await }) } fn exec_approval_requirement( &self, req: &UnifiedExecRequest, ) -> Option<ExecApprovalRequirement> { Some(req.exec_approval_requirement.clone()) } fn sandbox_mode_for_first_attempt(&self, req: &UnifiedExecRequest) -> SandboxOverride { if req.sandbox_permissions.requires_escalated_permissions() || matches!( req.exec_approval_requirement, ExecApprovalRequirement::Skip { bypass_sandbox: true, .. 
} ) { SandboxOverride::BypassSandboxFirstAttempt } else { SandboxOverride::NoOverride } } } impl<'a> ToolRuntime<UnifiedExecRequest, UnifiedExecSession> for UnifiedExecRuntime<'a> { async fn run( &mut self, req: &UnifiedExecRequest, attempt: &SandboxAttempt<'_>, ctx: &ToolCtx<'_>, ) -> Result<UnifiedExecSession, ToolError> { let base_command = &req.command; let session_shell = ctx.session.user_shell(); let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref()); let command = if matches!(session_shell.shell_type, ShellType::PowerShell) && ctx.session.features().enabled(Feature::PowershellUtf8) { prefix_powershell_script_with_utf8(&command) } else { command }; let spec = build_command_spec( &command, &req.cwd, &req.env, ExecExpiration::DefaultTimeout, req.sandbox_permissions, req.justification.clone(), ) .map_err(|_| ToolError::Rejected("missing command line for PTY".to_string()))?; let exec_env = attempt .env_for(spec) .map_err(|err| ToolError::Codex(err.into()))?; self.manager .open_session_with_exec_env(&exec_env) .await .map_err(|err| match err { UnifiedExecError::SandboxDenied { output, .. } => { ToolError::Codex(CodexErr::Sandbox(SandboxErr::Denied { output: Box::new(output), })) } other => ToolError::Rejected(other.to_string()), }) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/runtimes/apply_patch.rs
codex-rs/core/src/tools/runtimes/apply_patch.rs
//! Apply Patch runtime: executes verified patches under the orchestrator.
//!
//! Assumes `apply_patch` verification/approval happened upstream. Reuses that
//! decision to avoid re-prompting, builds the self-invocation command for
//! `codex --codex-run-as-apply-patch`, and runs under the current
//! `SandboxAttempt` with a minimal environment.

use crate::CODEX_APPLY_PATCH_ARG1;
use crate::exec::ExecToolCallOutput;
use crate::sandboxing::CommandSpec;
use crate::sandboxing::SandboxPermissions;
use crate::sandboxing::execute_env;
use crate::tools::sandboxing::Approvable;
use crate::tools::sandboxing::ApprovalCtx;
use crate::tools::sandboxing::SandboxAttempt;
use crate::tools::sandboxing::Sandboxable;
use crate::tools::sandboxing::SandboxablePreference;
use crate::tools::sandboxing::ToolCtx;
use crate::tools::sandboxing::ToolError;
use crate::tools::sandboxing::ToolRuntime;
use crate::tools::sandboxing::with_cached_approval;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::ReviewDecision;
use futures::future::BoxFuture;
use std::collections::HashMap;
use std::path::PathBuf;

/// Everything needed to re-run an already-verified patch as a subprocess.
#[derive(Clone, Debug)]
pub struct ApplyPatchRequest {
    /// Raw patch text, exactly as verified upstream.
    pub patch: String,
    /// Directory the patch's relative paths are resolved against.
    pub cwd: PathBuf,
    /// Optional timeout in milliseconds (`None` = default expiration).
    pub timeout_ms: Option<u64>,
    /// Whether the user explicitly approved this action upstream; replayed
    /// here so the runtime does not prompt a second time.
    pub user_explicitly_approved: bool,
    /// Explicit codex executable to self-invoke; falls back to
    /// `std::env::current_exe()` when `None`.
    pub codex_exe: Option<PathBuf>,
}

/// Stateless runtime that applies a patch by re-invoking the codex binary
/// under the orchestrator's sandbox machinery.
#[derive(Default)]
pub struct ApplyPatchRuntime;

/// Approval-cache key: the same patch text in the same cwd reuses a prior
/// decision instead of prompting again.
#[derive(serde::Serialize, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct ApprovalKey {
    patch: String,
    cwd: PathBuf,
}

impl ApplyPatchRuntime {
    pub fn new() -> Self {
        Self
    }

    /// Builds the `codex --codex-run-as-apply-patch <patch>` command spec.
    fn build_command_spec(req: &ApplyPatchRequest) -> Result<CommandSpec, ToolError> {
        use std::env;
        // Prefer the explicitly supplied executable; otherwise self-locate.
        let exe = if let Some(path) = &req.codex_exe {
            path.clone()
        } else {
            env::current_exe()
                .map_err(|e| ToolError::Rejected(format!("failed to determine codex exe: {e}")))?
        };
        let program = exe.to_string_lossy().to_string();
        Ok(CommandSpec {
            program,
            args: vec![CODEX_APPLY_PATCH_ARG1.to_string(), req.patch.clone()],
            cwd: req.cwd.clone(),
            expiration: req.timeout_ms.into(),
            // Run apply_patch with a minimal environment for determinism and to avoid leaks.
            env: HashMap::new(),
            sandbox_permissions: SandboxPermissions::UseDefault,
            justification: None,
        })
    }

    /// Wires the subprocess's stdout back to the client event stream for this
    /// call.
    fn stdout_stream(ctx: &ToolCtx<'_>) -> Option<crate::exec::StdoutStream> {
        Some(crate::exec::StdoutStream {
            sub_id: ctx.turn.sub_id.clone(),
            call_id: ctx.call_id.clone(),
            tx_event: ctx.session.get_tx_event(),
        })
    }
}

impl Sandboxable for ApplyPatchRuntime {
    // Let the orchestrator choose the sandbox mode.
    fn sandbox_preference(&self) -> SandboxablePreference {
        SandboxablePreference::Auto
    }

    // A sandbox-denied first attempt may be retried with escalation.
    fn escalate_on_failure(&self) -> bool {
        true
    }
}

impl Approvable<ApplyPatchRequest> for ApplyPatchRuntime {
    type ApprovalKey = ApprovalKey;

    fn approval_key(&self, req: &ApplyPatchRequest) -> Self::ApprovalKey {
        ApprovalKey {
            patch: req.patch.clone(),
            cwd: req.cwd.clone(),
        }
    }

    /// Resolves the approval decision, avoiding a second prompt in the common
    /// case: only a sandbox retry (`retry_reason` set) asks the user again;
    /// otherwise the upstream decision is replayed as Approved(ForSession).
    fn start_approval_async<'a>(
        &'a mut self,
        req: &'a ApplyPatchRequest,
        ctx: ApprovalCtx<'a>,
    ) -> BoxFuture<'a, ReviewDecision> {
        let key = self.approval_key(req);
        let session = ctx.session;
        let turn = ctx.turn;
        let call_id = ctx.call_id.to_string();
        let cwd = req.cwd.clone();
        let retry_reason = ctx.retry_reason.clone();
        let user_explicitly_approved = req.user_explicitly_approved;
        Box::pin(async move {
            with_cached_approval(&session.services, key, move || async move {
                if let Some(reason) = retry_reason {
                    // Escalated retry after a sandbox failure: prompt the user.
                    session
                        .request_command_approval(
                            turn,
                            call_id,
                            vec!["apply_patch".to_string()],
                            cwd,
                            Some(reason),
                            None,
                        )
                        .await
                } else if user_explicitly_approved {
                    ReviewDecision::ApprovedForSession
                } else {
                    ReviewDecision::Approved
                }
            })
            .await
        })
    }

    // Running outside the sandbox requires approval unless the policy is Never.
    fn wants_no_sandbox_approval(&self, policy: AskForApproval) -> bool {
        !matches!(policy, AskForApproval::Never)
    }
}

impl ToolRuntime<ApplyPatchRequest, ExecToolCallOutput> for ApplyPatchRuntime {
    /// Executes the patch subprocess under the current sandbox attempt and
    /// returns its raw exec output.
    async fn run(
        &mut self,
        req: &ApplyPatchRequest,
        attempt: &SandboxAttempt<'_>,
        ctx: &ToolCtx<'_>,
    ) -> Result<ExecToolCallOutput, ToolError> {
        let spec = Self::build_command_spec(req)?;
        let env = attempt
            .env_for(spec)
            .map_err(|err| ToolError::Codex(err.into()))?;
        let out = execute_env(env, attempt.policy, Self::stdout_stream(ctx))
            .await
            .map_err(ToolError::Codex)?;
        Ok(out)
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/runtimes/shell.rs
codex-rs/core/src/tools/runtimes/shell.rs
/*
Runtime: shell

Executes shell requests under the orchestrator: asks for approval when
needed, builds a CommandSpec, and runs it under the current SandboxAttempt.
*/

use crate::exec::ExecToolCallOutput;
use crate::features::Feature;
use crate::powershell::prefix_powershell_script_with_utf8;
use crate::sandboxing::SandboxPermissions;
use crate::sandboxing::execute_env;
use crate::shell::ShellType;
use crate::tools::runtimes::build_command_spec;
use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot;
use crate::tools::sandboxing::Approvable;
use crate::tools::sandboxing::ApprovalCtx;
use crate::tools::sandboxing::ExecApprovalRequirement;
use crate::tools::sandboxing::SandboxAttempt;
use crate::tools::sandboxing::SandboxOverride;
use crate::tools::sandboxing::Sandboxable;
use crate::tools::sandboxing::SandboxablePreference;
use crate::tools::sandboxing::ToolCtx;
use crate::tools::sandboxing::ToolError;
use crate::tools::sandboxing::ToolRuntime;
use crate::tools::sandboxing::with_cached_approval;
use codex_protocol::protocol::ReviewDecision;
use futures::future::BoxFuture;
use std::path::PathBuf;

/// A single shell invocation as requested by the model.
#[derive(Clone, Debug)]
pub struct ShellRequest {
    /// Tokenized command line (program followed by its arguments).
    pub command: Vec<String>,
    /// Working directory for the command.
    pub cwd: PathBuf,
    /// Optional timeout in milliseconds (`None` = default expiration).
    pub timeout_ms: Option<u64>,
    /// Extra environment variables to set for the command.
    pub env: std::collections::HashMap<String, String>,
    /// Sandbox permissions requested for this command.
    pub sandbox_permissions: SandboxPermissions,
    /// Model-supplied justification; surfaced in approval prompts.
    pub justification: Option<String>,
    /// Upstream decision about whether/how approval is required.
    pub exec_approval_requirement: ExecApprovalRequirement,
}

/// Stateless runtime for one-shot shell commands.
#[derive(Default)]
pub struct ShellRuntime;

/// Approval-cache key: the same command in the same cwd with the same
/// permissions reuses a prior decision.
#[derive(serde::Serialize, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) struct ApprovalKey {
    command: Vec<String>,
    cwd: PathBuf,
    sandbox_permissions: SandboxPermissions,
}

impl ShellRuntime {
    pub fn new() -> Self {
        Self
    }

    /// Wires the command's stdout back to the client event stream for this
    /// call.
    fn stdout_stream(ctx: &ToolCtx<'_>) -> Option<crate::exec::StdoutStream> {
        Some(crate::exec::StdoutStream {
            sub_id: ctx.turn.sub_id.clone(),
            call_id: ctx.call_id.clone(),
            tx_event: ctx.session.get_tx_event(),
        })
    }
}

impl Sandboxable for ShellRuntime {
    // Let the orchestrator choose the sandbox mode.
    fn sandbox_preference(&self) -> SandboxablePreference {
        SandboxablePreference::Auto
    }

    // A sandbox-denied first attempt may be retried with escalation.
    fn escalate_on_failure(&self) -> bool {
        true
    }
}

impl Approvable<ShellRequest> for ShellRuntime {
    type ApprovalKey = ApprovalKey;

    fn approval_key(&self, req: &ShellRequest) -> Self::ApprovalKey {
        ApprovalKey {
            command: req.command.clone(),
            cwd: req.cwd.clone(),
            sandbox_permissions: req.sandbox_permissions,
        }
    }

    /// Prompts the user (via the session) for approval, consulting the
    /// per-session approval cache first. The prompt's reason prefers the
    /// sandbox-retry reason over the model's original justification.
    fn start_approval_async<'a>(
        &'a mut self,
        req: &'a ShellRequest,
        ctx: ApprovalCtx<'a>,
    ) -> BoxFuture<'a, ReviewDecision> {
        let key = self.approval_key(req);
        let command = req.command.clone();
        let cwd = req.cwd.clone();
        let reason = ctx
            .retry_reason
            .clone()
            .or_else(|| req.justification.clone());
        let session = ctx.session;
        let turn = ctx.turn;
        let call_id = ctx.call_id.to_string();
        Box::pin(async move {
            with_cached_approval(&session.services, key, move || async move {
                session
                    .request_command_approval(
                        turn,
                        call_id,
                        command,
                        cwd,
                        reason,
                        req.exec_approval_requirement
                            .proposed_execpolicy_amendment()
                            .cloned(),
                    )
                    .await
            })
            .await
        })
    }

    fn exec_approval_requirement(&self, req: &ShellRequest) -> Option<ExecApprovalRequirement> {
        Some(req.exec_approval_requirement.clone())
    }

    /// Bypass the sandbox on the first attempt when escalated permissions
    /// were requested, or when approval was skipped with an explicit
    /// bypass-sandbox decision.
    fn sandbox_mode_for_first_attempt(&self, req: &ShellRequest) -> SandboxOverride {
        if req.sandbox_permissions.requires_escalated_permissions()
            || matches!(
                req.exec_approval_requirement,
                ExecApprovalRequirement::Skip {
                    bypass_sandbox: true,
                    ..
                }
            )
        {
            SandboxOverride::BypassSandboxFirstAttempt
        } else {
            SandboxOverride::NoOverride
        }
    }
}

impl ToolRuntime<ShellRequest, ExecToolCallOutput> for ShellRuntime {
    /// Runs the command under the current sandbox attempt: rewrites POSIX
    /// `-lc` invocations to source the shell snapshot, optionally prefixes a
    /// UTF-8 preamble for PowerShell, then executes.
    async fn run(
        &mut self,
        req: &ShellRequest,
        attempt: &SandboxAttempt<'_>,
        ctx: &ToolCtx<'_>,
    ) -> Result<ExecToolCallOutput, ToolError> {
        let base_command = &req.command;
        let session_shell = ctx.session.user_shell();
        // POSIX shells: source the captured environment snapshot first.
        let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref());
        let command = if matches!(session_shell.shell_type, ShellType::PowerShell)
            && ctx.session.features().enabled(Feature::PowershellUtf8)
        {
            prefix_powershell_script_with_utf8(&command)
        } else {
            command
        };
        let spec = build_command_spec(
            &command,
            &req.cwd,
            &req.env,
            req.timeout_ms.into(),
            req.sandbox_permissions,
            req.justification.clone(),
        )?;
        let env = attempt
            .env_for(spec)
            .map_err(|err| ToolError::Codex(err.into()))?;
        let out = execute_env(env, attempt.policy, Self::stdout_stream(ctx))
            .await
            .map_err(ToolError::Codex)?;
        Ok(out)
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/runtimes/mod.rs
codex-rs/core/src/tools/runtimes/mod.rs
/* Module: runtimes Concrete ToolRuntime implementations for specific tools. Each runtime stays small and focused and reuses the orchestrator for approvals + sandbox + retry. */ use crate::exec::ExecExpiration; use crate::sandboxing::CommandSpec; use crate::sandboxing::SandboxPermissions; use crate::shell::Shell; use crate::tools::sandboxing::ToolError; use std::collections::HashMap; use std::path::Path; pub mod apply_patch; pub mod shell; pub mod unified_exec; /// Shared helper to construct a CommandSpec from a tokenized command line. /// Validates that at least a program is present. pub(crate) fn build_command_spec( command: &[String], cwd: &Path, env: &HashMap<String, String>, expiration: ExecExpiration, sandbox_permissions: SandboxPermissions, justification: Option<String>, ) -> Result<CommandSpec, ToolError> { let (program, args) = command .split_first() .ok_or_else(|| ToolError::Rejected("command args are empty".to_string()))?; Ok(CommandSpec { program: program.clone(), args: args.to_vec(), cwd: cwd.to_path_buf(), env: env.clone(), expiration, sandbox_permissions, justification, }) } /// POSIX-only helper: for commands produced by `Shell::derive_exec_args` /// for Bash/Zsh/sh of the form `[shell_path, "-lc", "<script>"]`, and /// when a snapshot is configured on the session shell, rewrite the argv /// to a single non-login shell that sources the snapshot before running /// the original script: /// /// shell -lc "<script>" /// => shell -c ". SNAPSHOT && <script>" /// /// On non-POSIX shells or non-matching commands this is a no-op. pub(crate) fn maybe_wrap_shell_lc_with_snapshot( command: &[String], session_shell: &Shell, ) -> Vec<String> { let Some(snapshot) = &session_shell.shell_snapshot else { return command.to_vec(); }; if command.len() < 3 { return command.to_vec(); } let flag = command[1].as_str(); if flag != "-lc" { return command.to_vec(); } let snapshot_path = snapshot.path.to_string_lossy(); let rewritten_script = format!(". 
\"{snapshot_path}\" && {}", command[2]); let mut rewritten = command.to_vec(); rewritten[1] = "-c".to_string(); rewritten[2] = rewritten_script; rewritten }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/unified_exec.rs
codex-rs/core/src/tools/handlers/unified_exec.rs
//! Tool handler for the unified exec surface (`exec_command` / `write_stdin`):
//! parses model-supplied arguments, enforces the approval policy, and
//! delegates to the `UnifiedExecSessionManager` for PTY-backed execution.

use crate::function_tool::FunctionCallError;
use crate::is_safe_command::is_known_safe_command;
use crate::protocol::EventMsg;
use crate::protocol::ExecCommandSource;
use crate::protocol::TerminalInteractionEvent;
use crate::sandboxing::SandboxPermissions;
use crate::shell::Shell;
use crate::shell::get_shell_by_model_provided_path;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::events::ToolEmitter;
use crate::tools::events::ToolEventCtx;
use crate::tools::events::ToolEventStage;
use crate::tools::handlers::apply_patch::intercept_apply_patch;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;
use crate::unified_exec::ExecCommandRequest;
use crate::unified_exec::UnifiedExecContext;
use crate::unified_exec::UnifiedExecResponse;
use crate::unified_exec::UnifiedExecSessionManager;
use crate::unified_exec::WriteStdinRequest;
use async_trait::async_trait;
use serde::Deserialize;
use std::path::PathBuf;
use std::sync::Arc;

pub struct UnifiedExecHandler;

/// Model-supplied arguments for `exec_command`.
#[derive(Debug, Deserialize)]
struct ExecCommandArgs {
    // Shell command text to run.
    cmd: String,
    // Optional working directory; empty string is treated as unset.
    #[serde(default)]
    workdir: Option<String>,
    // Optional explicit shell path; defaults to the session shell.
    #[serde(default)]
    shell: Option<String>,
    // Run as a login shell by default.
    #[serde(default = "default_login")]
    login: bool,
    // How long to wait for output before yielding back to the model.
    #[serde(default = "default_exec_yield_time_ms")]
    yield_time_ms: u64,
    // Optional cap on the tokenized output size.
    #[serde(default)]
    max_output_tokens: Option<usize>,
    #[serde(default)]
    sandbox_permissions: SandboxPermissions,
    // Model's justification for escalated permissions, if any.
    #[serde(default)]
    justification: Option<String>,
}

/// Model-supplied arguments for `write_stdin`.
#[derive(Debug, Deserialize)]
struct WriteStdinArgs {
    // The model is trained on `session_id`.
    session_id: i32,
    #[serde(default)]
    chars: String,
    #[serde(default = "default_write_stdin_yield_time_ms")]
    yield_time_ms: u64,
    #[serde(default)]
    max_output_tokens: Option<usize>,
}

fn default_exec_yield_time_ms() -> u64 {
    10000
}

fn default_write_stdin_yield_time_ms() -> u64 {
    250
}

fn default_login() -> bool {
    true
}

#[async_trait]
impl ToolHandler for UnifiedExecHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    fn matches_kind(&self, payload: &ToolPayload) -> bool {
        matches!(payload, ToolPayload::Function { .. })
    }

    /// Conservatively reports whether the command may mutate state: any
    /// unparseable invocation is treated as mutating.
    async fn is_mutating(&self, invocation: &ToolInvocation) -> bool {
        let ToolPayload::Function { arguments } = &invocation.payload else {
            tracing::error!(
                "This should never happen, invocation payload is wrong: {:?}",
                invocation.payload
            );
            return true;
        };
        let Ok(params) = serde_json::from_str::<ExecCommandArgs>(arguments) else {
            return true;
        };
        let command = get_command(&params, invocation.session.user_shell());
        !is_known_safe_command(&command)
    }

    /// Dispatches `exec_command` / `write_stdin` to the session manager and
    /// formats the response for the model.
    async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation {
            session,
            turn,
            tracker,
            call_id,
            tool_name,
            payload,
            ..
        } = invocation;
        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "unified_exec handler received unsupported payload".to_string(),
                ));
            }
        };

        let manager: &UnifiedExecSessionManager = &session.services.unified_exec_manager;
        let context = UnifiedExecContext::new(session.clone(), turn.clone(), call_id.clone());

        let response = match tool_name.as_str() {
            "exec_command" => {
                let args: ExecCommandArgs = serde_json::from_str(&arguments).map_err(|err| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse exec_command arguments: {err:?}"
                    ))
                })?;
                // Reserve a process id up front; released again on every
                // early-exit path below.
                let process_id = manager.allocate_process_id().await;
                let command = get_command(&args, session.user_shell());
                let ExecCommandArgs {
                    workdir,
                    yield_time_ms,
                    max_output_tokens,
                    sandbox_permissions,
                    justification,
                    ..
                } = args;

                // Escalated permissions are only grantable under OnRequest.
                if sandbox_permissions.requires_escalated_permissions()
                    && !matches!(
                        context.turn.approval_policy,
                        codex_protocol::protocol::AskForApproval::OnRequest
                    )
                {
                    manager.release_process_id(&process_id).await;
                    return Err(FunctionCallError::RespondToModel(format!(
                        "approval policy is {policy:?}; reject command — you cannot ask for escalated permissions if the approval policy is {policy:?}",
                        policy = context.turn.approval_policy
                    )));
                }

                // Empty workdir strings are treated as unset.
                let workdir = workdir.filter(|value| !value.is_empty());
                let workdir = workdir.map(|dir| context.turn.resolve_path(Some(dir)));
                let cwd = workdir.clone().unwrap_or_else(|| context.turn.cwd.clone());

                // `apply_patch` invocations are handled by a dedicated path.
                if let Some(output) = intercept_apply_patch(
                    &command,
                    &cwd,
                    Some(yield_time_ms),
                    context.session.as_ref(),
                    context.turn.as_ref(),
                    Some(&tracker),
                    &context.call_id,
                    tool_name.as_str(),
                )
                .await?
                {
                    manager.release_process_id(&process_id).await;
                    return Ok(output);
                }

                let event_ctx = ToolEventCtx::new(
                    context.session.as_ref(),
                    context.turn.as_ref(),
                    &context.call_id,
                    None,
                );
                let emitter = ToolEmitter::unified_exec(
                    &command,
                    cwd.clone(),
                    ExecCommandSource::UnifiedExecStartup,
                    Some(process_id.clone()),
                );
                emitter.emit(event_ctx, ToolEventStage::Begin).await;

                manager
                    .exec_command(
                        ExecCommandRequest {
                            command,
                            process_id,
                            yield_time_ms,
                            max_output_tokens,
                            workdir,
                            sandbox_permissions,
                            justification,
                        },
                        &context,
                    )
                    .await
                    .map_err(|err| {
                        FunctionCallError::RespondToModel(format!("exec_command failed: {err:?}"))
                    })?
            }
            "write_stdin" => {
                let args: WriteStdinArgs = serde_json::from_str(&arguments).map_err(|err| {
                    FunctionCallError::RespondToModel(format!(
                        "failed to parse write_stdin arguments: {err:?}"
                    ))
                })?;
                let response = manager
                    .write_stdin(WriteStdinRequest {
                        process_id: &args.session_id.to_string(),
                        input: &args.chars,
                        yield_time_ms: args.yield_time_ms,
                        max_output_tokens: args.max_output_tokens,
                    })
                    .await
                    .map_err(|err| {
                        FunctionCallError::RespondToModel(format!("write_stdin failed: {err:?}"))
                    })?;
                // Surface the stdin interaction to the client UI.
                let interaction = TerminalInteractionEvent {
                    call_id: response.event_call_id.clone(),
                    process_id: args.session_id.to_string(),
                    stdin: args.chars.clone(),
                };
                session
                    .send_event(turn.as_ref(), EventMsg::TerminalInteraction(interaction))
                    .await;
                response
            }
            other => {
                return Err(FunctionCallError::RespondToModel(format!(
                    "unsupported unified exec function {other}"
                )));
            }
        };

        let content = format_response(&response);
        Ok(ToolOutput::Function {
            content,
            content_items: None,
            success: Some(true),
        })
    }
}

/// Derives the argv for the request: a model-specified shell (with snapshot
/// sourcing disabled) when given, otherwise the session shell.
fn get_command(args: &ExecCommandArgs, session_shell: Arc<Shell>) -> Vec<String> {
    let model_shell = args.shell.as_ref().map(|shell_str| {
        let mut shell = get_shell_by_model_provided_path(&PathBuf::from(shell_str));
        // Snapshots belong to the session shell only.
        shell.shell_snapshot = None;
        shell
    });
    let shell = model_shell.as_ref().unwrap_or(session_shell.as_ref());
    shell.derive_exec_args(&args.cmd, args.login)
}

/// Renders a response as newline-separated sections the model can read.
fn format_response(response: &UnifiedExecResponse) -> String {
    let mut sections = Vec::new();
    if !response.chunk_id.is_empty() {
        sections.push(format!("Chunk ID: {}", response.chunk_id));
    }
    let wall_time_seconds = response.wall_time.as_secs_f64();
    sections.push(format!("Wall time: {wall_time_seconds:.4} seconds"));
    if let Some(exit_code) = response.exit_code {
        sections.push(format!("Process exited with code {exit_code}"));
    }
    if let Some(process_id) = &response.process_id {
        // Training still uses "session ID".
        sections.push(format!("Process running with session ID {process_id}"));
    }
    if let Some(original_token_count) = response.original_token_count {
        sections.push(format!("Original token count: {original_token_count}"));
    }
    sections.push("Output:".to_string());
    sections.push(response.output.clone());
    sections.join("\n")
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::shell::default_user_shell;
    use std::sync::Arc;

    #[test]
    fn test_get_command_uses_default_shell_when_unspecified() {
        let json = r#"{"cmd": "echo hello"}"#;
        let args: ExecCommandArgs =
            serde_json::from_str(json).expect("deserialize ExecCommandArgs");
        assert!(args.shell.is_none());
        let command = get_command(&args, Arc::new(default_user_shell()));
        assert_eq!(command.len(), 3);
        assert_eq!(command[2], "echo hello");
    }

    #[test]
    fn test_get_command_respects_explicit_bash_shell() {
        let json = r#"{"cmd": "echo hello", "shell": "/bin/bash"}"#;
        let args: ExecCommandArgs =
            serde_json::from_str(json).expect("deserialize ExecCommandArgs");
        assert_eq!(args.shell.as_deref(), Some("/bin/bash"));
        let command = get_command(&args, Arc::new(default_user_shell()));
        assert_eq!(command.last(), Some(&"echo hello".to_string()));
        if command
            .iter()
            .any(|arg| arg.eq_ignore_ascii_case("-Command"))
        {
            assert!(command.contains(&"-NoProfile".to_string()));
        }
    }

    #[test]
    fn test_get_command_respects_explicit_powershell_shell() {
        let json = r#"{"cmd": "echo hello", "shell": "powershell"}"#;
        let args: ExecCommandArgs =
            serde_json::from_str(json).expect("deserialize ExecCommandArgs");
        assert_eq!(args.shell.as_deref(), Some("powershell"));
        let command = get_command(&args, Arc::new(default_user_shell()));
        assert_eq!(command[2], "echo hello");
    }

    #[test]
    fn test_get_command_respects_explicit_cmd_shell() {
        let json = r#"{"cmd": "echo hello", "shell": "cmd"}"#;
        let args: ExecCommandArgs =
            serde_json::from_str(json).expect("deserialize ExecCommandArgs");
        assert_eq!(args.shell.as_deref(), Some("cmd"));
        let command = get_command(&args, Arc::new(default_user_shell()));
        assert_eq!(command[2], "echo hello");
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/list_dir.rs
codex-rs/core/src/tools/handlers/list_dir.rs
//! Tool handler for `list_dir`: paginated, depth-limited directory listing.
//!
//! Entries are collected breadth-first (each directory's children sorted by
//! relative path), the requested page is sliced out of that BFS order, and
//! the page is then re-sorted by relative path so the final output reads as
//! an indented tree.

use std::collections::VecDeque;
use std::ffi::OsStr;
use std::fs::FileType;
use std::path::Path;
use std::path::PathBuf;

use async_trait::async_trait;
use codex_utils_string::take_bytes_at_char_boundary;
use serde::Deserialize;
use tokio::fs;

use crate::function_tool::FunctionCallError;
use crate::tools::context::ToolInvocation;
use crate::tools::context::ToolOutput;
use crate::tools::context::ToolPayload;
use crate::tools::registry::ToolHandler;
use crate::tools::registry::ToolKind;

pub struct ListDirHandler;

// Entry names longer than this are truncated (at a char boundary).
const MAX_ENTRY_LENGTH: usize = 500;
// Spaces of indentation per nesting level in the rendered tree.
const INDENTATION_SPACES: usize = 2;

fn default_offset() -> usize {
    1
}

fn default_limit() -> usize {
    25
}

fn default_depth() -> usize {
    2
}

/// Model-supplied arguments; `offset` is 1-indexed.
#[derive(Deserialize)]
struct ListDirArgs {
    dir_path: String,
    #[serde(default = "default_offset")]
    offset: usize,
    #[serde(default = "default_limit")]
    limit: usize,
    #[serde(default = "default_depth")]
    depth: usize,
}

#[async_trait]
impl ToolHandler for ListDirHandler {
    fn kind(&self) -> ToolKind {
        ToolKind::Function
    }

    /// Validates arguments (positive offset/limit/depth, absolute path) and
    /// returns the rendered listing.
    async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> {
        let ToolInvocation { payload, .. } = invocation;
        let arguments = match payload {
            ToolPayload::Function { arguments } => arguments,
            _ => {
                return Err(FunctionCallError::RespondToModel(
                    "list_dir handler received unsupported payload".to_string(),
                ));
            }
        };

        let args: ListDirArgs = serde_json::from_str(&arguments).map_err(|err| {
            FunctionCallError::RespondToModel(format!(
                "failed to parse function arguments: {err:?}"
            ))
        })?;
        let ListDirArgs {
            dir_path,
            offset,
            limit,
            depth,
        } = args;

        if offset == 0 {
            return Err(FunctionCallError::RespondToModel(
                "offset must be a 1-indexed entry number".to_string(),
            ));
        }
        if limit == 0 {
            return Err(FunctionCallError::RespondToModel(
                "limit must be greater than zero".to_string(),
            ));
        }
        if depth == 0 {
            return Err(FunctionCallError::RespondToModel(
                "depth must be greater than zero".to_string(),
            ));
        }

        let path = PathBuf::from(&dir_path);
        if !path.is_absolute() {
            return Err(FunctionCallError::RespondToModel(
                "dir_path must be an absolute path".to_string(),
            ));
        }

        let entries = list_dir_slice(&path, offset, limit, depth).await?;
        let mut output = Vec::with_capacity(entries.len() + 1);
        output.push(format!("Absolute path: {}", path.display()));
        output.extend(entries);
        Ok(ToolOutput::Function {
            content: output.join("\n"),
            content_items: None,
            success: Some(true),
        })
    }
}

/// Returns the formatted page `[offset, offset+limit)` of the listing, plus a
/// trailing "More than N entries found" marker when results were truncated.
async fn list_dir_slice(
    path: &Path,
    offset: usize,
    limit: usize,
    depth: usize,
) -> Result<Vec<String>, FunctionCallError> {
    let mut entries = Vec::new();
    collect_entries(path, Path::new(""), depth, &mut entries).await?;
    if entries.is_empty() {
        return Ok(Vec::new());
    }

    // Convert the 1-indexed offset into a slice start.
    let start_index = offset - 1;
    if start_index >= entries.len() {
        return Err(FunctionCallError::RespondToModel(
            "offset exceeds directory entry count".to_string(),
        ));
    }
    // Cap the limit so `start + limit` cannot overflow (limit may be usize::MAX).
    let remaining_entries = entries.len() - start_index;
    let capped_limit = limit.min(remaining_entries);
    let end_index = start_index + capped_limit;
    let mut selected_entries = entries[start_index..end_index].to_vec();
    // Re-sort the BFS page by relative path so it renders as a tree.
    selected_entries.sort_unstable_by(|a, b| a.name.cmp(&b.name));

    let mut formatted = Vec::with_capacity(selected_entries.len());
    for entry in &selected_entries {
        formatted.push(format_entry_line(entry));
    }
    if end_index < entries.len() {
        formatted.push(format!("More than {capped_limit} entries found"));
    }
    Ok(formatted)
}

/// Breadth-first traversal up to `depth` levels; each directory's entries are
/// sorted by relative path before being appended to `entries`.
async fn collect_entries(
    dir_path: &Path,
    relative_prefix: &Path,
    depth: usize,
    entries: &mut Vec<DirEntry>,
) -> Result<(), FunctionCallError> {
    let mut queue = VecDeque::new();
    queue.push_back((dir_path.to_path_buf(), relative_prefix.to_path_buf(), depth));

    while let Some((current_dir, prefix, remaining_depth)) = queue.pop_front() {
        let mut read_dir = fs::read_dir(&current_dir).await.map_err(|err| {
            FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
        })?;

        let mut dir_entries = Vec::new();
        while let Some(entry) = read_dir.next_entry().await.map_err(|err| {
            FunctionCallError::RespondToModel(format!("failed to read directory: {err}"))
        })? {
            let file_type = entry.file_type().await.map_err(|err| {
                FunctionCallError::RespondToModel(format!("failed to inspect entry: {err}"))
            })?;
            let file_name = entry.file_name();
            let relative_path = if prefix.as_os_str().is_empty() {
                PathBuf::from(&file_name)
            } else {
                prefix.join(&file_name)
            };
            let display_name = format_entry_component(&file_name);
            // Indentation level = number of ancestor components.
            let display_depth = prefix.components().count();
            // Sort key is the slash-normalized relative path.
            let sort_key = format_entry_name(&relative_path);
            let kind = DirEntryKind::from(&file_type);
            dir_entries.push((
                entry.path(),
                relative_path,
                kind,
                DirEntry {
                    name: sort_key,
                    display_name,
                    depth: display_depth,
                    kind,
                },
            ));
        }

        dir_entries.sort_unstable_by(|a, b| a.3.name.cmp(&b.3.name));
        for (entry_path, relative_path, kind, dir_entry) in dir_entries {
            // Recurse into subdirectories while depth budget remains.
            if kind == DirEntryKind::Directory && remaining_depth > 1 {
                queue.push_back((entry_path, relative_path, remaining_depth - 1));
            }
            entries.push(dir_entry);
        }
    }

    Ok(())
}

/// Slash-normalized relative path, truncated at a UTF-8 char boundary.
fn format_entry_name(path: &Path) -> String {
    let normalized = path.to_string_lossy().replace("\\", "/");
    if normalized.len() > MAX_ENTRY_LENGTH {
        take_bytes_at_char_boundary(&normalized, MAX_ENTRY_LENGTH).to_string()
    } else {
        normalized
    }
}

/// Single path component, truncated at a UTF-8 char boundary.
fn format_entry_component(name: &OsStr) -> String {
    let normalized = name.to_string_lossy();
    if normalized.len() > MAX_ENTRY_LENGTH {
        take_bytes_at_char_boundary(&normalized, MAX_ENTRY_LENGTH).to_string()
    } else {
        normalized.to_string()
    }
}

/// Renders one entry: indentation by depth plus a trailing kind marker
/// (`/` directory, `@` symlink, `?` other, nothing for plain files).
fn format_entry_line(entry: &DirEntry) -> String {
    let indent = " ".repeat(entry.depth * INDENTATION_SPACES);
    let mut name = entry.display_name.clone();
    match entry.kind {
        DirEntryKind::Directory => name.push('/'),
        DirEntryKind::Symlink => name.push('@'),
        DirEntryKind::Other => name.push('?'),
        DirEntryKind::File => {}
    }
    format!("{indent}{name}")
}

/// One collected entry, ready to sort and render.
#[derive(Clone)]
struct DirEntry {
    // Sort key: slash-normalized relative path.
    name: String,
    // Final path component shown in output.
    display_name: String,
    // Nesting level (0 = top of the listed directory).
    depth: usize,
    kind: DirEntryKind,
}

#[derive(Clone, Copy, PartialEq, Eq)]
enum DirEntryKind {
    Directory,
    File,
    Symlink,
    Other,
}

impl From<&FileType> for DirEntryKind {
    fn from(file_type: &FileType) -> Self {
        // Symlink is checked first: is_dir/is_file follow the link target.
        if file_type.is_symlink() {
            DirEntryKind::Symlink
        } else if file_type.is_dir() {
            DirEntryKind::Directory
        } else if file_type.is_file() {
            DirEntryKind::File
        } else {
            DirEntryKind::Other
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn lists_directory_entries() {
        let temp = tempdir().expect("create tempdir");
        let dir_path = temp.path();
        let sub_dir = dir_path.join("nested");
        tokio::fs::create_dir(&sub_dir)
            .await
            .expect("create sub dir");
        let deeper_dir = sub_dir.join("deeper");
        tokio::fs::create_dir(&deeper_dir)
            .await
            .expect("create deeper dir");
        tokio::fs::write(dir_path.join("entry.txt"), b"content")
            .await
            .expect("write file");
        tokio::fs::write(sub_dir.join("child.txt"), b"child")
            .await
            .expect("write child");
        tokio::fs::write(deeper_dir.join("grandchild.txt"), b"grandchild")
            .await
            .expect("write grandchild");

        #[cfg(unix)]
        {
            use std::os::unix::fs::symlink;
            let link_path = dir_path.join("link");
            symlink(dir_path.join("entry.txt"), &link_path).expect("create symlink");
        }

        let entries = list_dir_slice(dir_path, 1, 20, 3)
            .await
            .expect("list directory");

        #[cfg(unix)]
        let expected = vec![
            "entry.txt".to_string(),
            "link@".to_string(),
            "nested/".to_string(),
            "  child.txt".to_string(),
            "  deeper/".to_string(),
            "    grandchild.txt".to_string(),
        ];
        #[cfg(not(unix))]
        let expected = vec![
            "entry.txt".to_string(),
            "nested/".to_string(),
            "  child.txt".to_string(),
            "  deeper/".to_string(),
            "    grandchild.txt".to_string(),
        ];
        assert_eq!(entries, expected);
    }

    #[tokio::test]
    async fn errors_when_offset_exceeds_entries() {
        let temp = tempdir().expect("create tempdir");
        let dir_path = temp.path();
        tokio::fs::create_dir(dir_path.join("nested"))
            .await
            .expect("create sub dir");

        let err = list_dir_slice(dir_path, 10, 1, 2)
            .await
            .expect_err("offset exceeds entries");

        assert_eq!(
            err,
            FunctionCallError::RespondToModel("offset exceeds directory entry count".to_string())
        );
    }

    #[tokio::test]
    async fn respects_depth_parameter() {
        let temp = tempdir().expect("create tempdir");
        let dir_path = temp.path();
        let nested = dir_path.join("nested");
        let deeper = nested.join("deeper");
        tokio::fs::create_dir(&nested).await.expect("create nested");
        tokio::fs::create_dir(&deeper).await.expect("create deeper");
        tokio::fs::write(dir_path.join("root.txt"), b"root")
            .await
            .expect("write root");
        tokio::fs::write(nested.join("child.txt"), b"child")
            .await
            .expect("write nested");
        tokio::fs::write(deeper.join("grandchild.txt"), b"deep")
            .await
            .expect("write deeper");

        let entries_depth_one = list_dir_slice(dir_path, 1, 10, 1)
            .await
            .expect("list depth 1");
        assert_eq!(
            entries_depth_one,
            vec!["nested/".to_string(), "root.txt".to_string(),]
        );

        let entries_depth_two = list_dir_slice(dir_path, 1, 20, 2)
            .await
            .expect("list depth 2");
        assert_eq!(
            entries_depth_two,
            vec![
                "nested/".to_string(),
                "  child.txt".to_string(),
                "  deeper/".to_string(),
                "root.txt".to_string(),
            ]
        );

        let entries_depth_three = list_dir_slice(dir_path, 1, 30, 3)
            .await
            .expect("list depth 3");
        assert_eq!(
            entries_depth_three,
            vec![
                "nested/".to_string(),
                "  child.txt".to_string(),
                "  deeper/".to_string(),
                "    grandchild.txt".to_string(),
                "root.txt".to_string(),
            ]
        );
    }

    #[tokio::test]
    async fn handles_large_limit_without_overflow() {
        let temp = tempdir().expect("create tempdir");
        let dir_path = temp.path();
        tokio::fs::write(dir_path.join("alpha.txt"), b"alpha")
            .await
            .expect("write alpha");
        tokio::fs::write(dir_path.join("beta.txt"), b"beta")
            .await
            .expect("write beta");
        tokio::fs::write(dir_path.join("gamma.txt"), b"gamma")
            .await
            .expect("write gamma");

        let entries = list_dir_slice(dir_path, 2, usize::MAX, 1)
            .await
            .expect("list without overflow");

        assert_eq!(
            entries,
            vec!["beta.txt".to_string(), "gamma.txt".to_string(),]
        );
    }

    #[tokio::test]
    async fn indicates_truncated_results() {
        let temp = tempdir().expect("create tempdir");
        let dir_path = temp.path();
        for idx in 0..40 {
            let file = dir_path.join(format!("file_{idx:02}.txt"));
            tokio::fs::write(file, b"content")
                .await
                .expect("write file");
        }

        let entries = list_dir_slice(dir_path, 1, 25, 1)
            .await
            .expect("list directory");

        assert_eq!(entries.len(), 26);
        assert_eq!(
            entries.last(),
            Some(&"More than 25 entries found".to_string())
        );
    }

    #[tokio::test]
    async fn bfs_truncation() -> anyhow::Result<()> {
        let temp = tempdir()?;
        let dir_path = temp.path();
        let nested = dir_path.join("nested");
        let deeper = nested.join("deeper");
        tokio::fs::create_dir(&nested).await?;
        tokio::fs::create_dir(&deeper).await?;
        tokio::fs::write(dir_path.join("root.txt"), b"root").await?;
        tokio::fs::write(nested.join("child.txt"), b"child").await?;
        tokio::fs::write(deeper.join("grandchild.txt"), b"deep").await?;

        let entries_depth_three = list_dir_slice(dir_path, 1, 3, 3).await?;
        assert_eq!(
            entries_depth_three,
            vec![
                "nested/".to_string(),
                "  child.txt".to_string(),
                "root.txt".to_string(),
                "More than 3 entries found".to_string()
            ]
        );
        Ok(())
    }
}
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/apply_patch.rs
codex-rs/core/src/tools/handlers/apply_patch.rs
use std::collections::BTreeMap; use std::path::Path; use crate::apply_patch; use crate::apply_patch::InternalApplyPatchInvocation; use crate::apply_patch::convert_apply_patch_to_protocol; use crate::client_common::tools::FreeformTool; use crate::client_common::tools::FreeformToolFormat; use crate::client_common::tools::ResponsesApiTool; use crate::client_common::tools::ToolSpec; use crate::codex::Session; use crate::codex::TurnContext; use crate::function_tool::FunctionCallError; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::events::ToolEmitter; use crate::tools::events::ToolEventCtx; use crate::tools::orchestrator::ToolOrchestrator; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; use crate::tools::runtimes::apply_patch::ApplyPatchRequest; use crate::tools::runtimes::apply_patch::ApplyPatchRuntime; use crate::tools::sandboxing::ToolCtx; use crate::tools::spec::ApplyPatchToolArgs; use crate::tools::spec::JsonSchema; use async_trait::async_trait; pub struct ApplyPatchHandler; const APPLY_PATCH_LARK_GRAMMAR: &str = include_str!("tool_apply_patch.lark"); #[async_trait] impl ToolHandler for ApplyPatchHandler { fn kind(&self) -> ToolKind { ToolKind::Function } fn matches_kind(&self, payload: &ToolPayload) -> bool { matches!( payload, ToolPayload::Function { .. } | ToolPayload::Custom { .. 
} ) } async fn is_mutating(&self, _invocation: &ToolInvocation) -> bool { true } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, tracker, call_id, tool_name, payload, } = invocation; let patch_input = match payload { ToolPayload::Function { arguments } => { let args: ApplyPatchToolArgs = serde_json::from_str(&arguments).map_err(|e| { FunctionCallError::RespondToModel(format!( "failed to parse function arguments: {e:?}" )) })?; args.input } ToolPayload::Custom { input } => input, _ => { return Err(FunctionCallError::RespondToModel( "apply_patch handler received unsupported payload".to_string(), )); } }; // Re-parse and verify the patch so we can compute changes and approval. // Avoid building temporary ExecParams/command vectors; derive directly from inputs. let cwd = turn.cwd.clone(); let command = vec!["apply_patch".to_string(), patch_input.clone()]; match codex_apply_patch::maybe_parse_apply_patch_verified(&command, &cwd) { codex_apply_patch::MaybeApplyPatchVerified::Body(changes) => { match apply_patch::apply_patch(session.as_ref(), turn.as_ref(), &call_id, changes) .await { InternalApplyPatchInvocation::Output(item) => { let content = item?; Ok(ToolOutput::Function { content, content_items: None, success: Some(true), }) } InternalApplyPatchInvocation::DelegateToExec(apply) => { let emitter = ToolEmitter::apply_patch( convert_apply_patch_to_protocol(&apply.action), !apply.user_explicitly_approved_this_action, ); let event_ctx = ToolEventCtx::new( session.as_ref(), turn.as_ref(), &call_id, Some(&tracker), ); emitter.begin(event_ctx).await; let req = ApplyPatchRequest { patch: apply.action.patch.clone(), cwd: apply.action.cwd.clone(), timeout_ms: None, user_explicitly_approved: apply.user_explicitly_approved_this_action, codex_exe: turn.codex_linux_sandbox_exe.clone(), }; let mut orchestrator = ToolOrchestrator::new(); let mut runtime = ApplyPatchRuntime::new(); let tool_ctx = 
ToolCtx { session: session.as_ref(), turn: turn.as_ref(), call_id: call_id.clone(), tool_name: tool_name.to_string(), }; let out = orchestrator .run(&mut runtime, &req, &tool_ctx, &turn, turn.approval_policy) .await; let event_ctx = ToolEventCtx::new( session.as_ref(), turn.as_ref(), &call_id, Some(&tracker), ); let content = emitter.finish(event_ctx, out).await?; Ok(ToolOutput::Function { content, content_items: None, success: Some(true), }) } } } codex_apply_patch::MaybeApplyPatchVerified::CorrectnessError(parse_error) => { Err(FunctionCallError::RespondToModel(format!( "apply_patch verification failed: {parse_error}" ))) } codex_apply_patch::MaybeApplyPatchVerified::ShellParseError(error) => { tracing::trace!("Failed to parse apply_patch input, {error:?}"); Err(FunctionCallError::RespondToModel( "apply_patch handler received invalid patch input".to_string(), )) } codex_apply_patch::MaybeApplyPatchVerified::NotApplyPatch => { Err(FunctionCallError::RespondToModel( "apply_patch handler received non-apply_patch input".to_string(), )) } } } } #[allow(clippy::too_many_arguments)] pub(crate) async fn intercept_apply_patch( command: &[String], cwd: &Path, timeout_ms: Option<u64>, session: &Session, turn: &TurnContext, tracker: Option<&SharedTurnDiffTracker>, call_id: &str, tool_name: &str, ) -> Result<Option<ToolOutput>, FunctionCallError> { match codex_apply_patch::maybe_parse_apply_patch_verified(command, cwd) { codex_apply_patch::MaybeApplyPatchVerified::Body(changes) => { session .record_model_warning( format!("apply_patch was requested via {tool_name}. 
Use the apply_patch tool instead of exec_command."), turn, ) .await; match apply_patch::apply_patch(session, turn, call_id, changes).await { InternalApplyPatchInvocation::Output(item) => { let content = item?; Ok(Some(ToolOutput::Function { content, content_items: None, success: Some(true), })) } InternalApplyPatchInvocation::DelegateToExec(apply) => { let emitter = ToolEmitter::apply_patch( convert_apply_patch_to_protocol(&apply.action), !apply.user_explicitly_approved_this_action, ); let event_ctx = ToolEventCtx::new(session, turn, call_id, tracker.as_ref().copied()); emitter.begin(event_ctx).await; let req = ApplyPatchRequest { patch: apply.action.patch.clone(), cwd: apply.action.cwd.clone(), timeout_ms, user_explicitly_approved: apply.user_explicitly_approved_this_action, codex_exe: turn.codex_linux_sandbox_exe.clone(), }; let mut orchestrator = ToolOrchestrator::new(); let mut runtime = ApplyPatchRuntime::new(); let tool_ctx = ToolCtx { session, turn, call_id: call_id.to_string(), tool_name: tool_name.to_string(), }; let out = orchestrator .run(&mut runtime, &req, &tool_ctx, turn, turn.approval_policy) .await; let event_ctx = ToolEventCtx::new(session, turn, call_id, tracker.as_ref().copied()); let content = emitter.finish(event_ctx, out).await?; Ok(Some(ToolOutput::Function { content, content_items: None, success: Some(true), })) } } } codex_apply_patch::MaybeApplyPatchVerified::CorrectnessError(parse_error) => { Err(FunctionCallError::RespondToModel(format!( "apply_patch verification failed: {parse_error}" ))) } codex_apply_patch::MaybeApplyPatchVerified::ShellParseError(error) => { tracing::trace!("Failed to parse apply_patch input, {error:?}"); Ok(None) } codex_apply_patch::MaybeApplyPatchVerified::NotApplyPatch => Ok(None), } } /// Returns a custom tool that can be used to edit files. 
Well-suited for GPT-5 models /// https://platform.openai.com/docs/guides/function-calling#custom-tools pub(crate) fn create_apply_patch_freeform_tool() -> ToolSpec { ToolSpec::Freeform(FreeformTool { name: "apply_patch".to_string(), description: "Use the `apply_patch` tool to edit files. This is a FREEFORM tool, so do not wrap the patch in JSON.".to_string(), format: FreeformToolFormat { r#type: "grammar".to_string(), syntax: "lark".to_string(), definition: APPLY_PATCH_LARK_GRAMMAR.to_string(), }, }) } /// Returns a json tool that can be used to edit files. Should only be used with gpt-oss models pub(crate) fn create_apply_patch_json_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( "input".to_string(), JsonSchema::String { description: Some(r#"The entire contents of the apply_patch command"#.to_string()), }, ); ToolSpec::Function(ResponsesApiTool { name: "apply_patch".to_string(), description: r#"Use the `apply_patch` tool to edit files. Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope: *** Begin Patch [ one or more file sections ] *** End Patch Within that envelope, you get a sequence of file operations. You MUST include a header to specify the action you are taking. Each operation starts with one of three headers: *** Add File: <path> - create a new file. Every following line is a + line (the initial contents). *** Delete File: <path> - remove an existing file. Nothing follows. *** Update File: <path> - patch an existing file in place (optionally with a rename). May be immediately followed by *** Move to: <new path> if you want to rename the file. Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header). Within a hunk each line starts with: For instructions on [context_before] and [context_after]: - By default, show 3 lines of code immediately above and 3 lines immediately below each change. 
If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines. - If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have: @@ class BaseClass [3 lines of pre-context] - [old_code] + [new_code] [3 lines of post-context] - If a code block is repeated so many times in a class or function such that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance: @@ class BaseClass @@ def method(): [3 lines of pre-context] - [old_code] + [new_code] [3 lines of post-context] The full grammar definition is below: Patch := Begin { FileOp } End Begin := "*** Begin Patch" NEWLINE End := "*** End Patch" NEWLINE FileOp := AddFile | DeleteFile | UpdateFile AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE } DeleteFile := "*** Delete File: " path NEWLINE UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk } MoveTo := "*** Move to: " newPath NEWLINE Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ] HunkLine := (" " | "-" | "+") text NEWLINE A full patch can combine several operations: *** Begin Patch *** Add File: hello.txt +Hello world *** Update File: src/app.py *** Move to: src/main.py @@ def greet(): -print("Hi") +print("Hello, world!") *** Delete File: obsolete.txt *** End Patch It is important to remember: - You must include a header with your intended action (Add/Delete/Update) - You must prefix new lines with `+` even when creating a new file - File references can only be relative, NEVER ABSOLUTE. "# .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["input".to_string()]), additional_properties: Some(false.into()), }, }) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/mcp.rs
codex-rs/core/src/tools/handlers/mcp.rs
use async_trait::async_trait; use crate::function_tool::FunctionCallError; use crate::mcp_tool_call::handle_mcp_tool_call; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; pub struct McpHandler; #[async_trait] impl ToolHandler for McpHandler { fn kind(&self) -> ToolKind { ToolKind::Mcp } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, call_id, payload, .. } = invocation; let payload = match payload { ToolPayload::Mcp { server, tool, raw_arguments, } => (server, tool, raw_arguments), _ => { return Err(FunctionCallError::RespondToModel( "mcp handler received unsupported payload".to_string(), )); } }; let (server, tool, raw_arguments) = payload; let arguments_str = raw_arguments; let response = handle_mcp_tool_call( session.as_ref(), turn.as_ref(), call_id.clone(), server, tool, arguments_str, ) .await; match response { codex_protocol::models::ResponseInputItem::McpToolCallOutput { result, .. } => { Ok(ToolOutput::Mcp { result }) } codex_protocol::models::ResponseInputItem::FunctionCallOutput { output, .. } => { let codex_protocol::models::FunctionCallOutputPayload { content, content_items, success, } = output; Ok(ToolOutput::Function { content, content_items, success, }) } _ => Err(FunctionCallError::RespondToModel( "mcp handler received unexpected response variant".to_string(), )), } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/mcp_resource.rs
codex-rs/core/src/tools/handlers/mcp_resource.rs
use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use std::time::Instant; use async_trait::async_trait; use mcp_types::CallToolResult; use mcp_types::ContentBlock; use mcp_types::ListResourceTemplatesRequestParams; use mcp_types::ListResourceTemplatesResult; use mcp_types::ListResourcesRequestParams; use mcp_types::ListResourcesResult; use mcp_types::ReadResourceRequestParams; use mcp_types::ReadResourceResult; use mcp_types::Resource; use mcp_types::ResourceTemplate; use mcp_types::TextContent; use serde::Deserialize; use serde::Serialize; use serde::de::DeserializeOwned; use serde_json::Value; use crate::codex::Session; use crate::codex::TurnContext; use crate::function_tool::FunctionCallError; use crate::protocol::EventMsg; use crate::protocol::McpInvocation; use crate::protocol::McpToolCallBeginEvent; use crate::protocol::McpToolCallEndEvent; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; pub struct McpResourceHandler; #[derive(Debug, Deserialize, Default)] struct ListResourcesArgs { /// Lists all resources from all servers if not specified. #[serde(default)] server: Option<String>, #[serde(default)] cursor: Option<String>, } #[derive(Debug, Deserialize, Default)] struct ListResourceTemplatesArgs { /// Lists all resource templates from all servers if not specified. 
#[serde(default)] server: Option<String>, #[serde(default)] cursor: Option<String>, } #[derive(Debug, Deserialize)] struct ReadResourceArgs { server: String, uri: String, } #[derive(Debug, Serialize)] struct ResourceWithServer { server: String, #[serde(flatten)] resource: Resource, } impl ResourceWithServer { fn new(server: String, resource: Resource) -> Self { Self { server, resource } } } #[derive(Debug, Serialize)] struct ResourceTemplateWithServer { server: String, #[serde(flatten)] template: ResourceTemplate, } impl ResourceTemplateWithServer { fn new(server: String, template: ResourceTemplate) -> Self { Self { server, template } } } #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] struct ListResourcesPayload { #[serde(skip_serializing_if = "Option::is_none")] server: Option<String>, resources: Vec<ResourceWithServer>, #[serde(skip_serializing_if = "Option::is_none")] next_cursor: Option<String>, } impl ListResourcesPayload { fn from_single_server(server: String, result: ListResourcesResult) -> Self { let resources = result .resources .into_iter() .map(|resource| ResourceWithServer::new(server.clone(), resource)) .collect(); Self { server: Some(server), resources, next_cursor: result.next_cursor, } } fn from_all_servers(resources_by_server: HashMap<String, Vec<Resource>>) -> Self { let mut entries: Vec<(String, Vec<Resource>)> = resources_by_server.into_iter().collect(); entries.sort_by(|a, b| a.0.cmp(&b.0)); let mut resources = Vec::new(); for (server, server_resources) in entries { for resource in server_resources { resources.push(ResourceWithServer::new(server.clone(), resource)); } } Self { server: None, resources, next_cursor: None, } } } #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] struct ListResourceTemplatesPayload { #[serde(skip_serializing_if = "Option::is_none")] server: Option<String>, resource_templates: Vec<ResourceTemplateWithServer>, #[serde(skip_serializing_if = "Option::is_none")] next_cursor: Option<String>, } 
impl ListResourceTemplatesPayload { fn from_single_server(server: String, result: ListResourceTemplatesResult) -> Self { let resource_templates = result .resource_templates .into_iter() .map(|template| ResourceTemplateWithServer::new(server.clone(), template)) .collect(); Self { server: Some(server), resource_templates, next_cursor: result.next_cursor, } } fn from_all_servers(templates_by_server: HashMap<String, Vec<ResourceTemplate>>) -> Self { let mut entries: Vec<(String, Vec<ResourceTemplate>)> = templates_by_server.into_iter().collect(); entries.sort_by(|a, b| a.0.cmp(&b.0)); let mut resource_templates = Vec::new(); for (server, server_templates) in entries { for template in server_templates { resource_templates.push(ResourceTemplateWithServer::new(server.clone(), template)); } } Self { server: None, resource_templates, next_cursor: None, } } } #[derive(Debug, Serialize)] struct ReadResourcePayload { server: String, uri: String, #[serde(flatten)] result: ReadResourceResult, } #[async_trait] impl ToolHandler for McpResourceHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, call_id, tool_name, payload, .. 
} = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "mcp_resource handler received unsupported payload".to_string(), )); } }; let arguments_value = parse_arguments(arguments.as_str())?; match tool_name.as_str() { "list_mcp_resources" => { handle_list_resources( Arc::clone(&session), Arc::clone(&turn), call_id.clone(), arguments_value.clone(), ) .await } "list_mcp_resource_templates" => { handle_list_resource_templates( Arc::clone(&session), Arc::clone(&turn), call_id.clone(), arguments_value.clone(), ) .await } "read_mcp_resource" => { handle_read_resource( Arc::clone(&session), Arc::clone(&turn), call_id, arguments_value, ) .await } other => Err(FunctionCallError::RespondToModel(format!( "unsupported MCP resource tool: {other}" ))), } } } async fn handle_list_resources( session: Arc<Session>, turn: Arc<TurnContext>, call_id: String, arguments: Option<Value>, ) -> Result<ToolOutput, FunctionCallError> { let args: ListResourcesArgs = parse_args_with_default(arguments.clone())?; let ListResourcesArgs { server, cursor } = args; let server = normalize_optional_string(server); let cursor = normalize_optional_string(cursor); let invocation = McpInvocation { server: server.clone().unwrap_or_else(|| "codex".to_string()), tool: "list_mcp_resources".to_string(), arguments: arguments.clone(), }; emit_tool_call_begin(&session, turn.as_ref(), &call_id, invocation.clone()).await; let start = Instant::now(); let payload_result: Result<ListResourcesPayload, FunctionCallError> = async { if let Some(server_name) = server.clone() { let params = cursor.clone().map(|value| ListResourcesRequestParams { cursor: Some(value), }); let result = session .list_resources(&server_name, params) .await .map_err(|err| { FunctionCallError::RespondToModel(format!("resources/list failed: {err:#}")) })?; Ok(ListResourcesPayload::from_single_server( server_name, result, )) } else { if cursor.is_some() { 
return Err(FunctionCallError::RespondToModel( "cursor can only be used when a server is specified".to_string(), )); } let resources = session .services .mcp_connection_manager .read() .await .list_all_resources() .await; Ok(ListResourcesPayload::from_all_servers(resources)) } } .await; match payload_result { Ok(payload) => match serialize_function_output(payload) { Ok(output) => { let ToolOutput::Function { content, success, .. } = &output else { unreachable!("MCP resource handler should return function output"); }; let duration = start.elapsed(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Ok(call_tool_result_from_content(content, *success)), ) .await; Ok(output) } Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } }, Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } } } async fn handle_list_resource_templates( session: Arc<Session>, turn: Arc<TurnContext>, call_id: String, arguments: Option<Value>, ) -> Result<ToolOutput, FunctionCallError> { let args: ListResourceTemplatesArgs = parse_args_with_default(arguments.clone())?; let ListResourceTemplatesArgs { server, cursor } = args; let server = normalize_optional_string(server); let cursor = normalize_optional_string(cursor); let invocation = McpInvocation { server: server.clone().unwrap_or_else(|| "codex".to_string()), tool: "list_mcp_resource_templates".to_string(), arguments: arguments.clone(), }; emit_tool_call_begin(&session, turn.as_ref(), &call_id, invocation.clone()).await; let start = Instant::now(); let payload_result: Result<ListResourceTemplatesPayload, FunctionCallError> = async { if let Some(server_name) = server.clone() { let params = cursor .clone() .map(|value| 
ListResourceTemplatesRequestParams { cursor: Some(value), }); let result = session .list_resource_templates(&server_name, params) .await .map_err(|err| { FunctionCallError::RespondToModel(format!( "resources/templates/list failed: {err:#}" )) })?; Ok(ListResourceTemplatesPayload::from_single_server( server_name, result, )) } else { if cursor.is_some() { return Err(FunctionCallError::RespondToModel( "cursor can only be used when a server is specified".to_string(), )); } let templates = session .services .mcp_connection_manager .read() .await .list_all_resource_templates() .await; Ok(ListResourceTemplatesPayload::from_all_servers(templates)) } } .await; match payload_result { Ok(payload) => match serialize_function_output(payload) { Ok(output) => { let ToolOutput::Function { content, success, .. } = &output else { unreachable!("MCP resource handler should return function output"); }; let duration = start.elapsed(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Ok(call_tool_result_from_content(content, *success)), ) .await; Ok(output) } Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } }, Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } } } async fn handle_read_resource( session: Arc<Session>, turn: Arc<TurnContext>, call_id: String, arguments: Option<Value>, ) -> Result<ToolOutput, FunctionCallError> { let args: ReadResourceArgs = parse_args(arguments.clone())?; let ReadResourceArgs { server, uri } = args; let server = normalize_required_string("server", server)?; let uri = normalize_required_string("uri", uri)?; let invocation = McpInvocation { server: server.clone(), tool: "read_mcp_resource".to_string(), arguments: arguments.clone(), }; 
emit_tool_call_begin(&session, turn.as_ref(), &call_id, invocation.clone()).await; let start = Instant::now(); let payload_result: Result<ReadResourcePayload, FunctionCallError> = async { let result = session .read_resource(&server, ReadResourceRequestParams { uri: uri.clone() }) .await .map_err(|err| { FunctionCallError::RespondToModel(format!("resources/read failed: {err:#}")) })?; Ok(ReadResourcePayload { server, uri, result, }) } .await; match payload_result { Ok(payload) => match serialize_function_output(payload) { Ok(output) => { let ToolOutput::Function { content, success, .. } = &output else { unreachable!("MCP resource handler should return function output"); }; let duration = start.elapsed(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Ok(call_tool_result_from_content(content, *success)), ) .await; Ok(output) } Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } }, Err(err) => { let duration = start.elapsed(); let message = err.to_string(); emit_tool_call_end( &session, turn.as_ref(), &call_id, invocation, duration, Err(message.clone()), ) .await; Err(err) } } } fn call_tool_result_from_content(content: &str, success: Option<bool>) -> CallToolResult { CallToolResult { content: vec![ContentBlock::TextContent(TextContent { annotations: None, text: content.to_string(), r#type: "text".to_string(), })], is_error: success.map(|value| !value), structured_content: None, } } async fn emit_tool_call_begin( session: &Arc<Session>, turn: &TurnContext, call_id: &str, invocation: McpInvocation, ) { session .send_event( turn, EventMsg::McpToolCallBegin(McpToolCallBeginEvent { call_id: call_id.to_string(), invocation, }), ) .await; } async fn emit_tool_call_end( session: &Arc<Session>, turn: &TurnContext, call_id: &str, invocation: McpInvocation, duration: Duration, result: 
Result<CallToolResult, String>, ) { session .send_event( turn, EventMsg::McpToolCallEnd(McpToolCallEndEvent { call_id: call_id.to_string(), invocation, duration, result, }), ) .await; } fn normalize_optional_string(input: Option<String>) -> Option<String> { input.and_then(|value| { let trimmed = value.trim().to_string(); if trimmed.is_empty() { None } else { Some(trimmed) } }) } fn normalize_required_string(field: &str, value: String) -> Result<String, FunctionCallError> { match normalize_optional_string(Some(value)) { Some(normalized) => Ok(normalized), None => Err(FunctionCallError::RespondToModel(format!( "{field} must be provided" ))), } } fn serialize_function_output<T>(payload: T) -> Result<ToolOutput, FunctionCallError> where T: Serialize, { let content = serde_json::to_string(&payload).map_err(|err| { FunctionCallError::RespondToModel(format!( "failed to serialize MCP resource response: {err}" )) })?; Ok(ToolOutput::Function { content, content_items: None, success: Some(true), }) } fn parse_arguments(raw_args: &str) -> Result<Option<Value>, FunctionCallError> { if raw_args.trim().is_empty() { Ok(None) } else { serde_json::from_str(raw_args).map(Some).map_err(|err| { FunctionCallError::RespondToModel(format!("failed to parse function arguments: {err}")) }) } } fn parse_args<T>(arguments: Option<Value>) -> Result<T, FunctionCallError> where T: DeserializeOwned, { match arguments { Some(value) => serde_json::from_value(value).map_err(|err| { FunctionCallError::RespondToModel(format!("failed to parse function arguments: {err}")) }), None => Err(FunctionCallError::RespondToModel( "failed to parse function arguments: expected value".to_string(), )), } } fn parse_args_with_default<T>(arguments: Option<Value>) -> Result<T, FunctionCallError> where T: DeserializeOwned + Default, { match arguments { Some(value) => parse_args(Some(value)), None => Ok(T::default()), } } #[cfg(test)] mod tests { use super::*; use mcp_types::ListResourcesResult; use 
mcp_types::ResourceTemplate; use pretty_assertions::assert_eq; use serde_json::json; fn resource(uri: &str, name: &str) -> Resource { Resource { annotations: None, description: None, mime_type: None, name: name.to_string(), size: None, title: None, uri: uri.to_string(), } } fn template(uri_template: &str, name: &str) -> ResourceTemplate { ResourceTemplate { annotations: None, description: None, mime_type: None, name: name.to_string(), title: None, uri_template: uri_template.to_string(), } } #[test] fn resource_with_server_serializes_server_field() { let entry = ResourceWithServer::new("test".to_string(), resource("memo://id", "memo")); let value = serde_json::to_value(&entry).expect("serialize resource"); assert_eq!(value["server"], json!("test")); assert_eq!(value["uri"], json!("memo://id")); assert_eq!(value["name"], json!("memo")); } #[test] fn list_resources_payload_from_single_server_copies_next_cursor() { let result = ListResourcesResult { next_cursor: Some("cursor-1".to_string()), resources: vec![resource("memo://id", "memo")], }; let payload = ListResourcesPayload::from_single_server("srv".to_string(), result); let value = serde_json::to_value(&payload).expect("serialize payload"); assert_eq!(value["server"], json!("srv")); assert_eq!(value["nextCursor"], json!("cursor-1")); let resources = value["resources"].as_array().expect("resources array"); assert_eq!(resources.len(), 1); assert_eq!(resources[0]["server"], json!("srv")); } #[test] fn list_resources_payload_from_all_servers_is_sorted() { let mut map = HashMap::new(); map.insert("beta".to_string(), vec![resource("memo://b-1", "b-1")]); map.insert( "alpha".to_string(), vec![resource("memo://a-1", "a-1"), resource("memo://a-2", "a-2")], ); let payload = ListResourcesPayload::from_all_servers(map); let value = serde_json::to_value(&payload).expect("serialize payload"); let uris: Vec<String> = value["resources"] .as_array() .expect("resources array") .iter() .map(|entry| 
entry["uri"].as_str().unwrap().to_string()) .collect(); assert_eq!( uris, vec![ "memo://a-1".to_string(), "memo://a-2".to_string(), "memo://b-1".to_string() ] ); } #[test] fn call_tool_result_from_content_marks_success() { let result = call_tool_result_from_content("{}", Some(true)); assert_eq!(result.is_error, Some(false)); assert_eq!(result.content.len(), 1); } #[test] fn parse_arguments_handles_empty_and_json() { assert!( parse_arguments(" \n\t").unwrap().is_none(), "expected None for empty arguments" ); let value = parse_arguments(r#"{"server":"figma"}"#) .expect("parse json") .expect("value present"); assert_eq!(value["server"], json!("figma")); } #[test] fn template_with_server_serializes_server_field() { let entry = ResourceTemplateWithServer::new("srv".to_string(), template("memo://{id}", "memo")); let value = serde_json::to_value(&entry).expect("serialize template"); assert_eq!( value, json!({ "server": "srv", "uriTemplate": "memo://{id}", "name": "memo" }) ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/shell.rs
codex-rs/core/src/tools/handlers/shell.rs
use async_trait::async_trait; use codex_protocol::models::ShellCommandToolCallParams; use codex_protocol::models::ShellToolCallParams; use std::sync::Arc; use crate::codex::TurnContext; use crate::exec::ExecParams; use crate::exec_env::create_env; use crate::function_tool::FunctionCallError; use crate::is_safe_command::is_known_safe_command; use crate::protocol::ExecCommandSource; use crate::shell::Shell; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::events::ToolEmitter; use crate::tools::events::ToolEventCtx; use crate::tools::handlers::apply_patch::intercept_apply_patch; use crate::tools::orchestrator::ToolOrchestrator; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; use crate::tools::runtimes::shell::ShellRequest; use crate::tools::runtimes::shell::ShellRuntime; use crate::tools::sandboxing::ToolCtx; pub struct ShellHandler; pub struct ShellCommandHandler; impl ShellHandler { fn to_exec_params(params: ShellToolCallParams, turn_context: &TurnContext) -> ExecParams { ExecParams { command: params.command, cwd: turn_context.resolve_path(params.workdir.clone()), expiration: params.timeout_ms.into(), env: create_env(&turn_context.shell_environment_policy), sandbox_permissions: params.sandbox_permissions.unwrap_or_default(), justification: params.justification, arg0: None, } } } impl ShellCommandHandler { fn base_command(shell: &Shell, command: &str, login: Option<bool>) -> Vec<String> { let use_login_shell = login.unwrap_or(true); shell.derive_exec_args(command, use_login_shell) } fn to_exec_params( params: ShellCommandToolCallParams, session: &crate::codex::Session, turn_context: &TurnContext, ) -> ExecParams { let shell = session.user_shell(); let command = Self::base_command(shell.as_ref(), &params.command, params.login); ExecParams { command, cwd: turn_context.resolve_path(params.workdir.clone()), expiration: params.timeout_ms.into(), env: 
create_env(&turn_context.shell_environment_policy), sandbox_permissions: params.sandbox_permissions.unwrap_or_default(), justification: params.justification, arg0: None, } } } #[async_trait] impl ToolHandler for ShellHandler { fn kind(&self) -> ToolKind { ToolKind::Function } fn matches_kind(&self, payload: &ToolPayload) -> bool { matches!( payload, ToolPayload::Function { .. } | ToolPayload::LocalShell { .. } ) } async fn is_mutating(&self, invocation: &ToolInvocation) -> bool { match &invocation.payload { ToolPayload::Function { arguments } => { serde_json::from_str::<ShellToolCallParams>(arguments) .map(|params| !is_known_safe_command(&params.command)) .unwrap_or(true) } ToolPayload::LocalShell { params } => !is_known_safe_command(&params.command), _ => true, // unknown payloads => assume mutating } } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, tracker, call_id, tool_name, payload, } = invocation; match payload { ToolPayload::Function { arguments } => { let params: ShellToolCallParams = serde_json::from_str(&arguments).map_err(|e| { FunctionCallError::RespondToModel(format!( "failed to parse function arguments: {e:?}" )) })?; let exec_params = Self::to_exec_params(params, turn.as_ref()); Self::run_exec_like( tool_name.as_str(), exec_params, session, turn, tracker, call_id, false, ) .await } ToolPayload::LocalShell { params } => { let exec_params = Self::to_exec_params(params, turn.as_ref()); Self::run_exec_like( tool_name.as_str(), exec_params, session, turn, tracker, call_id, false, ) .await } _ => Err(FunctionCallError::RespondToModel(format!( "unsupported payload for shell handler: {tool_name}" ))), } } } #[async_trait] impl ToolHandler for ShellCommandHandler { fn kind(&self) -> ToolKind { ToolKind::Function } fn matches_kind(&self, payload: &ToolPayload) -> bool { matches!(payload, ToolPayload::Function { .. 
}) } async fn is_mutating(&self, invocation: &ToolInvocation) -> bool { let ToolPayload::Function { arguments } = &invocation.payload else { return true; }; serde_json::from_str::<ShellCommandToolCallParams>(arguments) .map(|params| { let shell = invocation.session.user_shell(); let command = Self::base_command(shell.as_ref(), &params.command, params.login); !is_known_safe_command(&command) }) .unwrap_or(true) } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, tracker, call_id, tool_name, payload, } = invocation; let ToolPayload::Function { arguments } = payload else { return Err(FunctionCallError::RespondToModel(format!( "unsupported payload for shell_command handler: {tool_name}" ))); }; let params: ShellCommandToolCallParams = serde_json::from_str(&arguments).map_err(|e| { FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) })?; let exec_params = Self::to_exec_params(params, session.as_ref(), turn.as_ref()); ShellHandler::run_exec_like( tool_name.as_str(), exec_params, session, turn, tracker, call_id, true, ) .await } } impl ShellHandler { async fn run_exec_like( tool_name: &str, exec_params: ExecParams, session: Arc<crate::codex::Session>, turn: Arc<TurnContext>, tracker: crate::tools::context::SharedTurnDiffTracker, call_id: String, freeform: bool, ) -> Result<ToolOutput, FunctionCallError> { // Approval policy guard for explicit escalation in non-OnRequest modes. if exec_params .sandbox_permissions .requires_escalated_permissions() && !matches!( turn.approval_policy, codex_protocol::protocol::AskForApproval::OnRequest ) { return Err(FunctionCallError::RespondToModel(format!( "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}", policy = turn.approval_policy ))); } // Intercept apply_patch if present. 
if let Some(output) = intercept_apply_patch( &exec_params.command, &exec_params.cwd, exec_params.expiration.timeout_ms(), session.as_ref(), turn.as_ref(), Some(&tracker), &call_id, tool_name, ) .await? { return Ok(output); } let source = ExecCommandSource::Agent; let emitter = ToolEmitter::shell( exec_params.command.clone(), exec_params.cwd.clone(), source, freeform, ); let event_ctx = ToolEventCtx::new(session.as_ref(), turn.as_ref(), &call_id, None); emitter.begin(event_ctx).await; let features = session.features(); let exec_approval_requirement = session .services .exec_policy .create_exec_approval_requirement_for_command( &features, &exec_params.command, turn.approval_policy, &turn.sandbox_policy, exec_params.sandbox_permissions, ) .await; let req = ShellRequest { command: exec_params.command.clone(), cwd: exec_params.cwd.clone(), timeout_ms: exec_params.expiration.timeout_ms(), env: exec_params.env.clone(), sandbox_permissions: exec_params.sandbox_permissions, justification: exec_params.justification.clone(), exec_approval_requirement, }; let mut orchestrator = ToolOrchestrator::new(); let mut runtime = ShellRuntime::new(); let tool_ctx = ToolCtx { session: session.as_ref(), turn: turn.as_ref(), call_id: call_id.clone(), tool_name: tool_name.to_string(), }; let out = orchestrator .run(&mut runtime, &req, &tool_ctx, &turn, turn.approval_policy) .await; let event_ctx = ToolEventCtx::new(session.as_ref(), turn.as_ref(), &call_id, None); let content = emitter.finish(event_ctx, out).await?; Ok(ToolOutput::Function { content, content_items: None, success: Some(true), }) } } #[cfg(test)] mod tests { use std::path::PathBuf; use std::sync::Arc; use codex_protocol::models::ShellCommandToolCallParams; use pretty_assertions::assert_eq; use crate::codex::make_session_and_context; use crate::exec_env::create_env; use crate::is_safe_command::is_known_safe_command; use crate::powershell::try_find_powershell_executable_blocking; use 
crate::powershell::try_find_pwsh_executable_blocking; use crate::sandboxing::SandboxPermissions; use crate::shell::Shell; use crate::shell::ShellType; use crate::shell_snapshot::ShellSnapshot; use crate::tools::handlers::ShellCommandHandler; /// The logic for is_known_safe_command() has heuristics for known shells, /// so we must ensure the commands generated by [ShellCommandHandler] can be /// recognized as safe if the `command` is safe. #[test] fn commands_generated_by_shell_command_handler_can_be_matched_by_is_known_safe_command() { let bash_shell = Shell { shell_type: ShellType::Bash, shell_path: PathBuf::from("/bin/bash"), shell_snapshot: None, }; assert_safe(&bash_shell, "ls -la"); let zsh_shell = Shell { shell_type: ShellType::Zsh, shell_path: PathBuf::from("/bin/zsh"), shell_snapshot: None, }; assert_safe(&zsh_shell, "ls -la"); if let Some(path) = try_find_powershell_executable_blocking() { let powershell = Shell { shell_type: ShellType::PowerShell, shell_path: path.to_path_buf(), shell_snapshot: None, }; assert_safe(&powershell, "ls -Name"); } if let Some(path) = try_find_pwsh_executable_blocking() { let pwsh = Shell { shell_type: ShellType::PowerShell, shell_path: path.to_path_buf(), shell_snapshot: None, }; assert_safe(&pwsh, "ls -Name"); } } fn assert_safe(shell: &Shell, command: &str) { assert!(is_known_safe_command( &shell.derive_exec_args(command, /* use_login_shell */ true) )); assert!(is_known_safe_command( &shell.derive_exec_args(command, /* use_login_shell */ false) )); } #[tokio::test] async fn shell_command_handler_to_exec_params_uses_session_shell_and_turn_context() { let (session, turn_context) = make_session_and_context().await; let command = "echo hello".to_string(); let workdir = Some("subdir".to_string()); let login = None; let timeout_ms = Some(1234); let sandbox_permissions = SandboxPermissions::RequireEscalated; let justification = Some("because tests".to_string()); let expected_command = session.user_shell().derive_exec_args(&command, 
true); let expected_cwd = turn_context.resolve_path(workdir.clone()); let expected_env = create_env(&turn_context.shell_environment_policy); let params = ShellCommandToolCallParams { command, workdir, login, timeout_ms, sandbox_permissions: Some(sandbox_permissions), justification: justification.clone(), }; let exec_params = ShellCommandHandler::to_exec_params(params, &session, &turn_context); // ExecParams cannot derive Eq due to the CancellationToken field, so we manually compare the fields. assert_eq!(exec_params.command, expected_command); assert_eq!(exec_params.cwd, expected_cwd); assert_eq!(exec_params.env, expected_env); assert_eq!(exec_params.expiration.timeout_ms(), timeout_ms); assert_eq!(exec_params.sandbox_permissions, sandbox_permissions); assert_eq!(exec_params.justification, justification); assert_eq!(exec_params.arg0, None); } #[test] fn shell_command_handler_respects_explicit_login_flag() { let shell = Shell { shell_type: ShellType::Bash, shell_path: PathBuf::from("/bin/bash"), shell_snapshot: Some(Arc::new(ShellSnapshot { path: PathBuf::from("/tmp/snapshot.sh"), })), }; let login_command = ShellCommandHandler::base_command(&shell, "echo login shell", Some(true)); assert_eq!( login_command, shell.derive_exec_args("echo login shell", true) ); let non_login_command = ShellCommandHandler::base_command(&shell, "echo non login shell", Some(false)); assert_eq!( non_login_command, shell.derive_exec_args("echo non login shell", false) ); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/grep_files.rs
codex-rs/core/src/tools/handlers/grep_files.rs
use std::path::Path; use std::time::Duration; use async_trait::async_trait; use serde::Deserialize; use tokio::process::Command; use tokio::time::timeout; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; pub struct GrepFilesHandler; const DEFAULT_LIMIT: usize = 100; const MAX_LIMIT: usize = 2000; const COMMAND_TIMEOUT: Duration = Duration::from_secs(30); fn default_limit() -> usize { DEFAULT_LIMIT } #[derive(Deserialize)] struct GrepFilesArgs { pattern: String, #[serde(default)] include: Option<String>, #[serde(default)] path: Option<String>, #[serde(default = "default_limit")] limit: usize, } #[async_trait] impl ToolHandler for GrepFilesHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { payload, turn, .. 
} = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "grep_files handler received unsupported payload".to_string(), )); } }; let args: GrepFilesArgs = serde_json::from_str(&arguments).map_err(|err| { FunctionCallError::RespondToModel(format!( "failed to parse function arguments: {err:?}" )) })?; let pattern = args.pattern.trim(); if pattern.is_empty() { return Err(FunctionCallError::RespondToModel( "pattern must not be empty".to_string(), )); } if args.limit == 0 { return Err(FunctionCallError::RespondToModel( "limit must be greater than zero".to_string(), )); } let limit = args.limit.min(MAX_LIMIT); let search_path = turn.resolve_path(args.path.clone()); verify_path_exists(&search_path).await?; let include = args.include.as_deref().map(str::trim).and_then(|val| { if val.is_empty() { None } else { Some(val.to_string()) } }); let search_results = run_rg_search(pattern, include.as_deref(), &search_path, limit, &turn.cwd).await?; if search_results.is_empty() { Ok(ToolOutput::Function { content: "No matches found.".to_string(), content_items: None, success: Some(false), }) } else { Ok(ToolOutput::Function { content: search_results.join("\n"), content_items: None, success: Some(true), }) } } } async fn verify_path_exists(path: &Path) -> Result<(), FunctionCallError> { tokio::fs::metadata(path).await.map_err(|err| { FunctionCallError::RespondToModel(format!("unable to access `{}`: {err}", path.display())) })?; Ok(()) } async fn run_rg_search( pattern: &str, include: Option<&str>, search_path: &Path, limit: usize, cwd: &Path, ) -> Result<Vec<String>, FunctionCallError> { let mut command = Command::new("rg"); command .current_dir(cwd) .arg("--files-with-matches") .arg("--sortr=modified") .arg("--regexp") .arg(pattern) .arg("--no-messages"); if let Some(glob) = include { command.arg("--glob").arg(glob); } command.arg("--").arg(search_path); let output = timeout(COMMAND_TIMEOUT, 
command.output()) .await .map_err(|_| { FunctionCallError::RespondToModel("rg timed out after 30 seconds".to_string()) })? .map_err(|err| { FunctionCallError::RespondToModel(format!( "failed to launch rg: {err}. Ensure ripgrep is installed and on PATH." )) })?; match output.status.code() { Some(0) => Ok(parse_results(&output.stdout, limit)), Some(1) => Ok(Vec::new()), _ => { let stderr = String::from_utf8_lossy(&output.stderr); Err(FunctionCallError::RespondToModel(format!( "rg failed: {stderr}" ))) } } } fn parse_results(stdout: &[u8], limit: usize) -> Vec<String> { let mut results = Vec::new(); for line in stdout.split(|byte| *byte == b'\n') { if line.is_empty() { continue; } if let Ok(text) = std::str::from_utf8(line) { if text.is_empty() { continue; } results.push(text.to_string()); if results.len() == limit { break; } } } results } #[cfg(test)] mod tests { use super::*; use std::process::Command as StdCommand; use tempfile::tempdir; #[test] fn parses_basic_results() { let stdout = b"/tmp/file_a.rs\n/tmp/file_b.rs\n"; let parsed = parse_results(stdout, 10); assert_eq!( parsed, vec!["/tmp/file_a.rs".to_string(), "/tmp/file_b.rs".to_string()] ); } #[test] fn parse_truncates_after_limit() { let stdout = b"/tmp/file_a.rs\n/tmp/file_b.rs\n/tmp/file_c.rs\n"; let parsed = parse_results(stdout, 2); assert_eq!( parsed, vec!["/tmp/file_a.rs".to_string(), "/tmp/file_b.rs".to_string()] ); } #[tokio::test] async fn run_search_returns_results() -> anyhow::Result<()> { if !rg_available() { return Ok(()); } let temp = tempdir().expect("create temp dir"); let dir = temp.path(); std::fs::write(dir.join("match_one.txt"), "alpha beta gamma").unwrap(); std::fs::write(dir.join("match_two.txt"), "alpha delta").unwrap(); std::fs::write(dir.join("other.txt"), "omega").unwrap(); let results = run_rg_search("alpha", None, dir, 10, dir).await?; assert_eq!(results.len(), 2); assert!(results.iter().any(|path| path.ends_with("match_one.txt"))); assert!(results.iter().any(|path| 
path.ends_with("match_two.txt"))); Ok(()) } #[tokio::test] async fn run_search_with_glob_filter() -> anyhow::Result<()> { if !rg_available() { return Ok(()); } let temp = tempdir().expect("create temp dir"); let dir = temp.path(); std::fs::write(dir.join("match_one.rs"), "alpha beta gamma").unwrap(); std::fs::write(dir.join("match_two.txt"), "alpha delta").unwrap(); let results = run_rg_search("alpha", Some("*.rs"), dir, 10, dir).await?; assert_eq!(results.len(), 1); assert!(results.iter().all(|path| path.ends_with("match_one.rs"))); Ok(()) } #[tokio::test] async fn run_search_respects_limit() -> anyhow::Result<()> { if !rg_available() { return Ok(()); } let temp = tempdir().expect("create temp dir"); let dir = temp.path(); std::fs::write(dir.join("one.txt"), "alpha one").unwrap(); std::fs::write(dir.join("two.txt"), "alpha two").unwrap(); std::fs::write(dir.join("three.txt"), "alpha three").unwrap(); let results = run_rg_search("alpha", None, dir, 2, dir).await?; assert_eq!(results.len(), 2); Ok(()) } #[tokio::test] async fn run_search_handles_no_matches() -> anyhow::Result<()> { if !rg_available() { return Ok(()); } let temp = tempdir().expect("create temp dir"); let dir = temp.path(); std::fs::write(dir.join("one.txt"), "omega").unwrap(); let results = run_rg_search("alpha", None, dir, 5, dir).await?; assert!(results.is_empty()); Ok(()) } fn rg_available() -> bool { StdCommand::new("rg") .arg("--version") .output() .map(|output| output.status.success()) .unwrap_or(false) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/view_image.rs
codex-rs/core/src/tools/handlers/view_image.rs
use async_trait::async_trait; use serde::Deserialize; use tokio::fs; use crate::function_tool::FunctionCallError; use crate::protocol::EventMsg; use crate::protocol::ViewImageToolCallEvent; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; use codex_protocol::user_input::UserInput; pub struct ViewImageHandler; #[derive(Deserialize)] struct ViewImageArgs { path: String, } #[async_trait] impl ToolHandler for ViewImageHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, payload, call_id, .. } = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "view_image handler received unsupported payload".to_string(), )); } }; let args: ViewImageArgs = serde_json::from_str(&arguments).map_err(|e| { FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) })?; let abs_path = turn.resolve_path(Some(args.path)); let metadata = fs::metadata(&abs_path).await.map_err(|error| { FunctionCallError::RespondToModel(format!( "unable to locate image at `{}`: {error}", abs_path.display() )) })?; if !metadata.is_file() { return Err(FunctionCallError::RespondToModel(format!( "image path `{}` is not a file", abs_path.display() ))); } let event_path = abs_path.clone(); session .inject_input(vec![UserInput::LocalImage { path: abs_path }]) .await .map_err(|_| { FunctionCallError::RespondToModel( "unable to attach image (no active task)".to_string(), ) })?; session .send_event( turn.as_ref(), EventMsg::ViewImageToolCall(ViewImageToolCallEvent { call_id, path: event_path, }), ) .await; Ok(ToolOutput::Function { content: "attached local image path".to_string(), content_items: None, success: 
Some(true), }) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/plan.rs
codex-rs/core/src/tools/handlers/plan.rs
use crate::client_common::tools::ResponsesApiTool; use crate::client_common::tools::ToolSpec; use crate::codex::Session; use crate::codex::TurnContext; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; use crate::tools::spec::JsonSchema; use async_trait::async_trait; use codex_protocol::plan_tool::UpdatePlanArgs; use codex_protocol::protocol::EventMsg; use std::collections::BTreeMap; use std::sync::LazyLock; pub struct PlanHandler; pub static PLAN_TOOL: LazyLock<ToolSpec> = LazyLock::new(|| { let mut plan_item_props = BTreeMap::new(); plan_item_props.insert("step".to_string(), JsonSchema::String { description: None }); plan_item_props.insert( "status".to_string(), JsonSchema::String { description: Some("One of: pending, in_progress, completed".to_string()), }, ); let plan_items_schema = JsonSchema::Array { description: Some("The list of steps".to_string()), items: Box::new(JsonSchema::Object { properties: plan_item_props, required: Some(vec!["step".to_string(), "status".to_string()]), additional_properties: Some(false.into()), }), }; let mut properties = BTreeMap::new(); properties.insert( "explanation".to_string(), JsonSchema::String { description: None }, ); properties.insert("plan".to_string(), plan_items_schema); ToolSpec::Function(ResponsesApiTool { name: "update_plan".to_string(), description: r#"Updates the task plan. Provide an optional explanation and a list of plan items, each with a step and status. At most one step can be in_progress at a time. 
"# .to_string(), strict: false, parameters: JsonSchema::Object { properties, required: Some(vec!["plan".to_string()]), additional_properties: Some(false.into()), }, }) }); #[async_trait] impl ToolHandler for PlanHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { session, turn, call_id, payload, .. } = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "update_plan handler received unsupported payload".to_string(), )); } }; let content = handle_update_plan(session.as_ref(), turn.as_ref(), arguments, call_id).await?; Ok(ToolOutput::Function { content, content_items: None, success: Some(true), }) } } /// This function doesn't do anything useful. However, it gives the model a structured way to record its plan that clients can read and render. /// So it's the _inputs_ to this function that are useful to clients, not the outputs and neither are actually useful for the model other /// than forcing it to come up and document a plan (TBD how that affects performance). pub(crate) async fn handle_update_plan( session: &Session, turn_context: &TurnContext, arguments: String, _call_id: String, ) -> Result<String, FunctionCallError> { let args = parse_update_plan_arguments(&arguments)?; session .send_event(turn_context, EventMsg::PlanUpdate(args)) .await; Ok("Plan updated".to_string()) } fn parse_update_plan_arguments(arguments: &str) -> Result<UpdatePlanArgs, FunctionCallError> { serde_json::from_str::<UpdatePlanArgs>(arguments).map_err(|e| { FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e}")) }) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/mod.rs
codex-rs/core/src/tools/handlers/mod.rs
pub mod apply_patch; mod grep_files; mod list_dir; mod mcp; mod mcp_resource; mod plan; mod read_file; mod shell; mod test_sync; mod unified_exec; mod view_image; pub use plan::PLAN_TOOL; pub use apply_patch::ApplyPatchHandler; pub use grep_files::GrepFilesHandler; pub use list_dir::ListDirHandler; pub use mcp::McpHandler; pub use mcp_resource::McpResourceHandler; pub use plan::PlanHandler; pub use read_file::ReadFileHandler; pub use shell::ShellCommandHandler; pub use shell::ShellHandler; pub use test_sync::TestSyncHandler; pub use unified_exec::UnifiedExecHandler; pub use view_image::ViewImageHandler;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/read_file.rs
codex-rs/core/src/tools/handlers/read_file.rs
use std::collections::VecDeque; use std::path::PathBuf; use async_trait::async_trait; use codex_utils_string::take_bytes_at_char_boundary; use serde::Deserialize; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; pub struct ReadFileHandler; const MAX_LINE_LENGTH: usize = 500; const TAB_WIDTH: usize = 4; // TODO(jif) add support for block comments const COMMENT_PREFIXES: &[&str] = &["#", "//", "--"]; /// JSON arguments accepted by the `read_file` tool handler. #[derive(Deserialize)] struct ReadFileArgs { /// Absolute path to the file that will be read. file_path: String, /// 1-indexed line number to start reading from; defaults to 1. #[serde(default = "defaults::offset")] offset: usize, /// Maximum number of lines to return; defaults to 2000. #[serde(default = "defaults::limit")] limit: usize, /// Determines whether the handler reads a simple slice or indentation-aware block. #[serde(default)] mode: ReadMode, /// Optional indentation configuration used when `mode` is `Indentation`. #[serde(default)] indentation: Option<IndentationArgs>, } #[derive(Deserialize)] #[serde(rename_all = "snake_case")] enum ReadMode { Slice, Indentation, } /// Additional configuration for indentation-aware reads. #[derive(Deserialize, Clone)] struct IndentationArgs { /// Optional explicit anchor line; defaults to `offset` when omitted. #[serde(default)] anchor_line: Option<usize>, /// Maximum indentation depth to collect; `0` means unlimited. #[serde(default = "defaults::max_levels")] max_levels: usize, /// Whether to include sibling blocks at the same indentation level. #[serde(default = "defaults::include_siblings")] include_siblings: bool, /// Whether to include header lines above the anchor block. This made on a best effort basis. 
#[serde(default = "defaults::include_header")] include_header: bool, /// Optional hard cap on returned lines; defaults to the global `limit`. #[serde(default)] max_lines: Option<usize>, } #[derive(Clone, Debug)] struct LineRecord { number: usize, raw: String, display: String, indent: usize, } impl LineRecord { fn trimmed(&self) -> &str { self.raw.trim_start() } fn is_blank(&self) -> bool { self.trimmed().is_empty() } fn is_comment(&self) -> bool { COMMENT_PREFIXES .iter() .any(|prefix| self.raw.trim().starts_with(prefix)) } } #[async_trait] impl ToolHandler for ReadFileHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { payload, .. } = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "read_file handler received unsupported payload".to_string(), )); } }; let args: ReadFileArgs = serde_json::from_str(&arguments).map_err(|err| { FunctionCallError::RespondToModel(format!( "failed to parse function arguments: {err:?}" )) })?; let ReadFileArgs { file_path, offset, limit, mode, indentation, } = args; if offset == 0 { return Err(FunctionCallError::RespondToModel( "offset must be a 1-indexed line number".to_string(), )); } if limit == 0 { return Err(FunctionCallError::RespondToModel( "limit must be greater than zero".to_string(), )); } let path = PathBuf::from(&file_path); if !path.is_absolute() { return Err(FunctionCallError::RespondToModel( "file_path must be an absolute path".to_string(), )); } let collected = match mode { ReadMode::Slice => slice::read(&path, offset, limit).await?, ReadMode::Indentation => { let indentation = indentation.unwrap_or_default(); indentation::read_block(&path, offset, limit, indentation).await? 
} }; Ok(ToolOutput::Function { content: collected.join("\n"), content_items: None, success: Some(true), }) } } mod slice { use crate::function_tool::FunctionCallError; use crate::tools::handlers::read_file::format_line; use std::path::Path; use tokio::fs::File; use tokio::io::AsyncBufReadExt; use tokio::io::BufReader; pub async fn read( path: &Path, offset: usize, limit: usize, ) -> Result<Vec<String>, FunctionCallError> { let file = File::open(path).await.map_err(|err| { FunctionCallError::RespondToModel(format!("failed to read file: {err}")) })?; let mut reader = BufReader::new(file); let mut collected = Vec::new(); let mut seen = 0usize; let mut buffer = Vec::new(); loop { buffer.clear(); let bytes_read = reader.read_until(b'\n', &mut buffer).await.map_err(|err| { FunctionCallError::RespondToModel(format!("failed to read file: {err}")) })?; if bytes_read == 0 { break; } if buffer.last() == Some(&b'\n') { buffer.pop(); if buffer.last() == Some(&b'\r') { buffer.pop(); } } seen += 1; if seen < offset { continue; } if collected.len() == limit { break; } let formatted = format_line(&buffer); collected.push(format!("L{seen}: {formatted}")); if collected.len() == limit { break; } } if seen < offset { return Err(FunctionCallError::RespondToModel( "offset exceeds file length".to_string(), )); } Ok(collected) } } mod indentation { use crate::function_tool::FunctionCallError; use crate::tools::handlers::read_file::IndentationArgs; use crate::tools::handlers::read_file::LineRecord; use crate::tools::handlers::read_file::TAB_WIDTH; use crate::tools::handlers::read_file::format_line; use crate::tools::handlers::read_file::trim_empty_lines; use std::collections::VecDeque; use std::path::Path; use tokio::fs::File; use tokio::io::AsyncBufReadExt; use tokio::io::BufReader; pub async fn read_block( path: &Path, offset: usize, limit: usize, options: IndentationArgs, ) -> Result<Vec<String>, FunctionCallError> { let anchor_line = options.anchor_line.unwrap_or(offset); if anchor_line 
== 0 { return Err(FunctionCallError::RespondToModel( "anchor_line must be a 1-indexed line number".to_string(), )); } let guard_limit = options.max_lines.unwrap_or(limit); if guard_limit == 0 { return Err(FunctionCallError::RespondToModel( "max_lines must be greater than zero".to_string(), )); } let collected = collect_file_lines(path).await?; if collected.is_empty() || anchor_line > collected.len() { return Err(FunctionCallError::RespondToModel( "anchor_line exceeds file length".to_string(), )); } let anchor_index = anchor_line - 1; let effective_indents = compute_effective_indents(&collected); let anchor_indent = effective_indents[anchor_index]; // Compute the min indent let min_indent = if options.max_levels == 0 { 0 } else { anchor_indent.saturating_sub(options.max_levels * TAB_WIDTH) }; // Cap requested lines by guard_limit and file length let final_limit = limit.min(guard_limit).min(collected.len()); if final_limit == 1 { return Ok(vec![format!( "L{}: {}", collected[anchor_index].number, collected[anchor_index].display )]); } // Cursors let mut i: isize = anchor_index as isize - 1; // up (inclusive) let mut j: usize = anchor_index + 1; // down (inclusive) let mut i_counter_min_indent = 0; let mut j_counter_min_indent = 0; let mut out = VecDeque::with_capacity(limit); out.push_back(&collected[anchor_index]); while out.len() < final_limit { let mut progressed = 0; // Up. if i >= 0 { let iu = i as usize; if effective_indents[iu] >= min_indent { out.push_front(&collected[iu]); progressed += 1; i -= 1; // We do not include the siblings (not applied to comments). if effective_indents[iu] == min_indent && !options.include_siblings { let allow_header_comment = options.include_header && collected[iu].is_comment(); let can_take_line = allow_header_comment || i_counter_min_indent == 0; if can_take_line { i_counter_min_indent += 1; } else { // This line shouldn't have been taken. 
out.pop_front(); progressed -= 1; i = -1; // consider using Option<usize> or a control flag instead of a sentinel } } // Short-cut. if out.len() >= final_limit { break; } } else { // Stop moving up. i = -1; } } // Down. if j < collected.len() { let ju = j; if effective_indents[ju] >= min_indent { out.push_back(&collected[ju]); progressed += 1; j += 1; // We do not include the siblings (applied to comments). if effective_indents[ju] == min_indent && !options.include_siblings { if j_counter_min_indent > 0 { // This line shouldn't have been taken. out.pop_back(); progressed -= 1; j = collected.len(); } j_counter_min_indent += 1; } } else { // Stop moving down. j = collected.len(); } } if progressed == 0 { break; } } // Trim empty lines trim_empty_lines(&mut out); Ok(out .into_iter() .map(|record| format!("L{}: {}", record.number, record.display)) .collect()) } async fn collect_file_lines(path: &Path) -> Result<Vec<LineRecord>, FunctionCallError> { let file = File::open(path).await.map_err(|err| { FunctionCallError::RespondToModel(format!("failed to read file: {err}")) })?; let mut reader = BufReader::new(file); let mut buffer = Vec::new(); let mut lines = Vec::new(); let mut number = 0usize; loop { buffer.clear(); let bytes_read = reader.read_until(b'\n', &mut buffer).await.map_err(|err| { FunctionCallError::RespondToModel(format!("failed to read file: {err}")) })?; if bytes_read == 0 { break; } if buffer.last() == Some(&b'\n') { buffer.pop(); if buffer.last() == Some(&b'\r') { buffer.pop(); } } number += 1; let raw = String::from_utf8_lossy(&buffer).into_owned(); let indent = measure_indent(&raw); let display = format_line(&buffer); lines.push(LineRecord { number, raw, display, indent, }); } Ok(lines) } fn compute_effective_indents(records: &[LineRecord]) -> Vec<usize> { let mut effective = Vec::with_capacity(records.len()); let mut previous_indent = 0usize; for record in records { if record.is_blank() { effective.push(previous_indent); } else { previous_indent = 
record.indent; effective.push(previous_indent); } } effective } fn measure_indent(line: &str) -> usize { line.chars() .take_while(|c| matches!(c, ' ' | '\t')) .map(|c| if c == '\t' { TAB_WIDTH } else { 1 }) .sum() } } fn format_line(bytes: &[u8]) -> String { let decoded = String::from_utf8_lossy(bytes); if decoded.len() > MAX_LINE_LENGTH { take_bytes_at_char_boundary(&decoded, MAX_LINE_LENGTH).to_string() } else { decoded.into_owned() } } fn trim_empty_lines(out: &mut VecDeque<&LineRecord>) { while matches!(out.front(), Some(line) if line.raw.trim().is_empty()) { out.pop_front(); } while matches!(out.back(), Some(line) if line.raw.trim().is_empty()) { out.pop_back(); } } mod defaults { use super::*; impl Default for IndentationArgs { fn default() -> Self { Self { anchor_line: None, max_levels: max_levels(), include_siblings: include_siblings(), include_header: include_header(), max_lines: None, } } } impl Default for ReadMode { fn default() -> Self { Self::Slice } } pub fn offset() -> usize { 1 } pub fn limit() -> usize { 2000 } pub fn max_levels() -> usize { 0 } pub fn include_siblings() -> bool { false } pub fn include_header() -> bool { true } } #[cfg(test)] mod tests { use super::indentation::read_block; use super::slice::read; use super::*; use pretty_assertions::assert_eq; use tempfile::NamedTempFile; #[tokio::test] async fn reads_requested_range() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "alpha beta gamma " )?; let lines = read(temp.path(), 2, 2).await?; assert_eq!(lines, vec!["L2: beta".to_string(), "L3: gamma".to_string()]); Ok(()) } #[tokio::test] async fn errors_when_offset_exceeds_length() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; writeln!(temp, "only")?; let err = read(temp.path(), 3, 1) .await .expect_err("offset exceeds length"); assert_eq!( err, FunctionCallError::RespondToModel("offset exceeds file length".to_string()) ); Ok(()) } #[tokio::test] 
async fn reads_non_utf8_lines() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; temp.as_file_mut().write_all(b"\xff\xfe\nplain\n")?; let lines = read(temp.path(), 1, 2).await?; let expected_first = format!("L1: {}{}", '\u{FFFD}', '\u{FFFD}'); assert_eq!(lines, vec![expected_first, "L2: plain".to_string()]); Ok(()) } #[tokio::test] async fn trims_crlf_endings() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!(temp, "one\r\ntwo\r\n")?; let lines = read(temp.path(), 1, 2).await?; assert_eq!(lines, vec!["L1: one".to_string(), "L2: two".to_string()]); Ok(()) } #[tokio::test] async fn respects_limit_even_with_more_lines() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "first second third " )?; let lines = read(temp.path(), 1, 2).await?; assert_eq!( lines, vec!["L1: first".to_string(), "L2: second".to_string()] ); Ok(()) } #[tokio::test] async fn truncates_lines_longer_than_max_length() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; let long_line = "x".repeat(MAX_LINE_LENGTH + 50); writeln!(temp, "{long_line}")?; let lines = read(temp.path(), 1, 1).await?; let expected = "x".repeat(MAX_LINE_LENGTH); assert_eq!(lines, vec![format!("L1: {expected}")]); Ok(()) } #[tokio::test] async fn indentation_mode_captures_block() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "fn outer() {{ if cond {{ inner(); }} tail(); }} " )?; let options = IndentationArgs { anchor_line: Some(3), include_siblings: false, max_levels: 1, ..Default::default() }; let lines = read_block(temp.path(), 3, 10, options).await?; assert_eq!( lines, vec![ "L2: if cond {".to_string(), "L3: inner();".to_string(), "L4: }".to_string() ] ); Ok(()) } #[tokio::test] async fn indentation_mode_expands_parents() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as 
_; write!( temp, "mod root {{ fn outer() {{ if cond {{ inner(); }} }} }} " )?; let mut options = IndentationArgs { anchor_line: Some(4), max_levels: 2, ..Default::default() }; let lines = read_block(temp.path(), 4, 50, options.clone()).await?; assert_eq!( lines, vec![ "L2: fn outer() {".to_string(), "L3: if cond {".to_string(), "L4: inner();".to_string(), "L5: }".to_string(), "L6: }".to_string(), ] ); options.max_levels = 3; let expanded = read_block(temp.path(), 4, 50, options).await?; assert_eq!( expanded, vec![ "L1: mod root {".to_string(), "L2: fn outer() {".to_string(), "L3: if cond {".to_string(), "L4: inner();".to_string(), "L5: }".to_string(), "L6: }".to_string(), "L7: }".to_string(), ] ); Ok(()) } #[tokio::test] async fn indentation_mode_respects_sibling_flag() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "fn wrapper() {{ if first {{ do_first(); }} if second {{ do_second(); }} }} " )?; let mut options = IndentationArgs { anchor_line: Some(3), include_siblings: false, max_levels: 1, ..Default::default() }; let lines = read_block(temp.path(), 3, 50, options.clone()).await?; assert_eq!( lines, vec![ "L2: if first {".to_string(), "L3: do_first();".to_string(), "L4: }".to_string(), ] ); options.include_siblings = true; let with_siblings = read_block(temp.path(), 3, 50, options).await?; assert_eq!( with_siblings, vec![ "L2: if first {".to_string(), "L3: do_first();".to_string(), "L4: }".to_string(), "L5: if second {".to_string(), "L6: do_second();".to_string(), "L7: }".to_string(), ] ); Ok(()) } #[tokio::test] async fn indentation_mode_handles_python_sample() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "class Foo: def __init__(self, size): self.size = size def double(self, value): if value is None: return 0 result = value * self.size return result class Bar: def compute(self): helper = Foo(2) return helper.double(5) " )?; let options = IndentationArgs 
{ anchor_line: Some(7), include_siblings: true, max_levels: 1, ..Default::default() }; let lines = read_block(temp.path(), 1, 200, options).await?; assert_eq!( lines, vec![ "L2: def __init__(self, size):".to_string(), "L3: self.size = size".to_string(), "L4: def double(self, value):".to_string(), "L5: if value is None:".to_string(), "L6: return 0".to_string(), "L7: result = value * self.size".to_string(), "L8: return result".to_string(), ] ); Ok(()) } #[tokio::test] #[ignore] async fn indentation_mode_handles_javascript_sample() -> anyhow::Result<()> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "export function makeThing() {{ const cache = new Map(); function ensure(key) {{ if (!cache.has(key)) {{ cache.set(key, []); }} return cache.get(key); }} const handlers = {{ init() {{ console.log(\"init\"); }}, run() {{ if (Math.random() > 0.5) {{ return \"heads\"; }} return \"tails\"; }}, }}; return {{ cache, handlers }}; }} export function other() {{ return makeThing(); }} " )?; let options = IndentationArgs { anchor_line: Some(15), max_levels: 1, ..Default::default() }; let lines = read_block(temp.path(), 15, 200, options).await?; assert_eq!( lines, vec![ "L10: init() {".to_string(), "L11: console.log(\"init\");".to_string(), "L12: },".to_string(), "L13: run() {".to_string(), "L14: if (Math.random() > 0.5) {".to_string(), "L15: return \"heads\";".to_string(), "L16: }".to_string(), "L17: return \"tails\";".to_string(), "L18: },".to_string(), ] ); Ok(()) } fn write_cpp_sample() -> anyhow::Result<NamedTempFile> { let mut temp = NamedTempFile::new()?; use std::io::Write as _; write!( temp, "#include <vector> #include <string> namespace sample {{ class Runner {{ public: void setup() {{ if (enabled_) {{ init(); }} }} // Run the code int run() const {{ switch (mode_) {{ case Mode::Fast: return fast(); case Mode::Slow: return slow(); default: return fallback(); }} }} private: bool enabled_ = false; Mode mode_ = Mode::Fast; int fast() const {{ 
return 1; }} }}; }} // namespace sample " )?; Ok(temp) } #[tokio::test] async fn indentation_mode_handles_cpp_sample_shallow() -> anyhow::Result<()> { let temp = write_cpp_sample()?; let options = IndentationArgs { include_siblings: false, anchor_line: Some(18), max_levels: 1, ..Default::default() }; let lines = read_block(temp.path(), 18, 200, options).await?; assert_eq!( lines, vec![ "L15: switch (mode_) {".to_string(), "L16: case Mode::Fast:".to_string(), "L17: return fast();".to_string(), "L18: case Mode::Slow:".to_string(), "L19: return slow();".to_string(), "L20: default:".to_string(), "L21: return fallback();".to_string(), "L22: }".to_string(), ] ); Ok(()) } #[tokio::test] async fn indentation_mode_handles_cpp_sample() -> anyhow::Result<()> { let temp = write_cpp_sample()?; let options = IndentationArgs { include_siblings: false, anchor_line: Some(18), max_levels: 2, ..Default::default() }; let lines = read_block(temp.path(), 18, 200, options).await?; assert_eq!( lines, vec![ "L13: // Run the code".to_string(), "L14: int run() const {".to_string(), "L15: switch (mode_) {".to_string(), "L16: case Mode::Fast:".to_string(), "L17: return fast();".to_string(), "L18: case Mode::Slow:".to_string(), "L19: return slow();".to_string(), "L20: default:".to_string(), "L21: return fallback();".to_string(), "L22: }".to_string(), "L23: }".to_string(), ] ); Ok(()) } #[tokio::test] async fn indentation_mode_handles_cpp_sample_no_headers() -> anyhow::Result<()> { let temp = write_cpp_sample()?; let options = IndentationArgs { include_siblings: false, include_header: false, anchor_line: Some(18), max_levels: 2, ..Default::default() }; let lines = read_block(temp.path(), 18, 200, options).await?; assert_eq!( lines, vec![ "L14: int run() const {".to_string(), "L15: switch (mode_) {".to_string(), "L16: case Mode::Fast:".to_string(), "L17: return fast();".to_string(), "L18: case Mode::Slow:".to_string(), "L19: return slow();".to_string(), "L20: default:".to_string(), "L21: return 
fallback();".to_string(), "L22: }".to_string(), "L23: }".to_string(), ] ); Ok(()) } #[tokio::test] async fn indentation_mode_handles_cpp_sample_siblings() -> anyhow::Result<()> { let temp = write_cpp_sample()?; let options = IndentationArgs { include_siblings: true, include_header: false, anchor_line: Some(18), max_levels: 2, ..Default::default() }; let lines = read_block(temp.path(), 18, 200, options).await?; assert_eq!( lines, vec![ "L7: void setup() {".to_string(), "L8: if (enabled_) {".to_string(), "L9: init();".to_string(), "L10: }".to_string(), "L11: }".to_string(), "L12: ".to_string(), "L13: // Run the code".to_string(), "L14: int run() const {".to_string(), "L15: switch (mode_) {".to_string(), "L16: case Mode::Fast:".to_string(), "L17: return fast();".to_string(), "L18: case Mode::Slow:".to_string(), "L19: return slow();".to_string(), "L20: default:".to_string(), "L21: return fallback();".to_string(), "L22: }".to_string(), "L23: }".to_string(), ] ); Ok(()) } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/tools/handlers/test_sync.rs
codex-rs/core/src/tools/handlers/test_sync.rs
use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::Arc; use std::sync::OnceLock; use std::time::Duration; use async_trait::async_trait; use serde::Deserialize; use tokio::sync::Barrier; use tokio::time::sleep; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; use crate::tools::context::ToolPayload; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; pub struct TestSyncHandler; const DEFAULT_TIMEOUT_MS: u64 = 1_000; static BARRIERS: OnceLock<tokio::sync::Mutex<HashMap<String, BarrierState>>> = OnceLock::new(); struct BarrierState { barrier: Arc<Barrier>, participants: usize, } #[derive(Debug, Deserialize)] struct BarrierArgs { id: String, participants: usize, #[serde(default = "default_timeout_ms")] timeout_ms: u64, } #[derive(Debug, Deserialize)] struct TestSyncArgs { #[serde(default)] sleep_before_ms: Option<u64>, #[serde(default)] sleep_after_ms: Option<u64>, #[serde(default)] barrier: Option<BarrierArgs>, } fn default_timeout_ms() -> u64 { DEFAULT_TIMEOUT_MS } fn barrier_map() -> &'static tokio::sync::Mutex<HashMap<String, BarrierState>> { BARRIERS.get_or_init(|| tokio::sync::Mutex::new(HashMap::new())) } #[async_trait] impl ToolHandler for TestSyncHandler { fn kind(&self) -> ToolKind { ToolKind::Function } async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> { let ToolInvocation { payload, .. 
} = invocation; let arguments = match payload { ToolPayload::Function { arguments } => arguments, _ => { return Err(FunctionCallError::RespondToModel( "test_sync_tool handler received unsupported payload".to_string(), )); } }; let args: TestSyncArgs = serde_json::from_str(&arguments).map_err(|err| { FunctionCallError::RespondToModel(format!( "failed to parse function arguments: {err:?}" )) })?; if let Some(delay) = args.sleep_before_ms && delay > 0 { sleep(Duration::from_millis(delay)).await; } if let Some(barrier) = args.barrier { wait_on_barrier(barrier).await?; } if let Some(delay) = args.sleep_after_ms && delay > 0 { sleep(Duration::from_millis(delay)).await; } Ok(ToolOutput::Function { content: "ok".to_string(), content_items: None, success: Some(true), }) } } async fn wait_on_barrier(args: BarrierArgs) -> Result<(), FunctionCallError> { if args.participants == 0 { return Err(FunctionCallError::RespondToModel( "barrier participants must be greater than zero".to_string(), )); } if args.timeout_ms == 0 { return Err(FunctionCallError::RespondToModel( "barrier timeout must be greater than zero".to_string(), )); } let barrier_id = args.id.clone(); let barrier = { let mut map = barrier_map().lock().await; match map.entry(barrier_id.clone()) { Entry::Occupied(entry) => { let state = entry.get(); if state.participants != args.participants { let existing = state.participants; return Err(FunctionCallError::RespondToModel(format!( "barrier {barrier_id} already registered with {existing} participants" ))); } state.barrier.clone() } Entry::Vacant(entry) => { let barrier = Arc::new(Barrier::new(args.participants)); entry.insert(BarrierState { barrier: barrier.clone(), participants: args.participants, }); barrier } } }; let timeout = Duration::from_millis(args.timeout_ms); let wait_result = tokio::time::timeout(timeout, barrier.wait()) .await .map_err(|_| { FunctionCallError::RespondToModel("test_sync_tool barrier wait timed out".to_string()) })?; if wait_result.is_leader() 
{ let mut map = barrier_map().lock().await; if let Some(state) = map.get(&barrier_id) && Arc::ptr_eq(&state.barrier, &barrier) { map.remove(&barrier_id); } } Ok(()) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/context_manager/history_tests.rs
codex-rs/core/src/context_manager/history_tests.rs
use super::*; use crate::truncate; use crate::truncate::TruncationPolicy; use codex_git::GhostCommit; use codex_protocol::models::ContentItem; use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::LocalShellAction; use codex_protocol::models::LocalShellExecAction; use codex_protocol::models::LocalShellStatus; use codex_protocol::models::ReasoningItemContent; use codex_protocol::models::ReasoningItemReasoningSummary; use pretty_assertions::assert_eq; use regex_lite::Regex; const EXEC_FORMAT_MAX_BYTES: usize = 10_000; const EXEC_FORMAT_MAX_TOKENS: usize = 2_500; fn assistant_msg(text: &str) -> ResponseItem { ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: text.to_string(), }], } } fn create_history_with_items(items: Vec<ResponseItem>) -> ContextManager { let mut h = ContextManager::new(); // Use a generous but fixed token budget; tests only rely on truncation // behavior, not on a specific model's token limit. 
h.record_items(items.iter(), TruncationPolicy::Tokens(10_000)); h } fn user_msg(text: &str) -> ResponseItem { ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::OutputText { text: text.to_string(), }], } } fn reasoning_msg(text: &str) -> ResponseItem { ResponseItem::Reasoning { id: String::new(), summary: vec![ReasoningItemReasoningSummary::SummaryText { text: "summary".to_string(), }], content: Some(vec![ReasoningItemContent::ReasoningText { text: text.to_string(), }]), encrypted_content: None, } } fn reasoning_with_encrypted_content(len: usize) -> ResponseItem { ResponseItem::Reasoning { id: String::new(), summary: vec![ReasoningItemReasoningSummary::SummaryText { text: "summary".to_string(), }], content: None, encrypted_content: Some("a".repeat(len)), } } fn truncate_exec_output(content: &str) -> String { truncate::truncate_text(content, TruncationPolicy::Tokens(EXEC_FORMAT_MAX_TOKENS)) } #[test] fn filters_non_api_messages() { let mut h = ContextManager::default(); let policy = TruncationPolicy::Tokens(10_000); // System message is not API messages; Other is ignored. let system = ResponseItem::Message { id: None, role: "system".to_string(), content: vec![ContentItem::OutputText { text: "ignored".to_string(), }], }; let reasoning = reasoning_msg("thinking..."); h.record_items([&system, &reasoning, &ResponseItem::Other], policy); // User and assistant should be retained. 
let u = user_msg("hi"); let a = assistant_msg("hello"); h.record_items([&u, &a], policy); let items = h.contents(); assert_eq!( items, vec![ ResponseItem::Reasoning { id: String::new(), summary: vec![ReasoningItemReasoningSummary::SummaryText { text: "summary".to_string(), }], content: Some(vec![ReasoningItemContent::ReasoningText { text: "thinking...".to_string(), }]), encrypted_content: None, }, ResponseItem::Message { id: None, role: "user".to_string(), content: vec![ContentItem::OutputText { text: "hi".to_string() }] }, ResponseItem::Message { id: None, role: "assistant".to_string(), content: vec![ContentItem::OutputText { text: "hello".to_string() }] } ] ); } #[test] fn non_last_reasoning_tokens_return_zero_when_no_user_messages() { let history = create_history_with_items(vec![reasoning_with_encrypted_content(800)]); assert_eq!(history.get_non_last_reasoning_items_tokens(), 0); } #[test] fn non_last_reasoning_tokens_ignore_entries_after_last_user() { let history = create_history_with_items(vec![ reasoning_with_encrypted_content(900), user_msg("first"), reasoning_with_encrypted_content(1_000), user_msg("second"), reasoning_with_encrypted_content(2_000), ]); // first: (900 * 0.75 - 650) / 4 = 6.25 tokens // second: (1000 * 0.75 - 650) / 4 = 25 tokens // first + second = 62.5 assert_eq!(history.get_non_last_reasoning_items_tokens(), 32); } #[test] fn get_history_for_prompt_drops_ghost_commits() { let items = vec![ResponseItem::GhostSnapshot { ghost_commit: GhostCommit::new("ghost-1".to_string(), None, Vec::new(), Vec::new()), }]; let mut history = create_history_with_items(items); let filtered = history.get_history_for_prompt(); assert_eq!(filtered, vec![]); } #[test] fn remove_first_item_removes_matching_output_for_function_call() { let items = vec![ ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-1".to_string(), }, ResponseItem::FunctionCallOutput { call_id: "call-1".to_string(), output: 
FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }, ]; let mut h = create_history_with_items(items); h.remove_first_item(); assert_eq!(h.contents(), vec![]); } #[test] fn remove_first_item_removes_matching_call_for_output() { let items = vec![ ResponseItem::FunctionCallOutput { call_id: "call-2".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }, ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-2".to_string(), }, ]; let mut h = create_history_with_items(items); h.remove_first_item(); assert_eq!(h.contents(), vec![]); } #[test] fn remove_first_item_handles_local_shell_pair() { let items = vec![ ResponseItem::LocalShellCall { id: None, call_id: Some("call-3".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string(), "hi".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }, ResponseItem::FunctionCallOutput { call_id: "call-3".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }, ]; let mut h = create_history_with_items(items); h.remove_first_item(); assert_eq!(h.contents(), vec![]); } #[test] fn remove_first_item_handles_custom_tool_pair() { let items = vec![ ResponseItem::CustomToolCall { id: None, status: None, call_id: "tool-1".to_string(), name: "my_tool".to_string(), input: "{}".to_string(), }, ResponseItem::CustomToolCallOutput { call_id: "tool-1".to_string(), output: "ok".to_string(), }, ]; let mut h = create_history_with_items(items); h.remove_first_item(); assert_eq!(h.contents(), vec![]); } #[test] fn normalization_retains_local_shell_outputs() { let items = vec![ ResponseItem::LocalShellCall { id: None, call_id: Some("shell-1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: 
vec!["echo".to_string(), "hi".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }, ResponseItem::FunctionCallOutput { call_id: "shell-1".to_string(), output: FunctionCallOutputPayload { content: "Total output lines: 1\n\nok".to_string(), ..Default::default() }, }, ]; let mut history = create_history_with_items(items.clone()); let normalized = history.get_history(); assert_eq!(normalized, items); } #[test] fn record_items_truncates_function_call_output_content() { let mut history = ContextManager::new(); // Any reasonably small token budget works; the test only cares that // truncation happens and the marker is present. let policy = TruncationPolicy::Tokens(1_000); let long_line = "a very long line to trigger truncation\n"; let long_output = long_line.repeat(2_500); let item = ResponseItem::FunctionCallOutput { call_id: "call-100".to_string(), output: FunctionCallOutputPayload { content: long_output.clone(), success: Some(true), ..Default::default() }, }; history.record_items([&item], policy); assert_eq!(history.items.len(), 1); match &history.items[0] { ResponseItem::FunctionCallOutput { output, .. } => { assert_ne!(output.content, long_output); assert!( output.content.contains("tokens truncated"), "expected token-based truncation marker, got {}", output.content ); assert!( output.content.contains("tokens truncated"), "expected truncation marker, got {}", output.content ); } other => panic!("unexpected history item: {other:?}"), } } #[test] fn record_items_truncates_custom_tool_call_output_content() { let mut history = ContextManager::new(); let policy = TruncationPolicy::Tokens(1_000); let line = "custom output that is very long\n"; let long_output = line.repeat(2_500); let item = ResponseItem::CustomToolCallOutput { call_id: "tool-200".to_string(), output: long_output.clone(), }; history.record_items([&item], policy); assert_eq!(history.items.len(), 1); match &history.items[0] { ResponseItem::CustomToolCallOutput { output, .. 
} => { assert_ne!(output, &long_output); assert!( output.contains("tokens truncated"), "expected token-based truncation marker, got {output}" ); assert!( output.contains("tokens truncated") || output.contains("bytes truncated"), "expected truncation marker, got {output}" ); } other => panic!("unexpected history item: {other:?}"), } } #[test] fn record_items_respects_custom_token_limit() { let mut history = ContextManager::new(); let policy = TruncationPolicy::Tokens(10); let long_output = "tokenized content repeated many times ".repeat(200); let item = ResponseItem::FunctionCallOutput { call_id: "call-custom-limit".to_string(), output: FunctionCallOutputPayload { content: long_output, success: Some(true), ..Default::default() }, }; history.record_items([&item], policy); let stored = match &history.items[0] { ResponseItem::FunctionCallOutput { output, .. } => output, other => panic!("unexpected history item: {other:?}"), }; assert!(stored.content.contains("tokens truncated")); } fn assert_truncated_message_matches(message: &str, line: &str, expected_removed: usize) { let pattern = truncated_message_pattern(line); let regex = Regex::new(&pattern).unwrap_or_else(|err| { panic!("failed to compile regex {pattern}: {err}"); }); let captures = regex .captures(message) .unwrap_or_else(|| panic!("message failed to match pattern {pattern}: {message}")); let body = captures .name("body") .expect("missing body capture") .as_str(); assert!( body.len() <= EXEC_FORMAT_MAX_BYTES, "body exceeds byte limit: {} bytes", body.len() ); let removed: usize = captures .name("removed") .expect("missing removed capture") .as_str() .parse() .unwrap_or_else(|err| panic!("invalid removed tokens: {err}")); assert_eq!(removed, expected_removed, "mismatched removed token count"); } fn truncated_message_pattern(line: &str) -> String { let escaped_line = regex_lite::escape(line); format!(r"(?s)^(?P<body>{escaped_line}.*?)(?:\r?)?…(?P<removed>\d+) tokens truncated…(?:.*)?$") } #[test] fn 
format_exec_output_truncates_large_error() { let line = "very long execution error line that should trigger truncation\n"; let large_error = line.repeat(2_500); // way beyond both byte and line limits let truncated = truncate_exec_output(&large_error); assert_truncated_message_matches(&truncated, line, 36250); assert_ne!(truncated, large_error); } #[test] fn format_exec_output_marks_byte_truncation_without_omitted_lines() { let long_line = "a".repeat(EXEC_FORMAT_MAX_BYTES + 10000); let truncated = truncate_exec_output(&long_line); assert_ne!(truncated, long_line); assert_truncated_message_matches(&truncated, "a", 2500); assert!( !truncated.contains("omitted"), "line omission marker should not appear when no lines were dropped: {truncated}" ); } #[test] fn format_exec_output_returns_original_when_within_limits() { let content = "example output\n".repeat(10); assert_eq!(truncate_exec_output(&content), content); } #[test] fn format_exec_output_reports_omitted_lines_and_keeps_head_and_tail() { let total_lines = 2_000; let filler = "x".repeat(64); let content: String = (0..total_lines) .map(|idx| format!("line-{idx}-{filler}\n")) .collect(); let truncated = truncate_exec_output(&content); assert_truncated_message_matches(&truncated, "line-0-", 34_723); assert!( truncated.contains("line-0-"), "expected head line to remain: {truncated}" ); let last_line = format!("line-{}-", total_lines - 1); assert!( truncated.contains(&last_line), "expected tail line to remain: {truncated}" ); } #[test] fn format_exec_output_prefers_line_marker_when_both_limits_exceeded() { let total_lines = 300; let long_line = "x".repeat(256); let content: String = (0..total_lines) .map(|idx| format!("line-{idx}-{long_line}\n")) .collect(); let truncated = truncate_exec_output(&content); assert_truncated_message_matches(&truncated, "line-0-", 17_423); } //TODO(aibrahim): run CI in release mode. 
#[cfg(not(debug_assertions))] #[test] fn normalize_adds_missing_output_for_function_call() { let items = vec![ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-x".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!( h.contents(), vec![ ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-x".to_string(), }, ResponseItem::FunctionCallOutput { call_id: "call-x".to_string(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, ] ); } #[cfg(not(debug_assertions))] #[test] fn normalize_adds_missing_output_for_custom_tool_call() { let items = vec![ResponseItem::CustomToolCall { id: None, status: None, call_id: "tool-x".to_string(), name: "custom".to_string(), input: "{}".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!( h.contents(), vec![ ResponseItem::CustomToolCall { id: None, status: None, call_id: "tool-x".to_string(), name: "custom".to_string(), input: "{}".to_string(), }, ResponseItem::CustomToolCallOutput { call_id: "tool-x".to_string(), output: "aborted".to_string(), }, ] ); } #[cfg(not(debug_assertions))] #[test] fn normalize_adds_missing_output_for_local_shell_call_with_id() { let items = vec![ResponseItem::LocalShellCall { id: None, call_id: Some("shell-1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string(), "hi".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!( h.contents(), vec![ ResponseItem::LocalShellCall { id: None, call_id: Some("shell-1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string(), "hi".to_string()], timeout_ms: 
None, working_directory: None, env: None, user: None, }), }, ResponseItem::FunctionCallOutput { call_id: "shell-1".to_string(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, ] ); } #[cfg(not(debug_assertions))] #[test] fn normalize_removes_orphan_function_call_output() { let items = vec![ResponseItem::FunctionCallOutput { call_id: "orphan-1".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!(h.contents(), vec![]); } #[cfg(not(debug_assertions))] #[test] fn normalize_removes_orphan_custom_tool_call_output() { let items = vec![ResponseItem::CustomToolCallOutput { call_id: "orphan-2".to_string(), output: "ok".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!(h.contents(), vec![]); } #[cfg(not(debug_assertions))] #[test] fn normalize_mixed_inserts_and_removals() { let items = vec![ // Will get an inserted output ResponseItem::FunctionCall { id: None, name: "f1".to_string(), arguments: "{}".to_string(), call_id: "c1".to_string(), }, // Orphan output that should be removed ResponseItem::FunctionCallOutput { call_id: "c2".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }, // Will get an inserted custom tool output ResponseItem::CustomToolCall { id: None, status: None, call_id: "t1".to_string(), name: "tool".to_string(), input: "{}".to_string(), }, // Local shell call also gets an inserted function call output ResponseItem::LocalShellCall { id: None, call_id: Some("s1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }, ]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!( h.contents(), vec![ 
ResponseItem::FunctionCall { id: None, name: "f1".to_string(), arguments: "{}".to_string(), call_id: "c1".to_string(), }, ResponseItem::FunctionCallOutput { call_id: "c1".to_string(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, ResponseItem::CustomToolCall { id: None, status: None, call_id: "t1".to_string(), name: "tool".to_string(), input: "{}".to_string(), }, ResponseItem::CustomToolCallOutput { call_id: "t1".to_string(), output: "aborted".to_string(), }, ResponseItem::LocalShellCall { id: None, call_id: Some("s1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }, ResponseItem::FunctionCallOutput { call_id: "s1".to_string(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, ] ); } #[test] fn normalize_adds_missing_output_for_function_call_inserts_output() { let items = vec![ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-x".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); assert_eq!( h.contents(), vec![ ResponseItem::FunctionCall { id: None, name: "do_it".to_string(), arguments: "{}".to_string(), call_id: "call-x".to_string(), }, ResponseItem::FunctionCallOutput { call_id: "call-x".to_string(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, ] ); } #[cfg(debug_assertions)] #[test] #[should_panic] fn normalize_adds_missing_output_for_custom_tool_call_panics_in_debug() { let items = vec![ResponseItem::CustomToolCall { id: None, status: None, call_id: "tool-x".to_string(), name: "custom".to_string(), input: "{}".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); } #[cfg(debug_assertions)] #[test] #[should_panic] fn 
normalize_adds_missing_output_for_local_shell_call_with_id_panics_in_debug() { let items = vec![ResponseItem::LocalShellCall { id: None, call_id: Some("shell-1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string(), "hi".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }]; let mut h = create_history_with_items(items); h.normalize_history(); } #[cfg(debug_assertions)] #[test] #[should_panic] fn normalize_removes_orphan_function_call_output_panics_in_debug() { let items = vec![ResponseItem::FunctionCallOutput { call_id: "orphan-1".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }]; let mut h = create_history_with_items(items); h.normalize_history(); } #[cfg(debug_assertions)] #[test] #[should_panic] fn normalize_removes_orphan_custom_tool_call_output_panics_in_debug() { let items = vec![ResponseItem::CustomToolCallOutput { call_id: "orphan-2".to_string(), output: "ok".to_string(), }]; let mut h = create_history_with_items(items); h.normalize_history(); } #[cfg(debug_assertions)] #[test] #[should_panic] fn normalize_mixed_inserts_and_removals_panics_in_debug() { let items = vec![ ResponseItem::FunctionCall { id: None, name: "f1".to_string(), arguments: "{}".to_string(), call_id: "c1".to_string(), }, ResponseItem::FunctionCallOutput { call_id: "c2".to_string(), output: FunctionCallOutputPayload { content: "ok".to_string(), ..Default::default() }, }, ResponseItem::CustomToolCall { id: None, status: None, call_id: "t1".to_string(), name: "tool".to_string(), input: "{}".to_string(), }, ResponseItem::LocalShellCall { id: None, call_id: Some("s1".to_string()), status: LocalShellStatus::Completed, action: LocalShellAction::Exec(LocalShellExecAction { command: vec!["echo".to_string()], timeout_ms: None, working_directory: None, env: None, user: None, }), }, ]; let mut h = create_history_with_items(items); 
h.normalize_history(); }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/context_manager/history.rs
codex-rs/core/src/context_manager/history.rs
use crate::codex::TurnContext; use crate::context_manager::normalize; use crate::truncate::TruncationPolicy; use crate::truncate::approx_token_count; use crate::truncate::approx_tokens_from_byte_count; use crate::truncate::truncate_function_output_items_with_policy; use crate::truncate::truncate_text; use codex_protocol::models::ContentItem; use codex_protocol::models::FunctionCallOutputContentItem; use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::ResponseItem; use codex_protocol::protocol::TokenUsage; use codex_protocol::protocol::TokenUsageInfo; use std::ops::Deref; /// Transcript of conversation history #[derive(Debug, Clone, Default)] pub(crate) struct ContextManager { /// The oldest items are at the beginning of the vector. items: Vec<ResponseItem>, token_info: Option<TokenUsageInfo>, } impl ContextManager { pub(crate) fn new() -> Self { Self { items: Vec::new(), token_info: TokenUsageInfo::new_or_append(&None, &None, None), } } pub(crate) fn token_info(&self) -> Option<TokenUsageInfo> { self.token_info.clone() } pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) { self.token_info = info; } pub(crate) fn set_token_usage_full(&mut self, context_window: i64) { match &mut self.token_info { Some(info) => info.fill_to_context_window(context_window), None => { self.token_info = Some(TokenUsageInfo::full_context_window(context_window)); } } } /// `items` is ordered from oldest to newest. pub(crate) fn record_items<I>(&mut self, items: I, policy: TruncationPolicy) where I: IntoIterator, I::Item: std::ops::Deref<Target = ResponseItem>, { for item in items { let item_ref = item.deref(); let is_ghost_snapshot = matches!(item_ref, ResponseItem::GhostSnapshot { .. 
}); if !is_api_message(item_ref) && !is_ghost_snapshot { continue; } let processed = self.process_item(item_ref, policy); self.items.push(processed); } } pub(crate) fn get_history(&mut self) -> Vec<ResponseItem> { self.normalize_history(); self.contents() } // Returns the history prepared for sending to the model. // With extra response items filtered out and GhostCommits removed. pub(crate) fn get_history_for_prompt(&mut self) -> Vec<ResponseItem> { let mut history = self.get_history(); Self::remove_ghost_snapshots(&mut history); history } // Estimate token usage using byte-based heuristics from the truncation helpers. // This is a coarse lower bound, not a tokenizer-accurate count. pub(crate) fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> { let model_family = turn_context.client.get_model_family(); let base_tokens = i64::try_from(approx_token_count(model_family.base_instructions.as_str())) .unwrap_or(i64::MAX); let items_tokens = self.items.iter().fold(0i64, |acc, item| { acc + match item { ResponseItem::GhostSnapshot { .. } => 0, ResponseItem::Reasoning { encrypted_content: Some(content), .. } | ResponseItem::Compaction { encrypted_content: content, } => estimate_reasoning_length(content.len()) as i64, item => { let serialized = serde_json::to_string(item).unwrap_or_default(); i64::try_from(approx_token_count(&serialized)).unwrap_or(i64::MAX) } } }); Some(base_tokens.saturating_add(items_tokens)) } pub(crate) fn remove_first_item(&mut self) { if !self.items.is_empty() { // Remove the oldest item (front of the list). Items are ordered from // oldest → newest, so index 0 is the first entry recorded. let removed = self.items.remove(0); // If the removed item participates in a call/output pair, also remove // its corresponding counterpart to keep the invariants intact without // running a full normalization pass. 
normalize::remove_corresponding_for(&mut self.items, &removed); } } pub(crate) fn replace(&mut self, items: Vec<ResponseItem>) { self.items = items; } pub(crate) fn replace_last_turn_images(&mut self, placeholder: &str) { let Some(last_item) = self.items.last_mut() else { return; }; match last_item { ResponseItem::Message { role, content, .. } if role == "user" => { for item in content.iter_mut() { if matches!(item, ContentItem::InputImage { .. }) { *item = ContentItem::InputText { text: placeholder.to_string(), }; } } } ResponseItem::FunctionCallOutput { output, .. } => { let Some(content_items) = output.content_items.as_mut() else { return; }; for item in content_items.iter_mut() { if matches!(item, FunctionCallOutputContentItem::InputImage { .. }) { *item = FunctionCallOutputContentItem::InputText { text: placeholder.to_string(), }; } } } _ => {} } } pub(crate) fn update_token_info( &mut self, usage: &TokenUsage, model_context_window: Option<i64>, ) { self.token_info = TokenUsageInfo::new_or_append( &self.token_info, &Some(usage.clone()), model_context_window, ); } fn get_non_last_reasoning_items_tokens(&self) -> usize { // get reasoning items excluding all the ones after the last user message let Some(last_user_index) = self .items .iter() .rposition(|item| matches!(item, ResponseItem::Message { role, .. } if role == "user")) else { return 0usize; }; let total_reasoning_bytes = self .items .iter() .take(last_user_index) .filter_map(|item| { if let ResponseItem::Reasoning { encrypted_content: Some(content), .. 
} = item { Some(content.len()) } else { None } }) .map(estimate_reasoning_length) .fold(0usize, usize::saturating_add); let token_estimate = approx_tokens_from_byte_count(total_reasoning_bytes); token_estimate as usize } pub(crate) fn get_total_token_usage(&self) -> i64 { self.token_info .as_ref() .map(|info| info.last_token_usage.total_tokens) .unwrap_or(0) .saturating_add(self.get_non_last_reasoning_items_tokens() as i64) } /// This function enforces a couple of invariants on the in-memory history: /// 1. every call (function/custom) has a corresponding output entry /// 2. every output has a corresponding call entry fn normalize_history(&mut self) { // all function/tool calls must have a corresponding output normalize::ensure_call_outputs_present(&mut self.items); // all outputs must have a corresponding function/tool call normalize::remove_orphan_outputs(&mut self.items); } /// Returns a clone of the contents in the transcript. fn contents(&self) -> Vec<ResponseItem> { self.items.clone() } fn remove_ghost_snapshots(items: &mut Vec<ResponseItem>) { items.retain(|item| !matches!(item, ResponseItem::GhostSnapshot { .. 
})); } fn process_item(&self, item: &ResponseItem, policy: TruncationPolicy) -> ResponseItem { let policy_with_serialization_budget = policy.mul(1.2); match item { ResponseItem::FunctionCallOutput { call_id, output } => { let truncated = truncate_text(output.content.as_str(), policy_with_serialization_budget); let truncated_items = output.content_items.as_ref().map(|items| { truncate_function_output_items_with_policy( items, policy_with_serialization_budget, ) }); ResponseItem::FunctionCallOutput { call_id: call_id.clone(), output: FunctionCallOutputPayload { content: truncated, content_items: truncated_items, success: output.success, }, } } ResponseItem::CustomToolCallOutput { call_id, output } => { let truncated = truncate_text(output, policy_with_serialization_budget); ResponseItem::CustomToolCallOutput { call_id: call_id.clone(), output: truncated, } } ResponseItem::Message { .. } | ResponseItem::Reasoning { .. } | ResponseItem::LocalShellCall { .. } | ResponseItem::FunctionCall { .. } | ResponseItem::WebSearchCall { .. } | ResponseItem::CustomToolCall { .. } | ResponseItem::Compaction { .. } | ResponseItem::GhostSnapshot { .. } | ResponseItem::Other => item.clone(), } } } /// API messages include every non-system item (user/assistant messages, reasoning, /// tool calls, tool outputs, shell calls, and web-search calls). fn is_api_message(message: &ResponseItem) -> bool { match message { ResponseItem::Message { role, .. } => role.as_str() != "system", ResponseItem::FunctionCallOutput { .. } | ResponseItem::FunctionCall { .. } | ResponseItem::CustomToolCall { .. } | ResponseItem::CustomToolCallOutput { .. } | ResponseItem::LocalShellCall { .. } | ResponseItem::Reasoning { .. } | ResponseItem::WebSearchCall { .. } | ResponseItem::Compaction { .. } => true, ResponseItem::GhostSnapshot { .. 
} => false, ResponseItem::Other => false, } } fn estimate_reasoning_length(encoded_len: usize) -> usize { encoded_len .saturating_mul(3) .checked_div(4) .unwrap_or(0) .saturating_sub(650) } #[cfg(test)] #[path = "history_tests.rs"] mod tests;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/context_manager/mod.rs
codex-rs/core/src/context_manager/mod.rs
mod history; mod normalize; pub(crate) use history::ContextManager;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/context_manager/normalize.rs
codex-rs/core/src/context_manager/normalize.rs
use std::collections::HashSet; use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::models::ResponseItem; use crate::util::error_or_panic; use tracing::info; pub(crate) fn ensure_call_outputs_present(items: &mut Vec<ResponseItem>) { // Collect synthetic outputs to insert immediately after their calls. // Store the insertion position (index of call) alongside the item so // we can insert in reverse order and avoid index shifting. let mut missing_outputs_to_insert: Vec<(usize, ResponseItem)> = Vec::new(); for (idx, item) in items.iter().enumerate() { match item { ResponseItem::FunctionCall { call_id, .. } => { let has_output = items.iter().any(|i| match i { ResponseItem::FunctionCallOutput { call_id: existing, .. } => existing == call_id, _ => false, }); if !has_output { info!("Function call output is missing for call id: {call_id}"); missing_outputs_to_insert.push(( idx, ResponseItem::FunctionCallOutput { call_id: call_id.clone(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, )); } } ResponseItem::CustomToolCall { call_id, .. } => { let has_output = items.iter().any(|i| match i { ResponseItem::CustomToolCallOutput { call_id: existing, .. } => existing == call_id, _ => false, }); if !has_output { error_or_panic(format!( "Custom tool call output is missing for call id: {call_id}" )); missing_outputs_to_insert.push(( idx, ResponseItem::CustomToolCallOutput { call_id: call_id.clone(), output: "aborted".to_string(), }, )); } } // LocalShellCall is represented in upstream streams by a FunctionCallOutput ResponseItem::LocalShellCall { call_id, .. } => { if let Some(call_id) = call_id.as_ref() { let has_output = items.iter().any(|i| match i { ResponseItem::FunctionCallOutput { call_id: existing, .. 
} => existing == call_id, _ => false, }); if !has_output { error_or_panic(format!( "Local shell call output is missing for call id: {call_id}" )); missing_outputs_to_insert.push(( idx, ResponseItem::FunctionCallOutput { call_id: call_id.clone(), output: FunctionCallOutputPayload { content: "aborted".to_string(), ..Default::default() }, }, )); } } } _ => {} } } // Insert synthetic outputs in reverse index order to avoid re-indexing. for (idx, output_item) in missing_outputs_to_insert.into_iter().rev() { items.insert(idx + 1, output_item); } } pub(crate) fn remove_orphan_outputs(items: &mut Vec<ResponseItem>) { let function_call_ids: HashSet<String> = items .iter() .filter_map(|i| match i { ResponseItem::FunctionCall { call_id, .. } => Some(call_id.clone()), _ => None, }) .collect(); let local_shell_call_ids: HashSet<String> = items .iter() .filter_map(|i| match i { ResponseItem::LocalShellCall { call_id: Some(call_id), .. } => Some(call_id.clone()), _ => None, }) .collect(); let custom_tool_call_ids: HashSet<String> = items .iter() .filter_map(|i| match i { ResponseItem::CustomToolCall { call_id, .. } => Some(call_id.clone()), _ => None, }) .collect(); items.retain(|item| match item { ResponseItem::FunctionCallOutput { call_id, .. } => { let has_match = function_call_ids.contains(call_id) || local_shell_call_ids.contains(call_id); if !has_match { error_or_panic(format!( "Orphan function call output for call id: {call_id}" )); } has_match } ResponseItem::CustomToolCallOutput { call_id, .. } => { let has_match = custom_tool_call_ids.contains(call_id); if !has_match { error_or_panic(format!( "Orphan custom tool call output for call id: {call_id}" )); } has_match } _ => true, }); } pub(crate) fn remove_corresponding_for(items: &mut Vec<ResponseItem>, item: &ResponseItem) { match item { ResponseItem::FunctionCall { call_id, .. } => { remove_first_matching(items, |i| { matches!( i, ResponseItem::FunctionCallOutput { call_id: existing, .. 
} if existing == call_id ) }); } ResponseItem::FunctionCallOutput { call_id, .. } => { if let Some(pos) = items.iter().position(|i| { matches!(i, ResponseItem::FunctionCall { call_id: existing, .. } if existing == call_id) }) { items.remove(pos); } else if let Some(pos) = items.iter().position(|i| { matches!(i, ResponseItem::LocalShellCall { call_id: Some(existing), .. } if existing == call_id) }) { items.remove(pos); } } ResponseItem::CustomToolCall { call_id, .. } => { remove_first_matching(items, |i| { matches!( i, ResponseItem::CustomToolCallOutput { call_id: existing, .. } if existing == call_id ) }); } ResponseItem::CustomToolCallOutput { call_id, .. } => { remove_first_matching( items, |i| matches!(i, ResponseItem::CustomToolCall { call_id: existing, .. } if existing == call_id), ); } ResponseItem::LocalShellCall { call_id: Some(call_id), .. } => { remove_first_matching(items, |i| { matches!( i, ResponseItem::FunctionCallOutput { call_id: existing, .. } if existing == call_id ) }); } _ => {} } } fn remove_first_matching<F>(items: &mut Vec<ResponseItem>, predicate: F) where F: Fn(&ResponseItem) -> bool, { if let Some(pos) = items.iter().position(predicate) { items.remove(pos); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/injection.rs
codex-rs/core/src/skills/injection.rs
use std::collections::HashSet; use crate::skills::SkillLoadOutcome; use crate::skills::SkillMetadata; use crate::user_instructions::SkillInstructions; use codex_protocol::models::ResponseItem; use codex_protocol::user_input::UserInput; use tokio::fs; #[derive(Debug, Default)] pub(crate) struct SkillInjections { pub(crate) items: Vec<ResponseItem>, pub(crate) warnings: Vec<String>, } pub(crate) async fn build_skill_injections( inputs: &[UserInput], skills: Option<&SkillLoadOutcome>, ) -> SkillInjections { if inputs.is_empty() { return SkillInjections::default(); } let Some(outcome) = skills else { return SkillInjections::default(); }; let mentioned_skills = collect_explicit_skill_mentions(inputs, &outcome.skills); if mentioned_skills.is_empty() { return SkillInjections::default(); } let mut result = SkillInjections { items: Vec::with_capacity(mentioned_skills.len()), warnings: Vec::new(), }; for skill in mentioned_skills { match fs::read_to_string(&skill.path).await { Ok(contents) => { result.items.push(ResponseItem::from(SkillInstructions { name: skill.name, path: skill.path.to_string_lossy().into_owned(), contents, })); } Err(err) => { let message = format!( "Failed to load skill {} at {}: {err:#}", skill.name, skill.path.display() ); result.warnings.push(message); } } } result } fn collect_explicit_skill_mentions( inputs: &[UserInput], skills: &[SkillMetadata], ) -> Vec<SkillMetadata> { let mut selected: Vec<SkillMetadata> = Vec::new(); let mut seen: HashSet<String> = HashSet::new(); for input in inputs { if let UserInput::Skill { name, path } = input && seen.insert(name.clone()) && let Some(skill) = skills.iter().find(|s| s.name == *name && s.path == *path) { selected.push(skill.clone()); } } selected }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/manager.rs
codex-rs/core/src/skills/manager.rs
use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; use std::sync::RwLock; use crate::skills::SkillLoadOutcome; use crate::skills::loader::load_skills_from_roots; use crate::skills::loader::skill_roots_for_cwd; use crate::skills::system::install_system_skills; pub struct SkillsManager { codex_home: PathBuf, cache_by_cwd: RwLock<HashMap<PathBuf, SkillLoadOutcome>>, } impl SkillsManager { pub fn new(codex_home: PathBuf) -> Self { if let Err(err) = install_system_skills(&codex_home) { tracing::error!("failed to install system skills: {err}"); } Self { codex_home, cache_by_cwd: RwLock::new(HashMap::new()), } } pub fn skills_for_cwd(&self, cwd: &Path) -> SkillLoadOutcome { self.skills_for_cwd_with_options(cwd, false) } pub fn skills_for_cwd_with_options(&self, cwd: &Path, force_reload: bool) -> SkillLoadOutcome { let cached = match self.cache_by_cwd.read() { Ok(cache) => cache.get(cwd).cloned(), Err(err) => err.into_inner().get(cwd).cloned(), }; if !force_reload && let Some(outcome) = cached { return outcome; } let roots = skill_roots_for_cwd(&self.codex_home, cwd); let outcome = load_skills_from_roots(roots); match self.cache_by_cwd.write() { Ok(mut cache) => { cache.insert(cwd.to_path_buf(), outcome.clone()); } Err(err) => { err.into_inner().insert(cwd.to_path_buf(), outcome.clone()); } } outcome } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/system.rs
codex-rs/core/src/skills/system.rs
use codex_utils_absolute_path::AbsolutePathBuf; use include_dir::Dir; use std::collections::hash_map::DefaultHasher; use std::fs; use std::hash::Hash; use std::hash::Hasher; use std::path::Path; use std::path::PathBuf; use thiserror::Error; const SYSTEM_SKILLS_DIR: Dir = include_dir::include_dir!("$CARGO_MANIFEST_DIR/src/skills/assets/samples"); const SYSTEM_SKILLS_DIR_NAME: &str = ".system"; const SKILLS_DIR_NAME: &str = "skills"; const SYSTEM_SKILLS_MARKER_FILENAME: &str = ".codex-system-skills.marker"; const SYSTEM_SKILLS_MARKER_SALT: &str = "v1"; /// Returns the on-disk cache location for embedded system skills. /// /// This is typically located at `CODEX_HOME/skills/.system`. pub(crate) fn system_cache_root_dir(codex_home: &Path) -> PathBuf { AbsolutePathBuf::try_from(codex_home) .and_then(|codex_home| system_cache_root_dir_abs(&codex_home)) .map(AbsolutePathBuf::into_path_buf) .unwrap_or_else(|_| { codex_home .join(SKILLS_DIR_NAME) .join(SYSTEM_SKILLS_DIR_NAME) }) } fn system_cache_root_dir_abs(codex_home: &AbsolutePathBuf) -> std::io::Result<AbsolutePathBuf> { codex_home .join(SKILLS_DIR_NAME)? .join(SYSTEM_SKILLS_DIR_NAME) } /// Installs embedded system skills into `CODEX_HOME/skills/.system`. /// /// Clears any existing system skills directory first and then writes the embedded /// skills directory into place. /// /// To avoid doing unnecessary work on every startup, a marker file is written /// with a fingerprint of the embedded directory. When the marker matches, the /// install is skipped. 
pub(crate) fn install_system_skills(codex_home: &Path) -> Result<(), SystemSkillsError> { let codex_home = AbsolutePathBuf::try_from(codex_home) .map_err(|source| SystemSkillsError::io("normalize codex home dir", source))?; let skills_root_dir = codex_home .join(SKILLS_DIR_NAME) .map_err(|source| SystemSkillsError::io("resolve skills root dir", source))?; fs::create_dir_all(skills_root_dir.as_path()) .map_err(|source| SystemSkillsError::io("create skills root dir", source))?; let dest_system = system_cache_root_dir_abs(&codex_home) .map_err(|source| SystemSkillsError::io("resolve system skills cache root dir", source))?; let marker_path = dest_system .join(SYSTEM_SKILLS_MARKER_FILENAME) .map_err(|source| SystemSkillsError::io("resolve system skills marker path", source))?; let expected_fingerprint = embedded_system_skills_fingerprint(); if dest_system.as_path().is_dir() && read_marker(&marker_path).is_ok_and(|marker| marker == expected_fingerprint) { return Ok(()); } if dest_system.as_path().exists() { fs::remove_dir_all(dest_system.as_path()) .map_err(|source| SystemSkillsError::io("remove existing system skills dir", source))?; } write_embedded_dir(&SYSTEM_SKILLS_DIR, &dest_system)?; fs::write(marker_path.as_path(), format!("{expected_fingerprint}\n")) .map_err(|source| SystemSkillsError::io("write system skills marker", source))?; Ok(()) } fn read_marker(path: &AbsolutePathBuf) -> Result<String, SystemSkillsError> { Ok(fs::read_to_string(path.as_path()) .map_err(|source| SystemSkillsError::io("read system skills marker", source))? 
.trim() .to_string()) } fn embedded_system_skills_fingerprint() -> String { let mut items: Vec<(String, Option<u64>)> = SYSTEM_SKILLS_DIR .entries() .iter() .map(|entry| match entry { include_dir::DirEntry::Dir(dir) => (dir.path().to_string_lossy().to_string(), None), include_dir::DirEntry::File(file) => { let mut file_hasher = DefaultHasher::new(); file.contents().hash(&mut file_hasher); ( file.path().to_string_lossy().to_string(), Some(file_hasher.finish()), ) } }) .collect(); items.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); let mut hasher = DefaultHasher::new(); SYSTEM_SKILLS_MARKER_SALT.hash(&mut hasher); for (path, contents_hash) in items { path.hash(&mut hasher); contents_hash.hash(&mut hasher); } format!("{:x}", hasher.finish()) } /// Writes the embedded `include_dir::Dir` to disk under `dest`. /// /// Preserves the embedded directory structure. fn write_embedded_dir(dir: &Dir<'_>, dest: &AbsolutePathBuf) -> Result<(), SystemSkillsError> { fs::create_dir_all(dest.as_path()) .map_err(|source| SystemSkillsError::io("create system skills dir", source))?; for entry in dir.entries() { match entry { include_dir::DirEntry::Dir(subdir) => { let subdir_dest = dest.join(subdir.path()).map_err(|source| { SystemSkillsError::io("resolve system skills subdir", source) })?; fs::create_dir_all(subdir_dest.as_path()).map_err(|source| { SystemSkillsError::io("create system skills subdir", source) })?; write_embedded_dir(subdir, dest)?; } include_dir::DirEntry::File(file) => { let path = dest.join(file.path()).map_err(|source| { SystemSkillsError::io("resolve system skills file", source) })?; if let Some(parent) = path.as_path().parent() { fs::create_dir_all(parent).map_err(|source| { SystemSkillsError::io("create system skills file parent", source) })?; } fs::write(path.as_path(), file.contents()) .map_err(|source| SystemSkillsError::io("write system skill file", source))?; } } } Ok(()) } #[derive(Debug, Error)] pub(crate) enum SystemSkillsError { #[error("io error while 
{action}: {source}")] Io { action: &'static str, #[source] source: std::io::Error, }, } impl SystemSkillsError { fn io(action: &'static str, source: std::io::Error) -> Self { Self::Io { action, source } } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/render.rs
codex-rs/core/src/skills/render.rs
use crate::skills::model::SkillMetadata; pub fn render_skills_section(skills: &[SkillMetadata]) -> Option<String> { if skills.is_empty() { return None; } let mut lines: Vec<String> = Vec::new(); lines.push("## Skills".to_string()); lines.push("These skills are discovered at startup from multiple local sources. Each entry includes a name, description, and file path so you can open the source for full instructions.".to_string()); for skill in skills { let path_str = skill.path.to_string_lossy().replace('\\', "/"); let name = skill.name.as_str(); let description = skill.description.as_str(); lines.push(format!("- {name}: {description} (file: {path_str})")); } lines.push( r###"- Discovery: Available skills are listed in project docs and may also appear in a runtime "## Skills" section (name + description + file path). These are the sources of truth; skill bodies live on disk at the listed paths. - Trigger rules: If the user names a skill (with `$SkillName` or plain text) OR the task clearly matches a skill's description, you must use that skill for that turn. Multiple mentions mean use them all. Do not carry skills across turns unless re-mentioned. - Missing/blocked: If a named skill isn't in the list or the path can't be read, say so briefly and continue with the best fallback. - How to use a skill (progressive disclosure): 1) After deciding to use a skill, open its `SKILL.md`. Read only enough to follow the workflow. 2) If `SKILL.md` points to extra folders such as `references/`, load only the specific files needed for the request; don't bulk-load everything. 3) If `scripts/` exist, prefer running or patching them instead of retyping large code blocks. 4) If `assets/` or templates exist, reuse them instead of recreating from scratch. - Description as trigger: The YAML `description` in `SKILL.md` is the primary trigger signal; rely on it to decide applicability. If unsure, ask a brief clarification before proceeding. 
- Coordination and sequencing: - If multiple skills apply, choose the minimal set that covers the request and state the order you'll use them. - Announce which skill(s) you're using and why (one short line). If you skip an obvious skill, say why. - Context hygiene: - Keep context small: summarize long sections instead of pasting them; only load extra files when needed. - Avoid deeply nested references; prefer one-hop files explicitly linked from `SKILL.md`. - When variants exist (frameworks, providers, domains), pick only the relevant reference file(s) and note that choice. - Safety and fallback: If a skill can't be applied cleanly (missing files, unclear instructions), state the issue, pick the next-best approach, and continue."### .to_string(), ); Some(lines.join("\n")) }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/model.rs
codex-rs/core/src/skills/model.rs
use std::path::PathBuf; use codex_protocol::protocol::SkillScope; #[derive(Debug, Clone, PartialEq, Eq)] pub struct SkillMetadata { pub name: String, pub description: String, pub short_description: Option<String>, pub path: PathBuf, pub scope: SkillScope, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct SkillError { pub path: PathBuf, pub message: String, } #[derive(Debug, Clone, Default)] pub struct SkillLoadOutcome { pub skills: Vec<SkillMetadata>, pub errors: Vec<SkillError>, }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/mod.rs
codex-rs/core/src/skills/mod.rs
pub mod injection; pub mod loader; pub mod manager; pub mod model; pub mod render; pub mod system; pub(crate) use injection::SkillInjections; pub(crate) use injection::build_skill_injections; pub use loader::load_skills; pub use manager::SkillsManager; pub use model::SkillError; pub use model::SkillLoadOutcome; pub use model::SkillMetadata; pub use render::render_skills_section;
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false
openai/codex
https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/core/src/skills/loader.rs
codex-rs/core/src/skills/loader.rs
use crate::config::Config;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::skills::model::SkillError;
use crate::skills::model::SkillLoadOutcome;
use crate::skills::model::SkillMetadata;
use crate::skills::system::system_cache_root_dir;
use codex_protocol::protocol::SkillScope;
use dunce::canonicalize as normalize_path;
use serde::Deserialize;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::error::Error;
use std::fmt;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use tracing::error;

/// Shape of the YAML frontmatter expected at the top of a SKILL.md file.
#[derive(Debug, Deserialize)]
struct SkillFrontmatter {
    name: String,
    description: String,
    // Optional nested `metadata:` table; absent maps to the default.
    #[serde(default)]
    metadata: SkillFrontmatterMetadata,
}

/// Optional `metadata:` table inside the frontmatter.
#[derive(Debug, Default, Deserialize)]
struct SkillFrontmatterMetadata {
    // The YAML key uses a dash, hence the serde rename.
    #[serde(default, rename = "short-description")]
    short_description: Option<String>,
}

// File and directory names used during discovery.
const SKILLS_FILENAME: &str = "SKILL.md";
const SKILLS_DIR_NAME: &str = "skills";
const REPO_ROOT_CONFIG_DIR_NAME: &str = ".codex";
// Machine-wide skills root (only consulted on Unix; see skill_roots_for_cwd).
const ADMIN_SKILLS_ROOT: &str = "/etc/codex/skills";
// Field length limits, counted in Unicode scalar values (see validate_field).
const MAX_NAME_LEN: usize = 64;
const MAX_DESCRIPTION_LEN: usize = 1024;
const MAX_SHORT_DESCRIPTION_LEN: usize = MAX_DESCRIPTION_LEN;

/// Reasons a SKILL.md file can fail to parse or validate.
#[derive(Debug)]
enum SkillParseError {
    Read(std::io::Error),
    MissingFrontmatter,
    InvalidYaml(serde_yaml::Error),
    MissingField(&'static str),
    InvalidField { field: &'static str, reason: String },
}

impl fmt::Display for SkillParseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            SkillParseError::Read(e) => write!(f, "failed to read file: {e}"),
            SkillParseError::MissingFrontmatter => {
                write!(f, "missing YAML frontmatter delimited by ---")
            }
            SkillParseError::InvalidYaml(e) => write!(f, "invalid YAML: {e}"),
            SkillParseError::MissingField(field) => write!(f, "missing field `{field}`"),
            SkillParseError::InvalidField { field, reason } => {
                write!(f, "invalid {field}: {reason}")
            }
        }
    }
}

impl Error for SkillParseError {}

/// Load every skill visible from `config`, across all applicable roots.
pub fn load_skills(config: &Config) -> SkillLoadOutcome {
load_skills_from_roots(skill_roots(config))
}

/// A directory to scan for skills plus the scope its skills receive.
pub(crate) struct SkillRoot {
    pub(crate) path: PathBuf,
    pub(crate) scope: SkillScope,
}

/// Scan each root in order, then dedupe by skill name (first root wins)
/// and sort survivors by name, breaking ties by path.
pub(crate) fn load_skills_from_roots<I>(roots: I) -> SkillLoadOutcome
where
    I: IntoIterator<Item = SkillRoot>,
{
    let mut outcome = SkillLoadOutcome::default();
    for root in roots {
        discover_skills_under_root(&root.path, root.scope, &mut outcome);
    }
    // `insert` returns false for names already seen, dropping later duplicates.
    let mut seen: HashSet<String> = HashSet::new();
    outcome
        .skills
        .retain(|skill| seen.insert(skill.name.clone()));
    outcome
        .skills
        .sort_by(|a, b| a.name.cmp(&b.name).then_with(|| a.path.cmp(&b.path)));
    outcome
}

/// `<codex_home>/skills`, scoped as user skills.
pub(crate) fn user_skills_root(codex_home: &Path) -> SkillRoot {
    SkillRoot {
        path: codex_home.join(SKILLS_DIR_NAME),
        scope: SkillScope::User,
    }
}

/// System-skills cache directory derived from `codex_home`.
pub(crate) fn system_skills_root(codex_home: &Path) -> SkillRoot {
    SkillRoot {
        path: system_cache_root_dir(codex_home),
        scope: SkillScope::System,
    }
}

/// Fixed machine-wide root (`/etc/codex/skills`), scoped as admin skills.
pub(crate) fn admin_skills_root() -> SkillRoot {
    SkillRoot {
        path: PathBuf::from(ADMIN_SKILLS_ROOT),
        scope: SkillScope::Admin,
    }
}

/// Find the nearest `.codex/skills` directory applicable to `cwd`, if any.
pub(crate) fn repo_skills_root(cwd: &Path) -> Option<SkillRoot> {
    // `cwd` may be a file; fall back to its parent directory.
    let base = if cwd.is_dir() { cwd } else { cwd.parent()?
};
    let base = normalize_path(base).unwrap_or_else(|_| base.to_path_buf());
    // Canonicalize the git root too so ancestor comparisons use the same form.
    let repo_root =
        resolve_root_git_project_for_trust(&base).map(|root| normalize_path(&root).unwrap_or(root));
    let scope = SkillScope::Repo;
    if let Some(repo_root) = repo_root.as_deref() {
        // Inside a git repo: walk up from `base`, but never past the repo root.
        for dir in base.ancestors() {
            let skills_root = dir.join(REPO_ROOT_CONFIG_DIR_NAME).join(SKILLS_DIR_NAME);
            if skills_root.is_dir() {
                return Some(SkillRoot {
                    path: skills_root,
                    scope,
                });
            }
            if dir == repo_root {
                break;
            }
        }
        return None;
    }
    // Not a git repo: only look directly under `base`; do not walk parents.
    let skills_root = base.join(REPO_ROOT_CONFIG_DIR_NAME).join(SKILLS_DIR_NAME);
    skills_root.is_dir().then_some(SkillRoot {
        path: skills_root,
        scope,
    })
}

/// All skill roots applicable to `cwd`, in dedupe-priority order.
pub(crate) fn skill_roots_for_cwd(codex_home: &Path, cwd: &Path) -> Vec<SkillRoot> {
    let mut roots = Vec::new();
    if let Some(repo_root) = repo_skills_root(cwd) {
        roots.push(repo_root);
    }
    // Load order matters: we dedupe by name, keeping the first occurrence.
    // Priority order: repo, user, system, then admin.
    roots.push(user_skills_root(codex_home));
    roots.push(system_skills_root(codex_home));
    if cfg!(unix) {
        roots.push(admin_skills_root());
    }
    roots
}

fn skill_roots(config: &Config) -> Vec<SkillRoot> {
    skill_roots_for_cwd(&config.codex_home, &config.cwd)
}

/// Breadth-first scan of `root`, collecting every readable SKILL.md into
/// `outcome`. Dot-prefixed entries and symlinks are skipped.
fn discover_skills_under_root(root: &Path, scope: SkillScope, outcome: &mut SkillLoadOutcome) {
    let Ok(root) = normalize_path(root) else {
        return;
    };
    if !root.is_dir() {
        return;
    }
    let mut queue: VecDeque<PathBuf> = VecDeque::from([root]);
    while let Some(dir) = queue.pop_front() {
        let entries = match fs::read_dir(&dir) {
            Ok(entries) => entries,
            Err(e) => {
                // Unreadable directory: log and keep scanning the rest.
                error!("failed to read skills dir {}: {e:#}", dir.display());
                continue;
            }
        };
        for entry in entries.flatten() {
            let path = entry.path();
            let file_name = match path.file_name().and_then(|f| f.to_str()) {
                Some(name) => name,
                None => continue,
            };
            // Skip hidden files and directories.
            if file_name.starts_with('.') {
                continue;
            }
            let Ok(file_type) = entry.file_type() else {
                continue;
            };
            // Do not follow symlinks (avoids cycles and escapes from the root).
            if file_type.is_symlink() {
                continue;
            }
            if file_type.is_dir() {
queue.push_back(path);
                continue;
            }
            if file_type.is_file() && file_name == SKILLS_FILENAME {
                match parse_skill_file(&path, scope) {
                    Ok(skill) => {
                        outcome.skills.push(skill);
                    }
                    Err(err) => {
                        // System-scope skills are not user-authored; suppress
                        // their parse errors instead of surfacing them.
                        if scope != SkillScope::System {
                            outcome.errors.push(SkillError {
                                path,
                                message: err.to_string(),
                            });
                        }
                    }
                }
            }
        }
    }
}

/// Parse a single SKILL.md file into metadata, validating required fields.
///
/// Returns a `SkillParseError` when the file is unreadable, has no YAML
/// frontmatter, the YAML is invalid, or a field is empty/too long.
fn parse_skill_file(path: &Path, scope: SkillScope) -> Result<SkillMetadata, SkillParseError> {
    let contents = fs::read_to_string(path).map_err(SkillParseError::Read)?;
    let frontmatter = extract_frontmatter(&contents).ok_or(SkillParseError::MissingFrontmatter)?;
    let parsed: SkillFrontmatter =
        serde_yaml::from_str(&frontmatter).map_err(SkillParseError::InvalidYaml)?;
    // Collapse multi-line values to single-line form before validation.
    let name = sanitize_single_line(&parsed.name);
    let description = sanitize_single_line(&parsed.description);
    let short_description = parsed
        .metadata
        .short_description
        .as_deref()
        .map(sanitize_single_line)
        .filter(|value| !value.is_empty());
    validate_field(&name, MAX_NAME_LEN, "name")?;
    validate_field(&description, MAX_DESCRIPTION_LEN, "description")?;
    if let Some(short_description) = short_description.as_deref() {
        validate_field(
            short_description,
            MAX_SHORT_DESCRIPTION_LEN,
            "metadata.short-description",
        )?;
    }
    let resolved_path = normalize_path(path).unwrap_or_else(|_| path.to_path_buf());
    Ok(SkillMetadata {
        name,
        description,
        short_description,
        path: resolved_path,
        scope,
    })
}

/// Collapse all whitespace runs (including newlines) into single spaces.
fn sanitize_single_line(raw: &str) -> String {
    raw.split_whitespace().collect::<Vec<_>>().join(" ")
}

/// Validate that a sanitized field is non-empty and within `max_len` chars
/// (counted in Unicode scalar values, not bytes).
fn validate_field(
    value: &str,
    max_len: usize,
    field_name: &'static str,
) -> Result<(), SkillParseError> {
    if value.is_empty() {
        return Err(SkillParseError::MissingField(field_name));
    }
    if value.chars().count() > max_len {
        return Err(SkillParseError::InvalidField {
            field: field_name,
            // Fix: the message previously hard-coded "49,040 characters"
            // regardless of the limit being enforced; report the real limit.
            reason: format!("exceeds maximum length of {max_len} characters"),
        });
    }
    Ok(())
}

/// Extract the YAML frontmatter between the leading and closing `---` lines.
/// Returns `None` when either delimiter is missing or the body is empty.
fn extract_frontmatter(contents: &str) -> Option<String> {
    let mut lines = contents.lines();
    if !matches!(lines.next(), Some(line) if line.trim() == "---") {
        return
None; }
    // Accumulate lines until the closing `---`; bail out if it never appears.
    let mut frontmatter_lines: Vec<&str> = Vec::new(); let mut found_closing = false; for line in lines.by_ref() { if line.trim() == "---" { found_closing = true; break; } frontmatter_lines.push(line); } if frontmatter_lines.is_empty() || !found_closing { return None; } Some(frontmatter_lines.join("\n")) }

// ---------------------------------------------------------------------------
// Tests: frontmatter parsing, validation limits, and root discovery /
// deduplication priority (repo > user > system > admin).
// ---------------------------------------------------------------------------
#[cfg(test)] mod tests { use super::*; use crate::config::ConfigBuilder; use codex_protocol::protocol::SkillScope; use pretty_assertions::assert_eq; use std::path::Path; use std::process::Command; use tempfile::TempDir;
// Builds a test Config rooted at the given temp dir (cwd == codex_home).
async fn make_config(codex_home: &TempDir) -> Config { let mut config = ConfigBuilder::default() .codex_home(codex_home.path().to_path_buf()) .build() .await .expect("defaults for test should always succeed"); config.cwd = codex_home.path().to_path_buf(); config }
// Writes a skill under the user root `<codex_home>/skills/<dir>`.
fn write_skill(codex_home: &TempDir, dir: &str, name: &str, description: &str) -> PathBuf { write_skill_at(&codex_home.path().join("skills"), dir, name, description) }
// Writes a skill under the system cache `<codex_home>/skills/.system/<dir>`.
fn write_system_skill( codex_home: &TempDir, dir: &str, name: &str, description: &str, ) -> PathBuf { write_skill_at( &codex_home.path().join("skills/.system"), dir, name, description, ) }
// Writes a SKILL.md whose frontmatter is built from `name`/`description`.
fn write_skill_at(root: &Path, dir: &str, name: &str, description: &str) -> PathBuf { let skill_dir = root.join(dir); fs::create_dir_all(&skill_dir).unwrap(); let indented_description = description.replace('\n', "\n "); let content = format!( "---\nname: {name}\ndescription: |-\n {indented_description}\n---\n\n# Body\n" ); let path = skill_dir.join(SKILLS_FILENAME); fs::write(&path, content).unwrap(); path }
// A well-formed skill parses; multi-line descriptions collapse to one line.
#[tokio::test] async fn loads_valid_skill() { let codex_home = tempfile::tempdir().expect("tempdir"); write_skill(&codex_home, "demo", "demo-skill", "does things\ncarefully"); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); let skill = &outcome.skills[0]; assert_eq!(skill.name, "demo-skill"); assert_eq!(skill.description, "does things carefully"); assert_eq!(skill.short_description, None); let path_str = skill.path.to_string_lossy().replace('\\', "/"); assert!( path_str.ends_with("skills/demo/SKILL.md"), "unexpected path {path_str}" ); }
// `metadata.short-description` is surfaced when present.
#[tokio::test] async fn loads_short_description_from_metadata() { let codex_home = tempfile::tempdir().expect("tempdir"); let skill_dir = codex_home.path().join("skills/demo"); fs::create_dir_all(&skill_dir).unwrap(); let contents = "---\nname: demo-skill\ndescription: long description\nmetadata:\n short-description: short summary\n---\n\n# Body\n"; fs::write(skill_dir.join(SKILLS_FILENAME), contents).unwrap(); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!( outcome.skills[0].short_description, Some("short summary".to_string()) ); }
// Over-long short descriptions are rejected with a length error.
#[tokio::test] async fn enforces_short_description_length_limits() { let codex_home = tempfile::tempdir().expect("tempdir"); let skill_dir = codex_home.path().join("skills/demo"); fs::create_dir_all(&skill_dir).unwrap(); let too_long = "x".repeat(MAX_SHORT_DESCRIPTION_LEN + 1); let contents = format!( "---\nname: demo-skill\ndescription: long description\nmetadata:\n short-description: {too_long}\n---\n\n# Body\n" ); fs::write(skill_dir.join(SKILLS_FILENAME), contents).unwrap(); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert_eq!(outcome.skills.len(), 0); assert_eq!(outcome.errors.len(), 1); assert!( outcome.errors[0] .message .contains("invalid metadata.short-description"), "expected length error, got: {:?}", outcome.errors ); }
// Dot-prefixed directories are skipped; malformed frontmatter is reported.
#[tokio::test] async fn skips_hidden_and_invalid() { let codex_home = tempfile::tempdir().expect("tempdir"); let hidden_dir = codex_home.path().join("skills/.hidden"); fs::create_dir_all(&hidden_dir).unwrap(); fs::write( hidden_dir.join(SKILLS_FILENAME), "---\nname: hidden\ndescription: hidden\n---\n", ) .unwrap();
// Invalid because missing closing frontmatter.
let invalid_dir = codex_home.path().join("skills/invalid"); fs::create_dir_all(&invalid_dir).unwrap(); fs::write(invalid_dir.join(SKILLS_FILENAME), "---\nname: bad").unwrap(); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert_eq!(outcome.skills.len(), 0); assert_eq!(outcome.errors.len(), 1); assert!( outcome.errors[0] .message .contains("missing YAML frontmatter"), "expected frontmatter error" ); }
// Descriptions at the limit pass; one char over fails (counted in chars).
#[tokio::test] async fn enforces_length_limits() { let codex_home = tempfile::tempdir().expect("tempdir"); let max_desc = "\u{1F4A1}".repeat(MAX_DESCRIPTION_LEN); write_skill(&codex_home, "max-len", "max-len", &max_desc); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); let too_long_desc = "\u{1F4A1}".repeat(MAX_DESCRIPTION_LEN + 1); write_skill(&codex_home, "too-long", "too-long", &too_long_desc); let outcome = load_skills(&cfg); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.errors.len(), 1); assert!( outcome.errors[0].message.contains("invalid description"), "expected length error" ); }
// `.codex/skills` at a git repo root is discovered.
#[tokio::test] async fn loads_skills_from_repo_root() { let codex_home = tempfile::tempdir().expect("tempdir"); let repo_dir = tempfile::tempdir().expect("tempdir"); let status = Command::new("git") .arg("init") .current_dir(repo_dir.path()) .status() .expect("git init"); assert!(status.success(), "git init failed"); let skills_root = repo_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME); write_skill_at(&skills_root, "repo", "repo-skill", "from repo"); let mut cfg = make_config(&codex_home).await; cfg.cwd = repo_dir.path().to_path_buf(); let repo_root = normalize_path(&skills_root).unwrap_or_else(|_| skills_root.clone()); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); let skill = &outcome.skills[0]; assert_eq!(skill.name, "repo-skill"); assert!(skill.path.starts_with(&repo_root)); }
// The nearest `.codex/skills` between cwd and the repo root wins.
#[tokio::test] async fn loads_skills_from_nearest_codex_dir_under_repo_root() { let codex_home = tempfile::tempdir().expect("tempdir"); let repo_dir = tempfile::tempdir().expect("tempdir"); let status = Command::new("git") .arg("init") .current_dir(repo_dir.path()) .status() .expect("git init"); assert!(status.success(), "git init failed"); let nested_dir = repo_dir.path().join("nested/inner"); fs::create_dir_all(&nested_dir).unwrap(); write_skill_at( &repo_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "root", "root-skill", "from root", ); write_skill_at( &repo_dir .path() .join("nested") .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "nested", "nested-skill", "from nested", ); let mut cfg = make_config(&codex_home).await; cfg.cwd = nested_dir; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "nested-skill"); }
// Without a git repo, `.codex/skills` directly under cwd still loads.
#[tokio::test] async fn loads_skills_from_codex_dir_when_not_git_repo() { let codex_home = tempfile::tempdir().expect("tempdir"); let work_dir = tempfile::tempdir().expect("tempdir"); write_skill_at( &work_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "local", "local-skill", "from cwd", ); let mut cfg = make_config(&codex_home).await; cfg.cwd = work_dir.path().to_path_buf(); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "local-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::Repo); }
// Name collisions resolve in favor of the repo root over the user root.
#[tokio::test] async fn deduplicates_by_name_preferring_repo_over_user() { let codex_home = tempfile::tempdir().expect("tempdir"); let repo_dir = tempfile::tempdir().expect("tempdir"); let status = Command::new("git") .arg("init") .current_dir(repo_dir.path()) .status() .expect("git init"); assert!(status.success(), "git init failed"); write_skill(&codex_home, "user", "dupe-skill", "from user"); write_skill_at( &repo_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "repo", "dupe-skill", "from repo", ); let mut cfg = make_config(&codex_home).await; cfg.cwd = repo_dir.path().to_path_buf(); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "dupe-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::Repo); }
// User skills shadow same-named system skills.
#[tokio::test] async fn loads_system_skills_when_present() { let codex_home = tempfile::tempdir().expect("tempdir"); write_system_skill(&codex_home, "system", "dupe-skill", "from system"); write_skill(&codex_home, "user", "dupe-skill", "from user"); let cfg = make_config(&codex_home).await; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].description, "from user"); assert_eq!(outcome.skills[0].scope, SkillScope::User); }
// The ancestor walk stops at the git repo root; skills above it are invisible.
#[tokio::test] async fn repo_skills_search_does_not_escape_repo_root() { let codex_home = tempfile::tempdir().expect("tempdir"); let outer_dir = tempfile::tempdir().expect("tempdir"); let repo_dir = outer_dir.path().join("repo"); fs::create_dir_all(&repo_dir).unwrap(); write_skill_at( &outer_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "outer", "outer-skill", "from outer", ); let status = Command::new("git") .arg("init") .current_dir(&repo_dir) .status() .expect("git init"); assert!(status.success(), "git init failed"); let mut cfg = make_config(&codex_home).await; cfg.cwd = repo_dir; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 0); }
// A file cwd falls back to its parent directory for discovery.
#[tokio::test] async fn loads_skills_when_cwd_is_file_in_repo() { let codex_home = tempfile::tempdir().expect("tempdir"); let repo_dir = tempfile::tempdir().expect("tempdir"); let status = Command::new("git") .arg("init") .current_dir(repo_dir.path()) .status() .expect("git init"); assert!(status.success(), "git init failed"); write_skill_at( &repo_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "repo", "repo-skill", "from repo", ); let file_path = repo_dir.path().join("some-file.txt"); fs::write(&file_path, "contents").unwrap(); let mut cfg = make_config(&codex_home).await; cfg.cwd = file_path; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "repo-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::Repo); }
// Outside a git repo the search never climbs into parent directories.
#[tokio::test] async fn non_git_repo_skills_search_does_not_walk_parents() { let codex_home = tempfile::tempdir().expect("tempdir"); let outer_dir = tempfile::tempdir().expect("tempdir"); let nested_dir = outer_dir.path().join("nested/inner"); fs::create_dir_all(&nested_dir).unwrap(); write_skill_at( &outer_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "outer", "outer-skill", "from outer", ); let mut cfg = make_config(&codex_home).await; cfg.cwd = nested_dir; let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 0); }
// System-cache skills load with System scope when nothing shadows them.
#[tokio::test] async fn loads_skills_from_system_cache_when_present() { let codex_home = tempfile::tempdir().expect("tempdir"); let work_dir = tempfile::tempdir().expect("tempdir"); write_system_skill(&codex_home, "system", "system-skill", "from system"); let mut cfg = make_config(&codex_home).await; cfg.cwd = work_dir.path().to_path_buf(); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "system-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::System); }
// Root ordering: user, system, then admin (admin only on Unix).
#[tokio::test] async fn skill_roots_include_admin_with_lowest_priority_on_unix() { let codex_home = tempfile::tempdir().expect("tempdir"); let cfg = make_config(&codex_home).await; let scopes: Vec<SkillScope> = skill_roots(&cfg) .into_iter() .map(|root| root.scope) .collect(); let mut expected = vec![SkillScope::User, SkillScope::System]; if cfg!(unix) { expected.push(SkillScope::Admin); } assert_eq!(scopes, expected); }
// Name collisions resolve in favor of system over admin.
#[tokio::test] async fn deduplicates_by_name_preferring_system_over_admin() { let system_dir = tempfile::tempdir().expect("tempdir"); let admin_dir = tempfile::tempdir().expect("tempdir"); write_skill_at(system_dir.path(), "system", "dupe-skill", "from system"); write_skill_at(admin_dir.path(), "admin", "dupe-skill", "from admin"); let outcome = load_skills_from_roots([ SkillRoot { path: system_dir.path().to_path_buf(), scope: SkillScope::System, }, SkillRoot { path: admin_dir.path().to_path_buf(), scope: SkillScope::Admin, }, ]); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "dupe-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::System); }
// Name collisions resolve in favor of user over system.
#[tokio::test] async fn deduplicates_by_name_preferring_user_over_system() { let codex_home = tempfile::tempdir().expect("tempdir"); let work_dir = tempfile::tempdir().expect("tempdir"); write_skill(&codex_home, "user", "dupe-skill", "from user"); write_system_skill(&codex_home, "system", "dupe-skill", "from system"); let mut cfg = make_config(&codex_home).await; cfg.cwd = work_dir.path().to_path_buf(); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "dupe-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::User); }
// Name collisions resolve in favor of repo over system.
#[tokio::test] async fn deduplicates_by_name_preferring_repo_over_system() { let codex_home = tempfile::tempdir().expect("tempdir"); let repo_dir = tempfile::tempdir().expect("tempdir"); let status = Command::new("git") .arg("init") .current_dir(repo_dir.path()) .status() .expect("git init"); assert!(status.success(), "git init failed"); write_skill_at( &repo_dir .path() .join(REPO_ROOT_CONFIG_DIR_NAME) .join(SKILLS_DIR_NAME), "repo", "dupe-skill", "from repo", ); write_system_skill(&codex_home, "system", "dupe-skill", "from system"); let mut cfg = make_config(&codex_home).await; cfg.cwd = repo_dir.path().to_path_buf(); let outcome = load_skills(&cfg); assert!( outcome.errors.is_empty(), "unexpected errors: {:?}", outcome.errors ); assert_eq!(outcome.skills.len(), 1); assert_eq!(outcome.skills[0].name, "dupe-skill"); assert_eq!(outcome.skills[0].scope, SkillScope::Repo); } }
rust
Apache-2.0
279283fe02bf0ce7f93a160db34dd8cf9c8f42c8
2026-01-04T15:31:59.292600Z
false