repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/src/error.rs | codex-rs/execpolicy/src/error.rs | use starlark::Error as StarlarkError;
use thiserror::Error;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Error)]
pub enum Error {
#[error("invalid decision: {0}")]
InvalidDecision(String),
#[error("invalid pattern element: {0}")]
InvalidPattern(String),
#[error("invalid example: {0}")]
InvalidExample(String),
#[error(
"expected every example to match at least one rule. rules: {rules:?}; unmatched examples: \
{examples:?}"
)]
ExampleDidNotMatch {
rules: Vec<String>,
examples: Vec<String>,
},
#[error("expected example to not match rule `{rule}`: {example}")]
ExampleDidMatch { rule: String, example: String },
#[error("starlark error: {0}")]
Starlark(StarlarkError),
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/src/amend.rs | codex-rs/execpolicy/src/amend.rs | use std::fs::OpenOptions;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use serde_json;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum AmendError {
#[error("prefix rule requires at least one token")]
EmptyPrefix,
#[error("policy path has no parent: {path}")]
MissingParent { path: PathBuf },
#[error("failed to create policy directory {dir}: {source}")]
CreatePolicyDir {
dir: PathBuf,
source: std::io::Error,
},
#[error("failed to format prefix tokens: {source}")]
SerializePrefix { source: serde_json::Error },
#[error("failed to open policy file {path}: {source}")]
OpenPolicyFile {
path: PathBuf,
source: std::io::Error,
},
#[error("failed to write to policy file {path}: {source}")]
WritePolicyFile {
path: PathBuf,
source: std::io::Error,
},
#[error("failed to lock policy file {path}: {source}")]
LockPolicyFile {
path: PathBuf,
source: std::io::Error,
},
#[error("failed to seek policy file {path}: {source}")]
SeekPolicyFile {
path: PathBuf,
source: std::io::Error,
},
#[error("failed to read policy file {path}: {source}")]
ReadPolicyFile {
path: PathBuf,
source: std::io::Error,
},
#[error("failed to read metadata for policy file {path}: {source}")]
PolicyMetadata {
path: PathBuf,
source: std::io::Error,
},
}
/// Note this thread uses advisory file locking and performs blocking I/O, so it should be used with
/// [`tokio::task::spawn_blocking`] when called from an async context.
pub fn blocking_append_allow_prefix_rule(
policy_path: &Path,
prefix: &[String],
) -> Result<(), AmendError> {
if prefix.is_empty() {
return Err(AmendError::EmptyPrefix);
}
let tokens = prefix
.iter()
.map(serde_json::to_string)
.collect::<Result<Vec<_>, _>>()
.map_err(|source| AmendError::SerializePrefix { source })?;
let pattern = format!("[{}]", tokens.join(", "));
let rule = format!(r#"prefix_rule(pattern={pattern}, decision="allow")"#);
let dir = policy_path
.parent()
.ok_or_else(|| AmendError::MissingParent {
path: policy_path.to_path_buf(),
})?;
match std::fs::create_dir(dir) {
Ok(()) => {}
Err(ref source) if source.kind() == std::io::ErrorKind::AlreadyExists => {}
Err(source) => {
return Err(AmendError::CreatePolicyDir {
dir: dir.to_path_buf(),
source,
});
}
}
append_locked_line(policy_path, &rule)
}
fn append_locked_line(policy_path: &Path, line: &str) -> Result<(), AmendError> {
let mut file = OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(policy_path)
.map_err(|source| AmendError::OpenPolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
file.lock().map_err(|source| AmendError::LockPolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
let len = file
.metadata()
.map_err(|source| AmendError::PolicyMetadata {
path: policy_path.to_path_buf(),
source,
})?
.len();
// Ensure file ends in a newline before appending.
if len > 0 {
file.seek(SeekFrom::End(-1))
.map_err(|source| AmendError::SeekPolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
let mut last = [0; 1];
file.read_exact(&mut last)
.map_err(|source| AmendError::ReadPolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
if last[0] != b'\n' {
file.write_all(b"\n")
.map_err(|source| AmendError::WritePolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
}
}
file.write_all(format!("{line}\n").as_bytes())
.map_err(|source| AmendError::WritePolicyFile {
path: policy_path.to_path_buf(),
source,
})?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[test]
fn appends_rule_and_creates_directories() {
let tmp = tempdir().expect("create temp dir");
let policy_path = tmp.path().join("rules").join("default.rules");
blocking_append_allow_prefix_rule(
&policy_path,
&[String::from("echo"), String::from("Hello, world!")],
)
.expect("append rule");
let contents = std::fs::read_to_string(&policy_path).expect("default.rules should exist");
assert_eq!(
contents,
r#"prefix_rule(pattern=["echo", "Hello, world!"], decision="allow")
"#
);
}
#[test]
fn appends_rule_without_duplicate_newline() {
let tmp = tempdir().expect("create temp dir");
let policy_path = tmp.path().join("rules").join("default.rules");
std::fs::create_dir_all(policy_path.parent().unwrap()).expect("create policy dir");
std::fs::write(
&policy_path,
r#"prefix_rule(pattern=["ls"], decision="allow")
"#,
)
.expect("write seed rule");
blocking_append_allow_prefix_rule(
&policy_path,
&[String::from("echo"), String::from("Hello, world!")],
)
.expect("append rule");
let contents = std::fs::read_to_string(&policy_path).expect("read policy");
assert_eq!(
contents,
r#"prefix_rule(pattern=["ls"], decision="allow")
prefix_rule(pattern=["echo", "Hello, world!"], decision="allow")
"#
);
}
#[test]
fn inserts_newline_when_missing_before_append() {
let tmp = tempdir().expect("create temp dir");
let policy_path = tmp.path().join("rules").join("default.rules");
std::fs::create_dir_all(policy_path.parent().unwrap()).expect("create policy dir");
std::fs::write(
&policy_path,
r#"prefix_rule(pattern=["ls"], decision="allow")"#,
)
.expect("write seed rule without newline");
blocking_append_allow_prefix_rule(
&policy_path,
&[String::from("echo"), String::from("Hello, world!")],
)
.expect("append rule");
let contents = std::fs::read_to_string(&policy_path).expect("read policy");
assert_eq!(
contents,
r#"prefix_rule(pattern=["ls"], decision="allow")
prefix_rule(pattern=["echo", "Hello, world!"], decision="allow")
"#
);
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/src/main.rs | codex-rs/execpolicy/src/main.rs | use anyhow::Result;
use clap::Parser;
use codex_execpolicy::execpolicycheck::ExecPolicyCheckCommand;
/// CLI for evaluating exec policies
#[derive(Parser)]
#[command(name = "codex-execpolicy")]
enum Cli {
/// Evaluate a command against a policy.
Check(ExecPolicyCheckCommand),
}
fn main() -> Result<()> {
let cli = Cli::parse();
match cli {
Cli::Check(cmd) => cmd.run(),
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/src/rule.rs | codex-rs/execpolicy/src/rule.rs | use crate::decision::Decision;
use crate::error::Error;
use crate::error::Result;
use serde::Deserialize;
use serde::Serialize;
use shlex::try_join;
use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;
/// Matches a single command token, either a fixed string or one of several allowed alternatives.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum PatternToken {
Single(String),
Alts(Vec<String>),
}
impl PatternToken {
fn matches(&self, token: &str) -> bool {
match self {
Self::Single(expected) => expected == token,
Self::Alts(alternatives) => alternatives.iter().any(|alt| alt == token),
}
}
pub fn alternatives(&self) -> &[String] {
match self {
Self::Single(expected) => std::slice::from_ref(expected),
Self::Alts(alternatives) => alternatives,
}
}
}
/// Prefix matcher for commands with support for alternative match tokens.
/// First token is fixed since we key by the first token in policy.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PrefixPattern {
pub first: Arc<str>,
pub rest: Arc<[PatternToken]>,
}
impl PrefixPattern {
pub fn matches_prefix(&self, cmd: &[String]) -> Option<Vec<String>> {
let pattern_length = self.rest.len() + 1;
if cmd.len() < pattern_length || cmd[0] != self.first.as_ref() {
return None;
}
for (pattern_token, cmd_token) in self.rest.iter().zip(&cmd[1..pattern_length]) {
if !pattern_token.matches(cmd_token) {
return None;
}
}
Some(cmd[..pattern_length].to_vec())
}
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RuleMatch {
PrefixRuleMatch {
#[serde(rename = "matchedPrefix")]
matched_prefix: Vec<String>,
decision: Decision,
},
HeuristicsRuleMatch {
command: Vec<String>,
decision: Decision,
},
}
impl RuleMatch {
pub fn decision(&self) -> Decision {
match self {
Self::PrefixRuleMatch { decision, .. } => *decision,
Self::HeuristicsRuleMatch { decision, .. } => *decision,
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PrefixRule {
pub pattern: PrefixPattern,
pub decision: Decision,
}
pub trait Rule: Any + Debug + Send + Sync {
fn program(&self) -> &str;
fn matches(&self, cmd: &[String]) -> Option<RuleMatch>;
}
pub type RuleRef = Arc<dyn Rule>;
impl Rule for PrefixRule {
fn program(&self) -> &str {
self.pattern.first.as_ref()
}
fn matches(&self, cmd: &[String]) -> Option<RuleMatch> {
self.pattern
.matches_prefix(cmd)
.map(|matched_prefix| RuleMatch::PrefixRuleMatch {
matched_prefix,
decision: self.decision,
})
}
}
/// Count how many rules match each provided example and error if any example is unmatched.
pub(crate) fn validate_match_examples(rules: &[RuleRef], matches: &[Vec<String>]) -> Result<()> {
let mut unmatched_examples = Vec::new();
for example in matches {
if rules.iter().any(|rule| rule.matches(example).is_some()) {
continue;
}
unmatched_examples.push(
try_join(example.iter().map(String::as_str))
.unwrap_or_else(|_| "unable to render example".to_string()),
);
}
if unmatched_examples.is_empty() {
Ok(())
} else {
Err(Error::ExampleDidNotMatch {
rules: rules.iter().map(|rule| format!("{rule:?}")).collect(),
examples: unmatched_examples,
})
}
}
/// Ensure that no rule matches any provided negative example.
pub(crate) fn validate_not_match_examples(
rules: &[RuleRef],
not_matches: &[Vec<String>],
) -> Result<()> {
for example in not_matches {
if let Some(rule) = rules.iter().find(|rule| rule.matches(example).is_some()) {
return Err(Error::ExampleDidMatch {
rule: format!("{rule:?}"),
example: try_join(example.iter().map(String::as_str))
.unwrap_or_else(|_| "unable to render example".to_string()),
});
}
}
Ok(())
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/src/execpolicycheck.rs | codex-rs/execpolicy/src/execpolicycheck.rs | use std::fs;
use std::path::PathBuf;
use anyhow::Context;
use anyhow::Result;
use clap::Parser;
use serde::Serialize;
use crate::Decision;
use crate::Policy;
use crate::PolicyParser;
use crate::RuleMatch;
/// Arguments for evaluating a command against one or more execpolicy files.
#[derive(Debug, Parser, Clone)]
pub struct ExecPolicyCheckCommand {
/// Paths to execpolicy rule files to evaluate (repeatable).
#[arg(short = 'r', long = "rules", value_name = "PATH", required = true)]
pub rules: Vec<PathBuf>,
/// Pretty-print the JSON output.
#[arg(long)]
pub pretty: bool,
/// Command tokens to check against the policy.
#[arg(
value_name = "COMMAND",
required = true,
trailing_var_arg = true,
allow_hyphen_values = true
)]
pub command: Vec<String>,
}
impl ExecPolicyCheckCommand {
/// Load the policies for this command, evaluate the command, and render JSON output.
pub fn run(&self) -> Result<()> {
let policy = load_policies(&self.rules)?;
let matched_rules = policy.matches_for_command(&self.command, None);
let json = format_matches_json(&matched_rules, self.pretty)?;
println!("{json}");
Ok(())
}
}
pub fn format_matches_json(matched_rules: &[RuleMatch], pretty: bool) -> Result<String> {
let output = ExecPolicyCheckOutput {
matched_rules,
decision: matched_rules.iter().map(RuleMatch::decision).max(),
};
if pretty {
serde_json::to_string_pretty(&output).map_err(Into::into)
} else {
serde_json::to_string(&output).map_err(Into::into)
}
}
pub fn load_policies(policy_paths: &[PathBuf]) -> Result<Policy> {
let mut parser = PolicyParser::new();
for policy_path in policy_paths {
let policy_file_contents = fs::read_to_string(policy_path)
.with_context(|| format!("failed to read policy at {}", policy_path.display()))?;
let policy_identifier = policy_path.to_string_lossy().to_string();
parser
.parse(&policy_identifier, &policy_file_contents)
.with_context(|| format!("failed to parse policy at {}", policy_path.display()))?;
}
Ok(parser.build())
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ExecPolicyCheckOutput<'a> {
#[serde(rename = "matchedRules")]
matched_rules: &'a [RuleMatch],
#[serde(skip_serializing_if = "Option::is_none")]
decision: Option<Decision>,
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/execpolicy/tests/basic.rs | codex-rs/execpolicy/tests/basic.rs | use std::any::Any;
use std::sync::Arc;
use anyhow::Context;
use anyhow::Result;
use codex_execpolicy::Decision;
use codex_execpolicy::Error;
use codex_execpolicy::Evaluation;
use codex_execpolicy::Policy;
use codex_execpolicy::PolicyParser;
use codex_execpolicy::RuleMatch;
use codex_execpolicy::RuleRef;
use codex_execpolicy::rule::PatternToken;
use codex_execpolicy::rule::PrefixPattern;
use codex_execpolicy::rule::PrefixRule;
use pretty_assertions::assert_eq;
fn tokens(cmd: &[&str]) -> Vec<String> {
cmd.iter().map(std::string::ToString::to_string).collect()
}
fn allow_all(_: &[String]) -> Decision {
Decision::Allow
}
fn prompt_all(_: &[String]) -> Decision {
Decision::Prompt
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum RuleSnapshot {
Prefix(PrefixRule),
}
fn rule_snapshots(rules: &[RuleRef]) -> Vec<RuleSnapshot> {
rules
.iter()
.map(|rule| {
let rule_any = rule.as_ref() as &dyn Any;
if let Some(prefix_rule) = rule_any.downcast_ref::<PrefixRule>() {
RuleSnapshot::Prefix(prefix_rule.clone())
} else {
panic!("unexpected rule type in RuleRef: {rule:?}");
}
})
.collect()
}
#[test]
fn basic_match() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = ["git", "status"],
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let cmd = tokens(&["git", "status"]);
let evaluation = policy.check(&cmd, &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "status"]),
decision: Decision::Allow,
}],
},
evaluation
);
Ok(())
}
#[test]
fn add_prefix_rule_extends_policy() -> Result<()> {
let mut policy = Policy::empty();
policy.add_prefix_rule(&tokens(&["ls", "-l"]), Decision::Prompt)?;
let rules = rule_snapshots(policy.rules().get_vec("ls").context("missing ls rules")?);
assert_eq!(
vec![RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("ls"),
rest: vec![PatternToken::Single(String::from("-l"))].into(),
},
decision: Decision::Prompt,
})],
rules
);
let evaluation = policy.check(&tokens(&["ls", "-l", "/tmp"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["ls", "-l"]),
decision: Decision::Prompt,
}],
},
evaluation
);
Ok(())
}
#[test]
fn add_prefix_rule_rejects_empty_prefix() -> Result<()> {
let mut policy = Policy::empty();
let result = policy.add_prefix_rule(&[], Decision::Allow);
match result.unwrap_err() {
Error::InvalidPattern(message) => assert_eq!(message, "prefix cannot be empty"),
other => panic!("expected InvalidPattern(..), got {other:?}"),
}
Ok(())
}
#[test]
fn parses_multiple_policy_files() -> Result<()> {
let first_policy = r#"
prefix_rule(
pattern = ["git"],
decision = "prompt",
)
"#;
let second_policy = r#"
prefix_rule(
pattern = ["git", "commit"],
decision = "forbidden",
)
"#;
let mut parser = PolicyParser::new();
parser.parse("first.rules", first_policy)?;
parser.parse("second.rules", second_policy)?;
let policy = parser.build();
let git_rules = rule_snapshots(policy.rules().get_vec("git").context("missing git rules")?);
assert_eq!(
vec![
RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("git"),
rest: Vec::<PatternToken>::new().into(),
},
decision: Decision::Prompt,
}),
RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("git"),
rest: vec![PatternToken::Single("commit".to_string())].into(),
},
decision: Decision::Forbidden,
}),
],
git_rules
);
let status_eval = policy.check(&tokens(&["git", "status"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git"]),
decision: Decision::Prompt,
}],
},
status_eval
);
let commit_eval = policy.check(&tokens(&["git", "commit", "-m", "hi"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Forbidden,
matched_rules: vec![
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git"]),
decision: Decision::Prompt,
},
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "commit"]),
decision: Decision::Forbidden,
},
],
},
commit_eval
);
Ok(())
}
#[test]
fn only_first_token_alias_expands_to_multiple_rules() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = [["bash", "sh"], ["-c", "-l"]],
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let bash_rules = rule_snapshots(
policy
.rules()
.get_vec("bash")
.context("missing bash rules")?,
);
let sh_rules = rule_snapshots(policy.rules().get_vec("sh").context("missing sh rules")?);
assert_eq!(
vec![RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("bash"),
rest: vec![PatternToken::Alts(vec!["-c".to_string(), "-l".to_string()])].into(),
},
decision: Decision::Allow,
})],
bash_rules
);
assert_eq!(
vec![RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("sh"),
rest: vec![PatternToken::Alts(vec!["-c".to_string(), "-l".to_string()])].into(),
},
decision: Decision::Allow,
})],
sh_rules
);
let bash_eval = policy.check(&tokens(&["bash", "-c", "echo", "hi"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["bash", "-c"]),
decision: Decision::Allow,
}],
},
bash_eval
);
let sh_eval = policy.check(&tokens(&["sh", "-l", "echo", "hi"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["sh", "-l"]),
decision: Decision::Allow,
}],
},
sh_eval
);
Ok(())
}
#[test]
fn tail_aliases_are_not_cartesian_expanded() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = ["npm", ["i", "install"], ["--legacy-peer-deps", "--no-save"]],
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let rules = rule_snapshots(policy.rules().get_vec("npm").context("missing npm rules")?);
assert_eq!(
vec![RuleSnapshot::Prefix(PrefixRule {
pattern: PrefixPattern {
first: Arc::from("npm"),
rest: vec![
PatternToken::Alts(vec!["i".to_string(), "install".to_string()]),
PatternToken::Alts(vec![
"--legacy-peer-deps".to_string(),
"--no-save".to_string(),
]),
]
.into(),
},
decision: Decision::Allow,
})],
rules
);
let npm_i = policy.check(&tokens(&["npm", "i", "--legacy-peer-deps"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["npm", "i", "--legacy-peer-deps"]),
decision: Decision::Allow,
}],
},
npm_i
);
let npm_install = policy.check(
&tokens(&["npm", "install", "--no-save", "leftpad"]),
&allow_all,
);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["npm", "install", "--no-save"]),
decision: Decision::Allow,
}],
},
npm_install
);
Ok(())
}
#[test]
fn match_and_not_match_examples_are_enforced() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = ["git", "status"],
match = [["git", "status"], "git status"],
not_match = [
["git", "--config", "color.status=always", "status"],
"git --config color.status=always status",
],
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let match_eval = policy.check(&tokens(&["git", "status"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "status"]),
decision: Decision::Allow,
}],
},
match_eval
);
let no_match_eval = policy.check(
&tokens(&["git", "--config", "color.status=always", "status"]),
&allow_all,
);
assert_eq!(
Evaluation {
decision: Decision::Allow,
matched_rules: vec![RuleMatch::HeuristicsRuleMatch {
command: tokens(&["git", "--config", "color.status=always", "status",]),
decision: Decision::Allow,
}],
},
no_match_eval
);
Ok(())
}
#[test]
fn strictest_decision_wins_across_matches() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = ["git"],
decision = "prompt",
)
prefix_rule(
pattern = ["git", "commit"],
decision = "forbidden",
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let commit = policy.check(&tokens(&["git", "commit", "-m", "hi"]), &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Forbidden,
matched_rules: vec![
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git"]),
decision: Decision::Prompt,
},
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "commit"]),
decision: Decision::Forbidden,
},
],
},
commit
);
Ok(())
}
#[test]
fn strictest_decision_across_multiple_commands() -> Result<()> {
let policy_src = r#"
prefix_rule(
pattern = ["git"],
decision = "prompt",
)
prefix_rule(
pattern = ["git", "commit"],
decision = "forbidden",
)
"#;
let mut parser = PolicyParser::new();
parser.parse("test.rules", policy_src)?;
let policy = parser.build();
let commands = vec![
tokens(&["git", "status"]),
tokens(&["git", "commit", "-m", "hi"]),
];
let evaluation = policy.check_multiple(&commands, &allow_all);
assert_eq!(
Evaluation {
decision: Decision::Forbidden,
matched_rules: vec![
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git"]),
decision: Decision::Prompt,
},
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git"]),
decision: Decision::Prompt,
},
RuleMatch::PrefixRuleMatch {
matched_prefix: tokens(&["git", "commit"]),
decision: Decision::Forbidden,
},
],
},
evaluation
);
Ok(())
}
#[test]
fn heuristics_match_is_returned_when_no_policy_matches() {
let policy = Policy::empty();
let command = tokens(&["python"]);
let evaluation = policy.check(&command, &prompt_all);
assert_eq!(
Evaluation {
decision: Decision::Prompt,
matched_rules: vec![RuleMatch::HeuristicsRuleMatch {
command,
decision: Decision::Prompt,
}],
},
evaluation
);
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/app-server-test-client/src/main.rs | codex-rs/app-server-test-client/src/main.rs | use std::collections::VecDeque;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::process::Child;
use std::process::ChildStdin;
use std::process::ChildStdout;
use std::process::Command;
use std::process::Stdio;
use std::thread;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::bail;
use clap::Parser;
use clap::Subcommand;
use codex_app_server_protocol::AddConversationListenerParams;
use codex_app_server_protocol::AddConversationSubscriptionResponse;
use codex_app_server_protocol::ApprovalDecision;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::ClientRequest;
use codex_app_server_protocol::CommandExecutionRequestApprovalParams;
use codex_app_server_protocol::CommandExecutionRequestApprovalResponse;
use codex_app_server_protocol::FileChangeRequestApprovalParams;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
use codex_app_server_protocol::InitializeParams;
use codex_app_server_protocol::InitializeResponse;
use codex_app_server_protocol::InputItem;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCRequest;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::LoginChatGptCompleteNotification;
use codex_app_server_protocol::LoginChatGptResponse;
use codex_app_server_protocol::NewConversationParams;
use codex_app_server_protocol::NewConversationResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxPolicy;
use codex_app_server_protocol::SendUserMessageParams;
use codex_app_server_protocol::SendUserMessageResponse;
use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_protocol::ConversationId;
use codex_protocol::protocol::Event;
use codex_protocol::protocol::EventMsg;
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json::Value;
use uuid::Uuid;
/// Minimal launcher that initializes the Codex app-server and logs the handshake.
#[derive(Parser)]
#[command(author = "Codex", version, about = "Bootstrap Codex app-server", long_about = None)]
struct Cli {
/// Path to the `codex` CLI binary.
#[arg(long, env = "CODEX_BIN", default_value = "codex")]
codex_bin: String,
#[command(subcommand)]
command: CliCommand,
}
#[derive(Subcommand)]
enum CliCommand {
/// Send a user message through the Codex app-server.
SendMessage {
/// User message to send to Codex.
#[arg()]
user_message: String,
},
/// Send a user message through the app-server V2 thread/turn APIs.
SendMessageV2 {
/// User message to send to Codex.
#[arg()]
user_message: String,
},
/// Start a V2 turn that elicits an ExecCommand approval.
#[command(name = "trigger-cmd-approval")]
TriggerCmdApproval {
/// Optional prompt; defaults to a simple python command.
#[arg()]
user_message: Option<String>,
},
/// Start a V2 turn that elicits an ApplyPatch approval.
#[command(name = "trigger-patch-approval")]
TriggerPatchApproval {
/// Optional prompt; defaults to creating a file via apply_patch.
#[arg()]
user_message: Option<String>,
},
/// Start a V2 turn that should not elicit an ExecCommand approval.
#[command(name = "no-trigger-cmd-approval")]
NoTriggerCmdApproval,
/// Send two sequential V2 turns in the same thread to test follow-up behavior.
SendFollowUpV2 {
/// Initial user message for the first turn.
#[arg()]
first_message: String,
/// Follow-up user message for the second turn.
#[arg()]
follow_up_message: String,
},
/// Trigger the ChatGPT login flow and wait for completion.
TestLogin,
/// Fetch the current account rate limits from the Codex app-server.
GetAccountRateLimits,
}
fn main() -> Result<()> {
let Cli { codex_bin, command } = Cli::parse();
match command {
CliCommand::SendMessage { user_message } => send_message(codex_bin, user_message),
CliCommand::SendMessageV2 { user_message } => send_message_v2(codex_bin, user_message),
CliCommand::TriggerCmdApproval { user_message } => {
trigger_cmd_approval(codex_bin, user_message)
}
CliCommand::TriggerPatchApproval { user_message } => {
trigger_patch_approval(codex_bin, user_message)
}
CliCommand::NoTriggerCmdApproval => no_trigger_cmd_approval(codex_bin),
CliCommand::SendFollowUpV2 {
first_message,
follow_up_message,
} => send_follow_up_v2(codex_bin, first_message, follow_up_message),
CliCommand::TestLogin => test_login(codex_bin),
CliCommand::GetAccountRateLimits => get_account_rate_limits(codex_bin),
}
}
fn send_message(codex_bin: String, user_message: String) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let conversation = client.new_conversation()?;
println!("< newConversation response: {conversation:?}");
let subscription = client.add_conversation_listener(&conversation.conversation_id)?;
println!("< addConversationListener response: {subscription:?}");
let send_response = client.send_user_message(&conversation.conversation_id, &user_message)?;
println!("< sendUserMessage response: {send_response:?}");
client.stream_conversation(&conversation.conversation_id)?;
client.remove_conversation_listener(subscription.subscription_id)?;
Ok(())
}
fn send_message_v2(codex_bin: String, user_message: String) -> Result<()> {
send_message_v2_with_policies(codex_bin, user_message, None, None)
}
fn trigger_cmd_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
let default_prompt =
"Run `touch /tmp/should-trigger-approval` so I can confirm the file exists.";
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
send_message_v2_with_policies(
codex_bin,
message,
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}
fn trigger_patch_approval(codex_bin: String, user_message: Option<String>) -> Result<()> {
let default_prompt =
"Create a file named APPROVAL_DEMO.txt containing a short hello message using apply_patch.";
let message = user_message.unwrap_or_else(|| default_prompt.to_string());
send_message_v2_with_policies(
codex_bin,
message,
Some(AskForApproval::OnRequest),
Some(SandboxPolicy::ReadOnly),
)
}
fn no_trigger_cmd_approval(codex_bin: String) -> Result<()> {
let prompt = "Run `touch should_not_trigger_approval.txt`";
send_message_v2_with_policies(codex_bin, prompt.to_string(), None, None)
}
fn send_message_v2_with_policies(
codex_bin: String,
user_message: String,
approval_policy: Option<AskForApproval>,
sandbox_policy: Option<SandboxPolicy>,
) -> Result<()> {
let mut client = CodexClient::spawn(codex_bin)?;
let initialize = client.initialize()?;
println!("< initialize response: {initialize:?}");
let thread_response = client.thread_start(ThreadStartParams::default())?;
println!("< thread/start response: {thread_response:?}");
let mut turn_params = TurnStartParams {
thread_id: thread_response.thread.id.clone(),
input: vec![V2UserInput::Text { text: user_message }],
..Default::default()
};
turn_params.approval_policy = approval_policy;
turn_params.sandbox_policy = sandbox_policy;
let turn_response = client.turn_start(turn_params)?;
println!("< turn/start response: {turn_response:?}");
client.stream_turn(&thread_response.thread.id, &turn_response.turn.id)?;
Ok(())
}
/// Send an initial message and then a follow-up on the same thread, streaming
/// each turn to completion before continuing.
fn send_follow_up_v2(
    codex_bin: String,
    first_message: String,
    follow_up_message: String,
) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;
    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
    let thread_response = client.thread_start(ThreadStartParams::default())?;
    println!("< thread/start response: {thread_response:?}");
    let thread_id = thread_response.thread.id.clone();
    // Both turns use identical parameters apart from the message text.
    let params_for = |text: String| TurnStartParams {
        thread_id: thread_id.clone(),
        input: vec![V2UserInput::Text { text }],
        ..Default::default()
    };
    let first_turn_response = client.turn_start(params_for(first_message))?;
    println!("< turn/start response (initial): {first_turn_response:?}");
    client.stream_turn(&thread_id, &first_turn_response.turn.id)?;
    let follow_up_response = client.turn_start(params_for(follow_up_message))?;
    println!("< turn/start response (follow-up): {follow_up_response:?}");
    client.stream_turn(&thread_id, &follow_up_response.turn.id)?;
    Ok(())
}
/// Drive the ChatGPT login flow: request a login URL, print it for the user,
/// then block until the server reports completion for that login id.
fn test_login(codex_bin: String) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;
    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
    let login_response = client.login_chat_gpt()?;
    println!("< loginChatGpt response: {login_response:?}");
    println!(
        "Open the following URL in your browser to continue:\n{}",
        login_response.auth_url
    );
    let completion = client.wait_for_login_completion(&login_response.login_id)?;
    println!("< loginChatGptComplete notification: {completion:?}");
    match completion.success {
        true => {
            println!("Login succeeded.");
            Ok(())
        }
        false => {
            let detail = completion
                .error
                .as_deref()
                .unwrap_or("unknown error from loginChatGptComplete");
            bail!("login failed: {}", detail)
        }
    }
}
/// Fetch and print the current account rate limits via
/// `account/rateLimits/read`.
fn get_account_rate_limits(codex_bin: String) -> Result<()> {
    let mut client = CodexClient::spawn(codex_bin)?;
    let initialize = client.initialize()?;
    println!("< initialize response: {initialize:?}");
    let response = client.get_account_rate_limits()?;
    println!("< account/rateLimits/read response: {response:?}");
    Ok(())
}
/// Line-delimited JSON-RPC client wrapped around a spawned `codex app-server`
/// child process.
struct CodexClient {
    // Child process handle; reaped (or killed) in `Drop`.
    child: Child,
    // Piped stdin of the child. Held in an `Option` so `Drop` can close it
    // first via `take()`, signalling EOF to the server.
    stdin: Option<ChildStdin>,
    // Buffered reader over the child's stdout (one JSON-RPC message per line).
    stdout: BufReader<ChildStdout>,
    // Notifications that arrived while we were waiting for a response; they
    // are replayed in arrival order by `next_notification`.
    pending_notifications: VecDeque<JSONRPCNotification>,
}
impl CodexClient {
    /// Spawn `codex app-server` with piped stdin/stdout (stderr inherited).
    fn spawn(codex_bin: String) -> Result<Self> {
        let mut codex_app_server = Command::new(&codex_bin)
            .arg("app-server")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::inherit())
            .spawn()
            .with_context(|| format!("failed to start `{codex_bin}` app-server"))?;
        let stdin = codex_app_server
            .stdin
            .take()
            .context("codex app-server stdin unavailable")?;
        let stdout = codex_app_server
            .stdout
            .take()
            .context("codex app-server stdout unavailable")?;
        Ok(Self {
            child: codex_app_server,
            stdin: Some(stdin),
            stdout: BufReader::new(stdout),
            pending_notifications: VecDeque::new(),
        })
    }
    /// Perform the JSON-RPC `initialize` handshake.
    fn initialize(&mut self) -> Result<InitializeResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::Initialize {
            request_id: request_id.clone(),
            params: InitializeParams {
                client_info: ClientInfo {
                    name: "codex-toy-app-server".to_string(),
                    title: Some("Codex Toy App Server".to_string()),
                    version: env!("CARGO_PKG_VERSION").to_string(),
                },
            },
        };
        self.send_request(request, request_id, "initialize")
    }
    /// Start a new (v1) conversation with default parameters.
    fn new_conversation(&mut self) -> Result<NewConversationResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::NewConversation {
            request_id: request_id.clone(),
            params: NewConversationParams::default(),
        };
        self.send_request(request, request_id, "newConversation")
    }
    /// Subscribe to `codex/event/*` notifications for a conversation.
    fn add_conversation_listener(
        &mut self,
        conversation_id: &ConversationId,
    ) -> Result<AddConversationSubscriptionResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::AddConversationListener {
            request_id: request_id.clone(),
            params: AddConversationListenerParams {
                conversation_id: *conversation_id,
                experimental_raw_events: false,
            },
        };
        self.send_request(request, request_id, "addConversationListener")
    }
    /// Drop a previously created conversation subscription.
    fn remove_conversation_listener(&mut self, subscription_id: Uuid) -> Result<()> {
        let request_id = self.request_id();
        let request = ClientRequest::RemoveConversationListener {
            request_id: request_id.clone(),
            params: codex_app_server_protocol::RemoveConversationListenerParams { subscription_id },
        };
        self.send_request::<codex_app_server_protocol::RemoveConversationSubscriptionResponse>(
            request,
            request_id,
            "removeConversationListener",
        )?;
        Ok(())
    }
    /// Send a plain-text user message into a (v1) conversation.
    fn send_user_message(
        &mut self,
        conversation_id: &ConversationId,
        message: &str,
    ) -> Result<SendUserMessageResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::SendUserMessage {
            request_id: request_id.clone(),
            params: SendUserMessageParams {
                conversation_id: *conversation_id,
                items: vec![InputItem::Text {
                    text: message.to_string(),
                }],
            },
        };
        self.send_request(request, request_id, "sendUserMessage")
    }
    /// v2: create a new thread.
    fn thread_start(&mut self, params: ThreadStartParams) -> Result<ThreadStartResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::ThreadStart {
            request_id: request_id.clone(),
            params,
        };
        self.send_request(request, request_id, "thread/start")
    }
    /// v2: start a turn on an existing thread.
    fn turn_start(&mut self, params: TurnStartParams) -> Result<TurnStartResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::TurnStart {
            request_id: request_id.clone(),
            params,
        };
        self.send_request(request, request_id, "turn/start")
    }
    /// Kick off the ChatGPT OAuth login flow on the server.
    fn login_chat_gpt(&mut self) -> Result<LoginChatGptResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::LoginChatGpt {
            request_id: request_id.clone(),
            params: None,
        };
        self.send_request(request, request_id, "loginChatGpt")
    }
    /// Query the account's current rate-limit snapshot.
    fn get_account_rate_limits(&mut self) -> Result<GetAccountRateLimitsResponse> {
        let request_id = self.request_id();
        let request = ClientRequest::GetAccountRateLimits {
            request_id: request_id.clone(),
            params: None,
        };
        self.send_request(request, request_id, "account/rateLimits/read")
    }
    /// Stream v1 conversation events to stdout until the task completes or is
    /// aborted. Events for other conversations are filtered out.
    fn stream_conversation(&mut self, conversation_id: &ConversationId) -> Result<()> {
        loop {
            let notification = self.next_notification()?;
            if !notification.method.starts_with("codex/event/") {
                continue;
            }
            if let Some(event) = self.extract_event(notification, conversation_id)? {
                match &event.msg {
                    EventMsg::AgentMessage(event) => {
                        println!("{}", event.message);
                    }
                    EventMsg::AgentMessageDelta(event) => {
                        // Deltas are partial lines; flush so output appears live.
                        print!("{}", event.delta);
                        std::io::stdout().flush().ok();
                    }
                    EventMsg::TaskComplete(event) => {
                        println!("\n[task complete: {event:?}]");
                        break;
                    }
                    EventMsg::TurnAborted(event) => {
                        println!("\n[turn aborted: {:?}]", event.reason);
                        break;
                    }
                    EventMsg::Error(event) => {
                        println!("[error] {event:?}");
                    }
                    _ => {
                        println!("[UNKNOWN EVENT] {:?}", event.msg);
                    }
                }
            }
        }
        Ok(())
    }
    /// Block until the server reports completion for `expected_login_id`,
    /// ignoring unrelated notifications.
    fn wait_for_login_completion(
        &mut self,
        expected_login_id: &Uuid,
    ) -> Result<LoginChatGptCompleteNotification> {
        loop {
            let notification = self.next_notification()?;
            if let Ok(server_notification) = ServerNotification::try_from(notification) {
                match server_notification {
                    ServerNotification::LoginChatGptComplete(completion) => {
                        if &completion.login_id == expected_login_id {
                            return Ok(completion);
                        }
                        println!(
                            "[ignoring loginChatGptComplete for unexpected login_id: {}]",
                            completion.login_id
                        );
                    }
                    ServerNotification::AuthStatusChange(status) => {
                        println!("< authStatusChange notification: {status:?}");
                    }
                    ServerNotification::AccountRateLimitsUpdated(snapshot) => {
                        println!("< accountRateLimitsUpdated notification: {snapshot:?}");
                    }
                    ServerNotification::SessionConfigured(_) => {
                        // SessionConfigured notifications are unrelated to login; skip.
                    }
                    _ => {}
                }
            }
            // Not a server notification (likely a conversation event); keep waiting.
        }
    }
    /// Stream v2 notifications for a turn to stdout until `turn/completed`
    /// arrives for `turn_id`.
    fn stream_turn(&mut self, thread_id: &str, turn_id: &str) -> Result<()> {
        loop {
            let notification = self.next_notification()?;
            let Ok(server_notification) = ServerNotification::try_from(notification) else {
                continue;
            };
            match server_notification {
                ServerNotification::ThreadStarted(payload) => {
                    if payload.thread.id == thread_id {
                        println!("< thread/started notification: {:?}", payload.thread);
                    }
                }
                ServerNotification::TurnStarted(payload) => {
                    if payload.turn.id == turn_id {
                        println!("< turn/started notification: {:?}", payload.turn.status);
                    }
                }
                ServerNotification::AgentMessageDelta(delta) => {
                    print!("{}", delta.delta);
                    std::io::stdout().flush().ok();
                }
                ServerNotification::CommandExecutionOutputDelta(delta) => {
                    print!("{}", delta.delta);
                    std::io::stdout().flush().ok();
                }
                ServerNotification::TerminalInteraction(delta) => {
                    println!("[stdin sent: {}]", delta.stdin);
                    std::io::stdout().flush().ok();
                }
                ServerNotification::ItemStarted(payload) => {
                    println!("\n< item started: {:?}", payload.item);
                }
                ServerNotification::ItemCompleted(payload) => {
                    println!("< item completed: {:?}", payload.item);
                }
                ServerNotification::TurnCompleted(payload) => {
                    if payload.turn.id == turn_id {
                        println!("\n< turn/completed notification: {:?}", payload.turn.status);
                        if payload.turn.status == TurnStatus::Failed
                            && let Some(error) = payload.turn.error
                        {
                            println!("[turn error] {}", error.message);
                        }
                        break;
                    }
                }
                ServerNotification::McpToolCallProgress(payload) => {
                    println!("< MCP tool progress: {}", payload.message);
                }
                _ => {
                    println!("[UNKNOWN SERVER NOTIFICATION] {server_notification:?}");
                }
            }
        }
        Ok(())
    }
    /// Decode a `codex/event/*` notification into an [`Event`], returning
    /// `Ok(None)` when the event belongs to a different conversation.
    fn extract_event(
        &self,
        notification: JSONRPCNotification,
        conversation_id: &ConversationId,
    ) -> Result<Option<Event>> {
        let params = notification
            .params
            .context("event notification missing params")?;
        let mut map = match params {
            Value::Object(map) => map,
            other => bail!("unexpected params shape: {other:?}"),
        };
        let conversation_value = map
            .remove("conversationId")
            .context("event missing conversationId")?;
        let notification_conversation: ConversationId = serde_json::from_value(conversation_value)
            .context("conversationId was not a valid UUID")?;
        // Fix: this operand was garbled (`¬ification_conversation`, an
        // HTML-entity-mangled `&n`), which does not compile.
        if &notification_conversation != conversation_id {
            return Ok(None);
        }
        let event_value = Value::Object(map);
        let event: Event =
            serde_json::from_value(event_value).context("failed to decode event payload")?;
        Ok(Some(event))
    }
    /// Write a request and block until the matching response arrives.
    fn send_request<T>(
        &mut self,
        request: ClientRequest,
        request_id: RequestId,
        method: &str,
    ) -> Result<T>
    where
        T: DeserializeOwned,
    {
        self.write_request(&request)?;
        self.wait_for_response(request_id, method)
    }
    /// Serialize a request, echo it to stdout for debugging, and write it to
    /// the child's stdin as a single line.
    fn write_request(&mut self, request: &ClientRequest) -> Result<()> {
        let request_json = serde_json::to_string(request)?;
        let request_pretty = serde_json::to_string_pretty(request)?;
        print_multiline_with_prefix("> ", &request_pretty);
        if let Some(stdin) = self.stdin.as_mut() {
            writeln!(stdin, "{request_json}")?;
            stdin
                .flush()
                .context("failed to flush request to codex app-server")?;
        } else {
            bail!("codex app-server stdin closed");
        }
        Ok(())
    }
    /// Read messages until the response (or error) for `request_id` arrives.
    /// Notifications seen in the meantime are queued; server-initiated
    /// requests are answered inline.
    fn wait_for_response<T>(&mut self, request_id: RequestId, method: &str) -> Result<T>
    where
        T: DeserializeOwned,
    {
        loop {
            let message = self.read_jsonrpc_message()?;
            match message {
                JSONRPCMessage::Response(JSONRPCResponse { id, result }) => {
                    if id == request_id {
                        return serde_json::from_value(result)
                            .with_context(|| format!("{method} response missing payload"));
                    }
                }
                JSONRPCMessage::Error(err) => {
                    if err.id == request_id {
                        bail!("{method} failed: {err:?}");
                    }
                }
                JSONRPCMessage::Notification(notification) => {
                    self.pending_notifications.push_back(notification);
                }
                JSONRPCMessage::Request(request) => {
                    self.handle_server_request(request)?;
                }
            }
        }
    }
    /// Return the next notification, draining the queued backlog first.
    fn next_notification(&mut self) -> Result<JSONRPCNotification> {
        if let Some(notification) = self.pending_notifications.pop_front() {
            return Ok(notification);
        }
        loop {
            let message = self.read_jsonrpc_message()?;
            match message {
                JSONRPCMessage::Notification(notification) => return Ok(notification),
                JSONRPCMessage::Response(_) | JSONRPCMessage::Error(_) => {
                    // No outstanding requests, so ignore stray responses/errors for now.
                    continue;
                }
                JSONRPCMessage::Request(request) => {
                    self.handle_server_request(request)?;
                }
            }
        }
    }
    /// Read one non-empty line from the child's stdout, echo it prettified,
    /// and parse it as a JSON-RPC message.
    fn read_jsonrpc_message(&mut self) -> Result<JSONRPCMessage> {
        loop {
            let mut response_line = String::new();
            let bytes = self
                .stdout
                .read_line(&mut response_line)
                .context("failed to read from codex app-server")?;
            if bytes == 0 {
                // EOF: the server exited or closed its stdout.
                bail!("codex app-server closed stdout");
            }
            let trimmed = response_line.trim();
            if trimmed.is_empty() {
                continue;
            }
            let parsed: Value =
                serde_json::from_str(trimmed).context("response was not valid JSON-RPC")?;
            let pretty = serde_json::to_string_pretty(&parsed)?;
            print_multiline_with_prefix("< ", &pretty);
            let message: JSONRPCMessage = serde_json::from_value(parsed)
                .context("response was not a valid JSON-RPC message")?;
            return Ok(message);
        }
    }
    /// Generate a fresh, unique request id (random UUID string).
    fn request_id(&self) -> RequestId {
        RequestId::String(Uuid::new_v4().to_string())
    }
    /// Dispatch a server-initiated request; this toy client auto-approves
    /// both command-execution and file-change requests.
    fn handle_server_request(&mut self, request: JSONRPCRequest) -> Result<()> {
        let server_request = ServerRequest::try_from(request)
            .context("failed to deserialize ServerRequest from JSONRPCRequest")?;
        match server_request {
            ServerRequest::CommandExecutionRequestApproval { request_id, params } => {
                self.handle_command_execution_request_approval(request_id, params)?;
            }
            ServerRequest::FileChangeRequestApproval { request_id, params } => {
                self.approve_file_change_request(request_id, params)?;
            }
            other => {
                bail!("received unsupported server request: {other:?}");
            }
        }
        Ok(())
    }
    /// Log and auto-accept a command-execution approval request.
    fn handle_command_execution_request_approval(
        &mut self,
        request_id: RequestId,
        params: CommandExecutionRequestApprovalParams,
    ) -> Result<()> {
        let CommandExecutionRequestApprovalParams {
            thread_id,
            turn_id,
            item_id,
            reason,
            proposed_execpolicy_amendment,
        } = params;
        println!(
            "\n< commandExecution approval requested for thread {thread_id}, turn {turn_id}, item {item_id}"
        );
        if let Some(reason) = reason.as_deref() {
            println!("< reason: {reason}");
        }
        if let Some(execpolicy_amendment) = proposed_execpolicy_amendment.as_ref() {
            println!("< proposed execpolicy amendment: {execpolicy_amendment:?}");
        }
        let response = CommandExecutionRequestApprovalResponse {
            decision: ApprovalDecision::Accept,
        };
        self.send_server_request_response(request_id, &response)?;
        println!("< approved commandExecution request for item {item_id}");
        Ok(())
    }
    /// Log and auto-accept a file-change approval request.
    fn approve_file_change_request(
        &mut self,
        request_id: RequestId,
        params: FileChangeRequestApprovalParams,
    ) -> Result<()> {
        let FileChangeRequestApprovalParams {
            thread_id,
            turn_id,
            item_id,
            reason,
            grant_root,
        } = params;
        println!(
            "\n< fileChange approval requested for thread {thread_id}, turn {turn_id}, item {item_id}"
        );
        if let Some(reason) = reason.as_deref() {
            println!("< reason: {reason}");
        }
        if let Some(grant_root) = grant_root.as_deref() {
            println!("< grant root: {}", grant_root.display());
        }
        let response = FileChangeRequestApprovalResponse {
            decision: ApprovalDecision::Accept,
        };
        self.send_server_request_response(request_id, &response)?;
        println!("< approved fileChange request for item {item_id}");
        Ok(())
    }
    /// Reply to a server-initiated request with a serialized result payload.
    fn send_server_request_response<T>(&mut self, request_id: RequestId, response: &T) -> Result<()>
    where
        T: Serialize,
    {
        let message = JSONRPCMessage::Response(JSONRPCResponse {
            id: request_id,
            result: serde_json::to_value(response)?,
        });
        self.write_jsonrpc_message(message)
    }
    /// Serialize a message, echo it to stdout, and write it to the child's
    /// stdin as a single line.
    fn write_jsonrpc_message(&mut self, message: JSONRPCMessage) -> Result<()> {
        let payload = serde_json::to_string(&message)?;
        let pretty = serde_json::to_string_pretty(&message)?;
        print_multiline_with_prefix("> ", &pretty);
        if let Some(stdin) = self.stdin.as_mut() {
            writeln!(stdin, "{payload}")?;
            stdin
                .flush()
                .context("failed to flush response to codex app-server")?;
            return Ok(());
        }
        bail!("codex app-server stdin closed")
    }
}
/// Print `payload` line by line, prefixing every line with `prefix`.
fn print_multiline_with_prefix(prefix: &str, payload: &str) {
    payload
        .lines()
        .for_each(|line| println!("{prefix}{line}"));
}
impl Drop for CodexClient {
    /// Best-effort shutdown of the child app-server: close stdin (EOF), give
    /// it a short grace period to exit, then kill and reap it.
    fn drop(&mut self) {
        // Dropping stdin closes the pipe so the server sees EOF.
        let _ = self.stdin.take();
        if let Ok(Some(status)) = self.child.try_wait() {
            println!("[codex app-server exited: {status}]");
            return;
        }
        // Grace period for the server to exit on its own after EOF.
        thread::sleep(Duration::from_millis(100));
        if let Ok(Some(status)) = self.child.try_wait() {
            println!("[codex app-server exited: {status}]");
            return;
        }
        // Still running: force-kill and wait to avoid leaving a zombie.
        let _ = self.child.kill();
        let _ = self.child.wait();
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/lib.rs | codex-rs/rmcp-client/src/lib.rs | mod auth_status;
mod find_codex_home;
mod logging_client_handler;
mod oauth;
mod perform_oauth_login;
mod program_resolver;
mod rmcp_client;
mod utils;
pub use auth_status::determine_streamable_http_auth_status;
pub use auth_status::supports_oauth_login;
pub use codex_protocol::protocol::McpAuthStatus;
pub use oauth::OAuthCredentialsStoreMode;
pub use oauth::StoredOAuthTokens;
pub use oauth::WrappedOAuthTokenResponse;
pub use oauth::delete_oauth_tokens;
pub(crate) use oauth::load_oauth_tokens;
pub use oauth::save_oauth_tokens;
pub use perform_oauth_login::OauthLoginHandle;
pub use perform_oauth_login::perform_oauth_login;
pub use perform_oauth_login::perform_oauth_login_return_url;
pub use rmcp::model::ElicitationAction;
pub use rmcp_client::Elicitation;
pub use rmcp_client::ElicitationResponse;
pub use rmcp_client::RmcpClient;
pub use rmcp_client::SendElicitation;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/perform_oauth_login.rs | codex-rs/rmcp-client/src/perform_oauth_login.rs | use std::collections::HashMap;
use std::string::String;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use reqwest::ClientBuilder;
use rmcp::transport::auth::OAuthState;
use tiny_http::Response;
use tiny_http::Server;
use tokio::sync::oneshot;
use tokio::time::timeout;
use urlencoding::decode;
use crate::OAuthCredentialsStoreMode;
use crate::StoredOAuthTokens;
use crate::WrappedOAuthTokenResponse;
use crate::oauth::compute_expires_at_millis;
use crate::save_oauth_tokens;
use crate::utils::apply_default_headers;
use crate::utils::build_default_headers;
/// Optional HTTP header configuration forwarded to the OAuth HTTP client.
struct OauthHeaders {
    // Literal header name -> value pairs.
    http_headers: Option<HashMap<String, String>>,
    // Header name -> environment-variable name; presumably resolved to values
    // by `build_default_headers` — NOTE(review): confirm in utils.rs.
    env_http_headers: Option<HashMap<String, String>>,
}
/// Guard that unblocks the local OAuth callback server when dropped, so a
/// blocking `recv()` loop in the spawned listener task can terminate.
struct CallbackServerGuard {
    server: Arc<Server>,
}
impl Drop for CallbackServerGuard {
    fn drop(&mut self) {
        // Wake the server out of `recv()`; the listener loop then exits.
        self.server.unblock();
    }
}
/// Run a complete OAuth login for an MCP server: start a local callback
/// server, open the authorization URL in the browser, wait for the redirect,
/// exchange the code, and persist the resulting tokens per `store_mode`.
///
/// Resolves once the flow finishes, fails, or times out (default timeout).
pub async fn perform_oauth_login(
    server_name: &str,
    server_url: &str,
    store_mode: OAuthCredentialsStoreMode,
    http_headers: Option<HashMap<String, String>>,
    env_http_headers: Option<HashMap<String, String>>,
    scopes: &[String],
) -> Result<()> {
    let headers = OauthHeaders {
        http_headers,
        env_http_headers,
    };
    // `true` = launch the browser; `None` = use the default timeout.
    OauthLoginFlow::new(
        server_name,
        server_url,
        store_mode,
        headers,
        scopes,
        true,
        None,
    )
    .await?
    .finish()
    .await
}
/// Start an OAuth login without launching a browser: the flow runs in a
/// background task and the caller receives the authorization URL plus a
/// completion receiver via [`OauthLoginHandle`].
pub async fn perform_oauth_login_return_url(
    server_name: &str,
    server_url: &str,
    store_mode: OAuthCredentialsStoreMode,
    http_headers: Option<HashMap<String, String>>,
    env_http_headers: Option<HashMap<String, String>>,
    scopes: &[String],
    timeout_secs: Option<i64>,
) -> Result<OauthLoginHandle> {
    let headers = OauthHeaders {
        http_headers,
        env_http_headers,
    };
    // `false` = do not launch the browser; the caller presents the URL.
    let flow = OauthLoginFlow::new(
        server_name,
        server_url,
        store_mode,
        headers,
        scopes,
        false,
        timeout_secs,
    )
    .await?;
    let authorization_url = flow.authorization_url();
    // The flow finishes (token exchange + persistence) in a spawned task.
    let completion = flow.spawn();
    Ok(OauthLoginHandle::new(authorization_url, completion))
}
/// Run the callback HTTP server on a blocking thread until a valid
/// `/callback?code=…&state=…` request arrives, then forward `(code, state)`
/// through `tx` and stop. Invalid requests get a 400 and the loop continues.
fn spawn_callback_server(server: Arc<Server>, tx: oneshot::Sender<(String, String)>) {
    // `Server::recv` blocks, so this must run on the blocking thread pool.
    tokio::task::spawn_blocking(move || {
        while let Ok(request) = server.recv() {
            let path = request.url().to_string();
            if let Some(OauthCallbackResult { code, state }) = parse_oauth_callback(&path) {
                let response =
                    Response::from_string("Authentication complete. You may close this window.");
                if let Err(err) = request.respond(response) {
                    eprintln!("Failed to respond to OAuth callback: {err}");
                }
                if let Err(err) = tx.send((code, state)) {
                    eprintln!("Failed to send OAuth callback: {err:?}");
                }
                // First valid callback wins; shut the loop down.
                break;
            } else {
                let response =
                    Response::from_string("Invalid OAuth callback").with_status_code(400);
                if let Err(err) = request.respond(response) {
                    eprintln!("Failed to respond to OAuth callback: {err}");
                }
            }
        }
    });
}
/// Decoded `code` and `state` query parameters from the OAuth redirect.
struct OauthCallbackResult {
    code: String,
    state: String,
}
/// Parse an OAuth redirect of the form `/callback?code=…&state=…`.
///
/// Returns `None` when the route is not `/callback` or when either the `code`
/// or `state` parameter is missing. Malformed query pairs — a pair without
/// `=` (e.g. from a trailing `&` or a flag-style parameter) or a value that
/// fails percent-decoding — are skipped instead of rejecting the whole
/// callback, which the previous `?`-based version did.
fn parse_oauth_callback(path: &str) -> Option<OauthCallbackResult> {
    let (route, query) = path.split_once('?')?;
    if route != "/callback" {
        return None;
    }
    let mut code = None;
    let mut state = None;
    for pair in query.split('&') {
        // Skip pairs without `=` rather than aborting the entire parse.
        let Some((key, value)) = pair.split_once('=') else {
            continue;
        };
        // Skip undecodable values; an unrelated bad parameter must not
        // invalidate an otherwise-correct callback.
        let Some(decoded) = decode(value).ok().map(|v| v.into_owned()) else {
            continue;
        };
        match key {
            "code" => code = Some(decoded),
            "state" => state = Some(decoded),
            _ => {}
        }
    }
    Some(OauthCallbackResult {
        code: code?,
        state: state?,
    })
}
/// Handle to an in-flight OAuth login started without launching a browser:
/// exposes the authorization URL plus a receiver that resolves when the
/// background flow completes.
pub struct OauthLoginHandle {
    // URL the user must open to authorize.
    authorization_url: String,
    // Resolves with the outcome of the background login task.
    completion: oneshot::Receiver<Result<()>>,
}
impl OauthLoginHandle {
    /// Pair an authorization URL with the background task's completion channel.
    fn new(authorization_url: String, completion: oneshot::Receiver<Result<()>>) -> Self {
        Self {
            authorization_url,
            completion,
        }
    }
    /// URL the user must open to authorize the login.
    pub fn authorization_url(&self) -> &str {
        &self.authorization_url
    }
    /// Decompose into the URL and the raw completion receiver.
    pub fn into_parts(self) -> (String, oneshot::Receiver<Result<()>>) {
        (self.authorization_url, self.completion)
    }
    /// Wait for the background login to finish; an error is returned both
    /// when the flow fails and when its task is dropped/cancelled.
    pub async fn wait(self) -> Result<()> {
        self.completion
            .await
            .map_err(|err| anyhow!("OAuth login task was cancelled: {err}"))?
    }
}
/// State for one OAuth login attempt, shared by the blocking and background
/// variants of the flow.
struct OauthLoginFlow {
    // Authorization URL the user must visit.
    auth_url: String,
    // rmcp OAuth state machine (authorization + token exchange).
    oauth_state: OAuthState,
    // Receives `(code, state)` from the local callback server.
    rx: oneshot::Receiver<(String, String)>,
    // Unblocks the callback server when the flow is dropped/finished.
    guard: CallbackServerGuard,
    server_name: String,
    server_url: String,
    store_mode: OAuthCredentialsStoreMode,
    // Whether `finish` should open the URL in the user's browser.
    launch_browser: bool,
    // How long to wait for the OAuth callback before failing.
    timeout: Duration,
}
impl OauthLoginFlow {
    /// Prepare the login: bind a localhost callback server on an ephemeral
    /// port, start the authorization request, and capture the auth URL.
    async fn new(
        server_name: &str,
        server_url: &str,
        store_mode: OAuthCredentialsStoreMode,
        headers: OauthHeaders,
        scopes: &[String],
        launch_browser: bool,
        timeout_secs: Option<i64>,
    ) -> Result<Self> {
        const DEFAULT_OAUTH_TIMEOUT_SECS: i64 = 300;
        // Port 0 lets the OS pick a free port; the redirect URI is derived
        // from the actual bound address below.
        let server = Arc::new(Server::http("127.0.0.1:0").map_err(|err| anyhow!(err))?);
        let guard = CallbackServerGuard {
            server: Arc::clone(&server),
        };
        let redirect_uri = match server.server_addr() {
            tiny_http::ListenAddr::IP(std::net::SocketAddr::V4(addr)) => {
                let ip = addr.ip();
                let port = addr.port();
                format!("http://{ip}:{port}/callback")
            }
            tiny_http::ListenAddr::IP(std::net::SocketAddr::V6(addr)) => {
                let ip = addr.ip();
                let port = addr.port();
                format!("http://[{ip}]:{port}/callback")
            }
            // Non-IP listeners (e.g. Unix sockets) cannot serve a redirect;
            // this arm only exists on non-Windows targets where tiny_http
            // has such variants.
            #[cfg(not(target_os = "windows"))]
            _ => return Err(anyhow!("unable to determine callback address")),
        };
        let (tx, rx) = oneshot::channel();
        spawn_callback_server(server, tx);
        let OauthHeaders {
            http_headers,
            env_http_headers,
        } = headers;
        let default_headers = build_default_headers(http_headers, env_http_headers)?;
        let http_client = apply_default_headers(ClientBuilder::new(), &default_headers).build()?;
        let mut oauth_state = OAuthState::new(server_url, Some(http_client)).await?;
        let scope_refs: Vec<&str> = scopes.iter().map(String::as_str).collect();
        oauth_state
            .start_authorization(&scope_refs, &redirect_uri, Some("Codex"))
            .await?;
        let auth_url = oauth_state.get_authorization_url().await?;
        // Clamp to at least 1 second so a zero/negative config can't make the
        // wait elapse immediately.
        let timeout_secs = timeout_secs.unwrap_or(DEFAULT_OAUTH_TIMEOUT_SECS).max(1);
        let timeout = Duration::from_secs(timeout_secs as u64);
        Ok(Self {
            auth_url,
            oauth_state,
            rx,
            guard,
            server_name: server_name.to_string(),
            server_url: server_url.to_string(),
            store_mode,
            launch_browser,
            timeout,
        })
    }
    /// The authorization URL for this flow.
    fn authorization_url(&self) -> String {
        self.auth_url.clone()
    }
    /// Complete the flow: optionally open the browser, wait for the callback
    /// (bounded by `self.timeout`), exchange the code for tokens, and persist
    /// them. The callback server is unblocked afterwards in every case.
    async fn finish(mut self) -> Result<()> {
        if self.launch_browser {
            let server_name = &self.server_name;
            let auth_url = &self.auth_url;
            println!(
                "Authorize `{server_name}` by opening this URL in your browser:\n{auth_url}\n"
            );
            if webbrowser::open(auth_url).is_err() {
                println!("(Browser launch failed; please copy the URL above manually.)");
            }
        }
        // Run the fallible steps in an async block so `guard` is dropped
        // (unblocking the callback server) regardless of the outcome.
        let result = async {
            let (code, csrf_state) = timeout(self.timeout, &mut self.rx)
                .await
                .context("timed out waiting for OAuth callback")?
                .context("OAuth callback was cancelled")?;
            self.oauth_state
                .handle_callback(&code, &csrf_state)
                .await
                .context("failed to handle OAuth callback")?;
            let (client_id, credentials_opt) = self
                .oauth_state
                .get_credentials()
                .await
                .context("failed to retrieve OAuth credentials")?;
            let credentials = credentials_opt
                .ok_or_else(|| anyhow!("OAuth provider did not return credentials"))?;
            let expires_at = compute_expires_at_millis(&credentials);
            let stored = StoredOAuthTokens {
                server_name: self.server_name.clone(),
                url: self.server_url.clone(),
                client_id,
                token_response: WrappedOAuthTokenResponse(credentials),
                expires_at,
            };
            save_oauth_tokens(&self.server_name, &stored, self.store_mode)?;
            Ok(())
        }
        .await;
        drop(self.guard);
        result
    }
    /// Run `finish` on a background task; the returned receiver resolves with
    /// the outcome (errors are also logged to stderr).
    fn spawn(self) -> oneshot::Receiver<Result<()>> {
        let server_name_for_logging = self.server_name.clone();
        let (tx, rx) = oneshot::channel();
        tokio::spawn(async move {
            let result = self.finish().await;
            if let Err(err) = &result {
                eprintln!(
                    "Failed to complete OAuth login for '{server_name_for_logging}': {err:#}"
                );
            }
            let _ = tx.send(result);
        });
        rx
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/auth_status.rs | codex-rs/rmcp-client/src/auth_status.rs | use std::collections::HashMap;
use std::time::Duration;
use anyhow::Error;
use anyhow::Result;
use codex_protocol::protocol::McpAuthStatus;
use reqwest::Client;
use reqwest::StatusCode;
use reqwest::Url;
use reqwest::header::HeaderMap;
use serde::Deserialize;
use tracing::debug;
use crate::OAuthCredentialsStoreMode;
use crate::oauth::has_oauth_tokens;
use crate::utils::apply_default_headers;
use crate::utils::build_default_headers;
// Per-request timeout for the OAuth discovery probes below.
const DISCOVERY_TIMEOUT: Duration = Duration::from_secs(5);
// Header (and version value) sent with discovery requests to advertise the
// MCP protocol version.
const OAUTH_DISCOVERY_HEADER: &str = "MCP-Protocol-Version";
const OAUTH_DISCOVERY_VERSION: &str = "2024-11-05";
/// Determine the authentication status for a streamable HTTP MCP server.
///
/// Precedence: a configured bearer-token env var wins, then stored OAuth
/// tokens, then live OAuth discovery (`NotLoggedIn` if the server advertises
/// OAuth, `Unsupported` otherwise). Discovery failures are logged at debug
/// level and reported as `Unsupported` rather than propagated.
pub async fn determine_streamable_http_auth_status(
    server_name: &str,
    url: &str,
    bearer_token_env_var: Option<&str>,
    http_headers: Option<HashMap<String, String>>,
    env_http_headers: Option<HashMap<String, String>>,
    store_mode: OAuthCredentialsStoreMode,
) -> Result<McpAuthStatus> {
    if bearer_token_env_var.is_some() {
        return Ok(McpAuthStatus::BearerToken);
    }
    if has_oauth_tokens(server_name, url, store_mode)? {
        return Ok(McpAuthStatus::OAuth);
    }
    let default_headers = build_default_headers(http_headers, env_http_headers)?;
    match supports_oauth_login_with_headers(url, &default_headers).await {
        Ok(true) => Ok(McpAuthStatus::NotLoggedIn),
        Ok(false) => Ok(McpAuthStatus::Unsupported),
        Err(error) => {
            debug!(
                "failed to detect OAuth support for MCP server `{server_name}` at {url}: {error:?}"
            );
            Ok(McpAuthStatus::Unsupported)
        }
    }
}
/// Attempt to determine whether a streamable HTTP MCP server advertises OAuth
/// login, using no extra request headers.
pub async fn supports_oauth_login(url: &str) -> Result<bool> {
    supports_oauth_login_with_headers(url, &HeaderMap::new()).await
}
/// Probe the server's RFC 8414 well-known metadata endpoints and report
/// whether both an authorization and a token endpoint are advertised.
/// Network/parse errors on individual candidates are collected and only
/// logged at debug level; the function errs on the side of `Ok(false)`.
async fn supports_oauth_login_with_headers(url: &str, default_headers: &HeaderMap) -> Result<bool> {
    let base_url = Url::parse(url)?;
    let builder = Client::builder().timeout(DISCOVERY_TIMEOUT);
    let client = apply_default_headers(builder, default_headers).build()?;
    let mut last_error: Option<Error> = None;
    // Try each candidate discovery path derived from the server's base path.
    for candidate_path in discovery_paths(base_url.path()) {
        let mut discovery_url = base_url.clone();
        discovery_url.set_path(&candidate_path);
        let response = match client
            .get(discovery_url.clone())
            .header(OAUTH_DISCOVERY_HEADER, OAUTH_DISCOVERY_VERSION)
            .send()
            .await
        {
            Ok(response) => response,
            Err(err) => {
                last_error = Some(err.into());
                continue;
            }
        };
        // Only a 200 counts as discovery metadata; anything else moves on.
        if response.status() != StatusCode::OK {
            continue;
        }
        let metadata = match response.json::<OAuthDiscoveryMetadata>().await {
            Ok(metadata) => metadata,
            Err(err) => {
                last_error = Some(err.into());
                continue;
            }
        };
        if metadata.authorization_endpoint.is_some() && metadata.token_endpoint.is_some() {
            return Ok(true);
        }
    }
    if let Some(err) = last_error {
        debug!("OAuth discovery requests failed for {url}: {err:?}");
    }
    Ok(false)
}
/// Minimal slice of RFC 8414 authorization-server metadata: only the two
/// endpoints needed to decide whether OAuth login is supported.
#[derive(Debug, Deserialize)]
struct OAuthDiscoveryMetadata {
    #[serde(default)]
    authorization_endpoint: Option<String>,
    #[serde(default)]
    token_endpoint: Option<String>,
}
/// Build the candidate well-known discovery paths per RFC 8414 section 3.1,
/// which MCP servers must support for OAuth.
/// For a non-root base path the candidates are, in order: the canonical path
/// with the base path appended, the base path with the well-known suffix
/// appended, and the bare canonical path — with duplicates removed while
/// keeping first occurrence.
/// https://datatracker.ietf.org/doc/html/rfc8414#section-3.1
/// https://github.com/modelcontextprotocol/rust-sdk/blob/main/crates/rmcp/src/transport/auth.rs#L182
fn discovery_paths(base_path: &str) -> Vec<String> {
    const CANONICAL: &str = "/.well-known/oauth-authorization-server";
    let segment = base_path.trim_matches('/');
    if segment.is_empty() {
        return vec![CANONICAL.to_string()];
    }
    let candidates = [
        format!("{CANONICAL}/{segment}"),
        format!("/{segment}{CANONICAL}"),
        CANONICAL.to_string(),
    ];
    let mut unique: Vec<String> = Vec::with_capacity(candidates.len());
    for candidate in candidates {
        if !unique.contains(&candidate) {
            unique.push(candidate);
        }
    }
    unique
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/oauth.rs | codex-rs/rmcp-client/src/oauth.rs | //! This file handles all logic related to managing MCP OAuth credentials.
//! All credentials are stored using the keyring crate which uses os-specific keyring services.
//! https://crates.io/crates/keyring
//! macOS: macOS keychain.
//! Windows: Windows Credential Manager
//! Linux: DBus-based Secret Service, the kernel keyutils, and a combo of the two
//! FreeBSD, OpenBSD: DBus-based Secret Service
//!
//! For Linux, we use linux-native-async-persistent which uses both keyutils and async-secret-service (see below) for storage.
//! See the docs for the keyutils_persistent module for a full explanation of why both are used. Because this store uses the
//! async-secret-service, you must specify the additional features required by that store
//!
//! async-secret-service provides access to the DBus-based Secret Service storage on Linux, FreeBSD, and OpenBSD. This is an asynchronous
//! keystore that always encrypts secrets when they are transferred across the bus. If DBus isn't installed the keystore will fall back to the json
//! file because we don't use the "vendored" feature.
//!
//! If the keyring is not available or fails, we fall back to CODEX_HOME/.credentials.json which is consistent with other coding CLI agents.
use anyhow::Context;
use anyhow::Error;
use anyhow::Result;
use oauth2::AccessToken;
use oauth2::EmptyExtraTokenFields;
use oauth2::RefreshToken;
use oauth2::Scope;
use oauth2::TokenResponse;
use oauth2::basic::BasicTokenType;
use rmcp::transport::auth::OAuthTokenResponse;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use serde_json::map::Map as JsonMap;
use sha2::Digest;
use sha2::Sha256;
use std::collections::BTreeMap;
use std::fs;
use std::io::ErrorKind;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tracing::warn;
use codex_keyring_store::DefaultKeyringStore;
use codex_keyring_store::KeyringStore;
use rmcp::transport::auth::AuthorizationManager;
use tokio::sync::Mutex;
use crate::find_codex_home::find_codex_home;
// Service name under which MCP OAuth tokens are filed in the OS keyring.
const KEYRING_SERVICE: &str = "Codex MCP Credentials";
// 30-second skew applied around token expiry. NOTE(review): the consuming
// code is outside this view — confirm whether it refreshes early or treats
// near-expiry tokens as expired.
const REFRESH_SKEW_MILLIS: u64 = 30_000;
/// OAuth tokens persisted for one MCP server (keyring or credentials file).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct StoredOAuthTokens {
    // Configured name of the MCP server these tokens belong to.
    pub server_name: String,
    // Server URL the tokens were issued for.
    pub url: String,
    // OAuth client id returned during registration/authorization.
    pub client_id: String,
    pub token_response: WrappedOAuthTokenResponse,
    // Absolute expiry timestamp; `default` keeps older records (written
    // before this field existed) deserializable as `None`.
    #[serde(default)]
    pub expires_at: Option<u64>,
}
/// Determine where Codex should store and read MCP credentials.
///
/// Serialized in lowercase (`auto` / `file` / `keyring`) for config files.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum OAuthCredentialsStoreMode {
    /// `Keyring` when available; otherwise, `File`.
    /// Credentials stored in the keyring will only be readable by Codex unless the user explicitly grants access via OS-level keyring access.
    #[default]
    Auto,
    /// CODEX_HOME/.credentials.json
    /// This file will be readable to Codex and other applications running as the same user.
    File,
    /// Keyring when available, otherwise fail.
    Keyring,
}
/// Wrapper around `OAuthTokenResponse` so stored token sets can be compared.
///
/// Equality is defined as "serializes to the same JSON string"; if either
/// side fails to serialize, the two values are considered unequal.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WrappedOAuthTokenResponse(pub OAuthTokenResponse);

impl PartialEq for WrappedOAuthTokenResponse {
    fn eq(&self, other: &Self) -> bool {
        let lhs = serde_json::to_string(self);
        let rhs = serde_json::to_string(other);
        matches!((lhs, rhs), (Ok(a), Ok(b)) if a == b)
    }
}
/// Load stored OAuth tokens for `server_name`/`url`, honoring the configured
/// credential store mode:
///
/// - `Auto`: try the OS keyring first, then fall back to the credentials file.
/// - `File`: read only from the CODEX_HOME credentials file.
/// - `Keyring`: read only from the keyring; failures surface as errors.
///
/// Returns `Ok(None)` when no credentials exist for the server/url pair.
pub(crate) fn load_oauth_tokens(
    server_name: &str,
    url: &str,
    store_mode: OAuthCredentialsStoreMode,
) -> Result<Option<StoredOAuthTokens>> {
    let keyring_store = DefaultKeyringStore;
    match store_mode {
        OAuthCredentialsStoreMode::Auto => {
            load_oauth_tokens_from_keyring_with_fallback_to_file(&keyring_store, server_name, url)
        }
        OAuthCredentialsStoreMode::File => load_oauth_tokens_from_file(server_name, url),
        OAuthCredentialsStoreMode::Keyring => {
            load_oauth_tokens_from_keyring(&keyring_store, server_name, url)
                .with_context(|| "failed to read OAuth tokens from keyring".to_string())
        }
    }
}
/// Report whether any OAuth tokens are stored for the given server/url pair
/// under the configured store mode.
pub(crate) fn has_oauth_tokens(
    server_name: &str,
    url: &str,
    store_mode: OAuthCredentialsStoreMode,
) -> Result<bool> {
    let tokens = load_oauth_tokens(server_name, url, store_mode)?;
    Ok(tokens.is_some())
}
/// Rebuild the relative `expires_in` on the wrapped token response from the
/// persisted absolute `expires_at` timestamp.
///
/// Tokens whose `expires_at` is absent are left untouched; tokens whose
/// deadline has already passed get `expires_in` cleared.
fn refresh_expires_in_from_timestamp(tokens: &mut StoredOAuthTokens) {
    if let Some(expires_at) = tokens.expires_at {
        let remaining = expires_in_from_timestamp(expires_at).map(Duration::from_secs);
        tokens.token_response.0.set_expires_in(remaining.as_ref());
    }
}
/// `Auto`-mode read path: prefer the keyring; fall back to the credentials
/// file when the keyring has no entry or errors.
///
/// A keyring error is logged immediately, and if the file fallback then also
/// fails, the keyring error is attached as context so the root cause is kept.
fn load_oauth_tokens_from_keyring_with_fallback_to_file<K: KeyringStore>(
    keyring_store: &K,
    server_name: &str,
    url: &str,
) -> Result<Option<StoredOAuthTokens>> {
    match load_oauth_tokens_from_keyring(keyring_store, server_name, url) {
        Ok(Some(tokens)) => Ok(Some(tokens)),
        Ok(None) => load_oauth_tokens_from_file(server_name, url),
        Err(error) => {
            warn!("failed to read OAuth tokens from keyring: {error}");
            load_oauth_tokens_from_file(server_name, url)
                .with_context(|| format!("failed to read OAuth tokens from keyring: {error}"))
        }
    }
}
/// Read tokens for `server_name`/`url` from the OS keyring.
///
/// Returns `Ok(None)` when the keyring has no entry under the computed store
/// key. The relative `expires_in` on the deserialized response is rebuilt
/// from the persisted absolute `expires_at` before returning.
fn load_oauth_tokens_from_keyring<K: KeyringStore>(
    keyring_store: &K,
    server_name: &str,
    url: &str,
) -> Result<Option<StoredOAuthTokens>> {
    let key = compute_store_key(server_name, url)?;
    match keyring_store.load(KEYRING_SERVICE, &key) {
        Ok(Some(serialized)) => {
            let mut tokens: StoredOAuthTokens = serde_json::from_str(&serialized)
                .context("failed to deserialize OAuth tokens from keyring")?;
            refresh_expires_in_from_timestamp(&mut tokens);
            Ok(Some(tokens))
        }
        Ok(None) => Ok(None),
        Err(error) => Err(Error::new(error.into_error())),
    }
}
/// Persist OAuth tokens for `server_name` according to the configured store
/// mode (see [`OAuthCredentialsStoreMode`]).
///
/// In `Auto` mode a keyring failure falls back to the credentials file; in
/// `Keyring` mode a keyring failure is returned to the caller.
pub fn save_oauth_tokens(
    server_name: &str,
    tokens: &StoredOAuthTokens,
    store_mode: OAuthCredentialsStoreMode,
) -> Result<()> {
    let keyring_store = DefaultKeyringStore;
    match store_mode {
        OAuthCredentialsStoreMode::Auto => save_oauth_tokens_with_keyring_with_fallback_to_file(
            &keyring_store,
            server_name,
            tokens,
        ),
        OAuthCredentialsStoreMode::File => save_oauth_tokens_to_file(tokens),
        OAuthCredentialsStoreMode::Keyring => {
            save_oauth_tokens_with_keyring(&keyring_store, server_name, tokens)
        }
    }
}
/// Write `tokens` to the OS keyring under the computed store key.
///
/// On success, any matching entry in the file fallback is removed (best
/// effort — a failure there is only logged) so secrets are not duplicated on
/// disk. On failure, the keyring error is logged and returned with context.
fn save_oauth_tokens_with_keyring<K: KeyringStore>(
    keyring_store: &K,
    server_name: &str,
    tokens: &StoredOAuthTokens,
) -> Result<()> {
    let serialized = serde_json::to_string(tokens).context("failed to serialize OAuth tokens")?;
    let key = compute_store_key(server_name, &tokens.url)?;
    match keyring_store.save(KEYRING_SERVICE, &key, &serialized) {
        Ok(()) => {
            if let Err(error) = delete_oauth_tokens_from_file(&key) {
                warn!("failed to remove OAuth tokens from fallback storage: {error:?}");
            }
            Ok(())
        }
        Err(error) => {
            let message = format!(
                "failed to write OAuth tokens to keyring: {}",
                error.message()
            );
            warn!("{message}");
            Err(Error::new(error.into_error()).context(message))
        }
    }
}
fn save_oauth_tokens_with_keyring_with_fallback_to_file<K: KeyringStore>(
keyring_store: &K,
server_name: &str,
tokens: &StoredOAuthTokens,
) -> Result<()> {
match save_oauth_tokens_with_keyring(keyring_store, server_name, tokens) {
Ok(()) => Ok(()),
Err(error) => {
let message = error.to_string();
warn!("falling back to file storage for OAuth tokens: {message}");
save_oauth_tokens_to_file(tokens)
.with_context(|| format!("failed to write OAuth tokens to keyring: {message}"))
}
}
}
/// Delete stored OAuth tokens for the given server/url from both the keyring
/// and the file fallback, returning whether anything was removed.
pub fn delete_oauth_tokens(
    server_name: &str,
    url: &str,
    store_mode: OAuthCredentialsStoreMode,
) -> Result<bool> {
    delete_oauth_tokens_from_keyring_and_file(&DefaultKeyringStore, store_mode, server_name, url)
}
/// Delete tokens for `server_name`/`url` from the keyring and then the file
/// fallback.
///
/// Keyring failures abort in `Auto`/`Keyring` modes (the keyring is
/// authoritative there); in `File` mode the failure is only logged and
/// treated as "nothing removed from the keyring". Returns whether either
/// store actually removed an entry.
fn delete_oauth_tokens_from_keyring_and_file<K: KeyringStore>(
    keyring_store: &K,
    store_mode: OAuthCredentialsStoreMode,
    server_name: &str,
    url: &str,
) -> Result<bool> {
    let key = compute_store_key(server_name, url)?;
    let keyring_result = keyring_store.delete(KEYRING_SERVICE, &key);
    let keyring_removed = match keyring_result {
        Ok(removed) => removed,
        Err(error) => {
            let message = error.message();
            warn!("failed to delete OAuth tokens from keyring: {message}");
            match store_mode {
                OAuthCredentialsStoreMode::Auto | OAuthCredentialsStoreMode::Keyring => {
                    return Err(error.into_error())
                        .context("failed to delete OAuth tokens from keyring");
                }
                OAuthCredentialsStoreMode::File => false,
            }
        }
    };
    let file_removed = delete_oauth_tokens_from_file(&key)?;
    Ok(keyring_removed || file_removed)
}
/// Clonable handle that persists OAuth credentials for one MCP server.
#[derive(Clone)]
pub(crate) struct OAuthPersistor {
    inner: Arc<OAuthPersistorInner>,
}

/// Shared state behind [`OAuthPersistor`].
struct OAuthPersistorInner {
    server_name: String,
    url: String,
    // Live token source — presumably shared with the rmcp transport that
    // performs the OAuth flow; verify against the construction site.
    authorization_manager: Arc<Mutex<AuthorizationManager>>,
    store_mode: OAuthCredentialsStoreMode,
    // Last credentials written to (or loaded from) storage; used by
    // `persist_if_needed` to skip redundant writes.
    last_credentials: Mutex<Option<StoredOAuthTokens>>,
}
impl OAuthPersistor {
    /// Create a persistor for one server; `initial_credentials` seeds the
    /// change-detection cache so an unchanged token is not re-written.
    pub(crate) fn new(
        server_name: String,
        url: String,
        authorization_manager: Arc<Mutex<AuthorizationManager>>,
        store_mode: OAuthCredentialsStoreMode,
        initial_credentials: Option<StoredOAuthTokens>,
    ) -> Self {
        Self {
            inner: Arc::new(OAuthPersistorInner {
                server_name,
                url,
                authorization_manager,
                store_mode,
                last_credentials: Mutex::new(initial_credentials),
            }),
        }
    }
    /// Persists the latest stored credentials if they have changed.
    /// Deletes the credentials if they are no longer present.
    pub(crate) async fn persist_if_needed(&self) -> Result<()> {
        // Read the current credentials while holding only the manager lock;
        // the guard is dropped before `last_credentials` is taken, so the two
        // locks are never held at once.
        let (client_id, maybe_credentials) = {
            let manager = self.inner.authorization_manager.clone();
            let guard = manager.lock().await;
            guard.get_credentials().await
        }?;
        match maybe_credentials {
            Some(credentials) => {
                let mut last_credentials = self.inner.last_credentials.lock().await;
                let new_token_response = WrappedOAuthTokenResponse(credentials.clone());
                // If the token payload is unchanged, keep the previously
                // computed absolute expiry instead of deriving a fresh
                // (slightly later) one from the relative `expires_in`.
                let same_token = last_credentials
                    .as_ref()
                    .map(|prev| prev.token_response == new_token_response)
                    .unwrap_or(false);
                let expires_at = if same_token {
                    last_credentials.as_ref().and_then(|prev| prev.expires_at)
                } else {
                    compute_expires_at_millis(&credentials)
                };
                let stored = StoredOAuthTokens {
                    server_name: self.inner.server_name.clone(),
                    url: self.inner.url.clone(),
                    client_id,
                    token_response: new_token_response,
                    expires_at,
                };
                // Only touch the backing store when something actually changed.
                if last_credentials.as_ref() != Some(&stored) {
                    save_oauth_tokens(&self.inner.server_name, &stored, self.inner.store_mode)?;
                    *last_credentials = Some(stored);
                }
            }
            None => {
                // Credentials were cleared upstream: drop the cached copy and
                // best-effort delete from storage (a failure only logs).
                let mut last_serialized = self.inner.last_credentials.lock().await;
                if last_serialized.take().is_some()
                    && let Err(error) = delete_oauth_tokens(
                        &self.inner.server_name,
                        &self.inner.url,
                        self.inner.store_mode,
                    )
                {
                    warn!(
                        "failed to remove OAuth tokens for server {}: {error}",
                        self.inner.server_name
                    );
                }
            }
        }
        Ok(())
    }
    /// Refresh the access token when `token_needs_refresh` says the cached
    /// expiry is within the refresh skew (or already past), then persist the
    /// refreshed tokens via `persist_if_needed`.
    pub(crate) async fn refresh_if_needed(&self) -> Result<()> {
        let expires_at = {
            let guard = self.inner.last_credentials.lock().await;
            guard.as_ref().and_then(|tokens| tokens.expires_at)
        };
        if !token_needs_refresh(expires_at) {
            return Ok(());
        }
        {
            // Scope the manager lock so it is released before persisting.
            let manager = self.inner.authorization_manager.clone();
            let guard = manager.lock().await;
            guard.refresh_token().await.with_context(|| {
                format!(
                    "failed to refresh OAuth tokens for server {}",
                    self.inner.server_name
                )
            })?;
        }
        self.persist_if_needed().await
    }
}
/// File name of the fallback credential store inside CODEX_HOME.
const FALLBACK_FILENAME: &str = ".credentials.json";
/// Server type recorded in the hashed key payload (see `compute_store_key`).
const MCP_SERVER_TYPE: &str = "http";

/// On-disk shape of the fallback store: store key -> token entry.
type FallbackFile = BTreeMap<String, FallbackTokenEntry>;

/// Flattened token record persisted in the fallback credentials file.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct FallbackTokenEntry {
    server_name: String,
    server_url: String,
    client_id: String,
    access_token: String,
    // Milliseconds since the Unix epoch; absent for tokens without expiry.
    #[serde(default)]
    expires_at: Option<u64>,
    #[serde(default)]
    refresh_token: Option<String>,
    #[serde(default)]
    scopes: Vec<String>,
}
/// Look up tokens for `server_name`/`url` in the fallback credentials file.
///
/// Matching recomputes the store key from each entry's own name/url rather
/// than trusting the map key persisted on disk. The flat entry is rebuilt
/// into an `OAuthTokenResponse`, and the relative `expires_in` is rederived
/// from the stored absolute expiry before returning.
fn load_oauth_tokens_from_file(server_name: &str, url: &str) -> Result<Option<StoredOAuthTokens>> {
    let Some(store) = read_fallback_file()? else {
        return Ok(None);
    };
    let key = compute_store_key(server_name, url)?;
    for entry in store.values() {
        let entry_key = compute_store_key(&entry.server_name, &entry.server_url)?;
        if entry_key != key {
            continue;
        }
        let mut token_response = OAuthTokenResponse::new(
            AccessToken::new(entry.access_token.clone()),
            BasicTokenType::Bearer,
            EmptyExtraTokenFields {},
        );
        if let Some(refresh) = entry.refresh_token.clone() {
            token_response.set_refresh_token(Some(RefreshToken::new(refresh)));
        }
        let scopes = entry.scopes.clone();
        if !scopes.is_empty() {
            token_response.set_scopes(Some(scopes.into_iter().map(Scope::new).collect()));
        }
        let mut stored = StoredOAuthTokens {
            server_name: entry.server_name.clone(),
            url: entry.server_url.clone(),
            client_id: entry.client_id.clone(),
            token_response: WrappedOAuthTokenResponse(token_response),
            expires_at: entry.expires_at,
        };
        refresh_expires_in_from_timestamp(&mut stored);
        return Ok(Some(stored));
    }
    Ok(None)
}
/// Flatten `tokens` into a [`FallbackTokenEntry`] and upsert it into the
/// credentials file, keyed by the computed store key.
///
/// The absolute expiry is taken from `tokens.expires_at` when present,
/// otherwise derived from the response's relative `expires_in`.
fn save_oauth_tokens_to_file(tokens: &StoredOAuthTokens) -> Result<()> {
    let key = compute_store_key(&tokens.server_name, &tokens.url)?;
    let mut store = read_fallback_file()?.unwrap_or_default();
    let token_response = &tokens.token_response.0;
    let expires_at = tokens
        .expires_at
        .or_else(|| compute_expires_at_millis(token_response));
    let refresh_token = token_response
        .refresh_token()
        .map(|token| token.secret().to_string());
    let scopes = token_response
        .scopes()
        .map(|s| s.iter().map(|s| s.to_string()).collect())
        .unwrap_or_default();
    let entry = FallbackTokenEntry {
        server_name: tokens.server_name.clone(),
        server_url: tokens.url.clone(),
        client_id: tokens.client_id.clone(),
        access_token: token_response.access_token().secret().to_string(),
        expires_at,
        refresh_token,
        scopes,
    };
    store.insert(key, entry);
    write_fallback_file(&store)
}
/// Remove the entry for `key` from the fallback credentials file.
///
/// Returns `true` when an entry was present and removed; the file is only
/// rewritten in that case.
fn delete_oauth_tokens_from_file(key: &str) -> Result<bool> {
    let Some(mut store) = read_fallback_file()? else {
        return Ok(false);
    };
    if store.remove(key).is_none() {
        return Ok(false);
    }
    write_fallback_file(&store)?;
    Ok(true)
}
/// Convert the relative `expires_in` on `response` into an absolute expiry in
/// milliseconds since the Unix epoch, saturating at `u64::MAX`.
///
/// Returns `None` when the response has no expiry, or when adding the TTL to
/// the current time would overflow `Duration`.
pub(crate) fn compute_expires_at_millis(response: &OAuthTokenResponse) -> Option<u64> {
    let ttl = response.expires_in()?;
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or(Duration::ZERO);
    let expiry_ms = now.checked_add(ttl)?.as_millis();
    Some(u64::try_from(expiry_ms).unwrap_or(u64::MAX))
}
/// Compute the remaining lifetime, in whole seconds, of a token expiring at
/// `expires_at` (milliseconds since the Unix epoch).
///
/// Returns `None` when the deadline is now or already in the past.
fn expires_in_from_timestamp(expires_at: u64) -> Option<u64> {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    expires_at
        .checked_sub(now_ms)
        .filter(|&remaining_ms| remaining_ms > 0)
        .map(|remaining_ms| remaining_ms / 1000)
}
fn token_needs_refresh(expires_at: Option<u64>) -> bool {
let Some(expires_at) = expires_at else {
return false;
};
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0))
.as_millis() as u64;
now.saturating_add(REFRESH_SKEW_MILLIS) >= expires_at
}
/// Derive the storage key used for both the keyring and the fallback file:
/// `"{server_name}|{hash_prefix}"`, where the hash prefix covers a JSON
/// object `{"type": "http", "url": <server_url>, "headers": {}}`.
fn compute_store_key(server_name: &str, server_url: &str) -> Result<String> {
    let mut payload = JsonMap::new();
    payload.insert(
        "type".to_string(),
        Value::String(MCP_SERVER_TYPE.to_string()),
    );
    payload.insert("url".to_string(), Value::String(server_url.to_string()));
    // NOTE(review): headers are always hashed as an empty object here —
    // presumably reserved so header-differentiated servers could key
    // distinctly later; confirm before relying on it.
    payload.insert("headers".to_string(), Value::Object(JsonMap::new()));
    let truncated = sha_256_prefix(&Value::Object(payload))?;
    Ok(format!("{server_name}|{truncated}"))
}
/// Path of the fallback credentials file: `CODEX_HOME/.credentials.json`.
fn fallback_file_path() -> Result<PathBuf> {
    Ok(find_codex_home()?.join(FALLBACK_FILENAME))
}
/// Read and parse the fallback credentials file.
///
/// Returns `Ok(None)` when the file does not exist; any other I/O failure or
/// JSON parse error is returned with the path attached for context.
fn read_fallback_file() -> Result<Option<FallbackFile>> {
    let path = fallback_file_path()?;
    let contents = match fs::read_to_string(&path) {
        Ok(contents) => contents,
        Err(err) if err.kind() == ErrorKind::NotFound => return Ok(None),
        Err(err) => {
            return Err(err).context(format!(
                "failed to read credentials file at {}",
                path.display()
            ));
        }
    };
    match serde_json::from_str::<FallbackFile>(&contents) {
        Ok(store) => Ok(Some(store)),
        Err(e) => Err(e).context(format!(
            "failed to parse credentials file at {}",
            path.display()
        )),
    }
}
fn write_fallback_file(store: &FallbackFile) -> Result<()> {
let path = fallback_file_path()?;
if store.is_empty() {
if path.exists() {
fs::remove_file(path)?;
}
return Ok(());
}
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)?;
}
let serialized = serde_json::to_string(store)?;
fs::write(&path, serialized)?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let perms = fs::Permissions::from_mode(0o600);
fs::set_permissions(&path, perms)?;
}
Ok(())
}
/// Serialize `value` to JSON and return the first 16 hex characters of its
/// SHA-256 digest.
fn sha_256_prefix(value: &Value) -> Result<String> {
    let serialized =
        serde_json::to_string(value).context("failed to serialize MCP OAuth key payload")?;
    let digest = Sha256::digest(serialized.as_bytes());
    let mut hex = format!("{digest:x}");
    hex.truncate(16);
    Ok(hex)
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::Result;
use keyring::Error as KeyringError;
use pretty_assertions::assert_eq;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::sync::OnceLock;
use std::sync::PoisonError;
use tempfile::tempdir;
use codex_keyring_store::tests::MockKeyringStore;
struct TempCodexHome {
_guard: MutexGuard<'static, ()>,
_dir: tempfile::TempDir,
}
impl TempCodexHome {
fn new() -> Self {
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
let guard = LOCK
.get_or_init(Mutex::default)
.lock()
.unwrap_or_else(PoisonError::into_inner);
let dir = tempdir().expect("create CODEX_HOME temp dir");
unsafe {
std::env::set_var("CODEX_HOME", dir.path());
}
Self {
_guard: guard,
_dir: dir,
}
}
}
impl Drop for TempCodexHome {
fn drop(&mut self) {
unsafe {
std::env::remove_var("CODEX_HOME");
}
}
}
#[test]
fn load_oauth_tokens_reads_from_keyring_when_available() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let expected = tokens.clone();
let serialized = serde_json::to_string(&tokens)?;
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.save(KEYRING_SERVICE, &key, &serialized)?;
let loaded =
super::load_oauth_tokens_from_keyring(&store, &tokens.server_name, &tokens.url)?
.expect("tokens should load from keyring");
assert_tokens_match_without_expiry(&loaded, &expected);
Ok(())
}
#[test]
fn load_oauth_tokens_falls_back_when_missing_in_keyring() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let expected = tokens.clone();
super::save_oauth_tokens_to_file(&tokens)?;
let loaded = super::load_oauth_tokens_from_keyring_with_fallback_to_file(
&store,
&tokens.server_name,
&tokens.url,
)?
.expect("tokens should load from fallback");
assert_tokens_match_without_expiry(&loaded, &expected);
Ok(())
}
#[test]
fn load_oauth_tokens_falls_back_when_keyring_errors() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let expected = tokens.clone();
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.set_error(&key, KeyringError::Invalid("error".into(), "load".into()));
super::save_oauth_tokens_to_file(&tokens)?;
let loaded = super::load_oauth_tokens_from_keyring_with_fallback_to_file(
&store,
&tokens.server_name,
&tokens.url,
)?
.expect("tokens should load from fallback");
assert_tokens_match_without_expiry(&loaded, &expected);
Ok(())
}
#[test]
fn save_oauth_tokens_prefers_keyring_when_available() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
super::save_oauth_tokens_to_file(&tokens)?;
super::save_oauth_tokens_with_keyring_with_fallback_to_file(
&store,
&tokens.server_name,
&tokens,
)?;
let fallback_path = super::fallback_file_path()?;
assert!(!fallback_path.exists(), "fallback file should be removed");
let stored = store.saved_value(&key).expect("value saved to keyring");
assert_eq!(serde_json::from_str::<StoredOAuthTokens>(&stored)?, tokens);
Ok(())
}
#[test]
fn save_oauth_tokens_writes_fallback_when_keyring_fails() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.set_error(&key, KeyringError::Invalid("error".into(), "save".into()));
super::save_oauth_tokens_with_keyring_with_fallback_to_file(
&store,
&tokens.server_name,
&tokens,
)?;
let fallback_path = super::fallback_file_path()?;
assert!(fallback_path.exists(), "fallback file should be created");
let saved = super::read_fallback_file()?.expect("fallback file should load");
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
let entry = saved.get(&key).expect("entry for key");
assert_eq!(entry.server_name, tokens.server_name);
assert_eq!(entry.server_url, tokens.url);
assert_eq!(entry.client_id, tokens.client_id);
assert_eq!(
entry.access_token,
tokens.token_response.0.access_token().secret().as_str()
);
assert!(store.saved_value(&key).is_none());
Ok(())
}
#[test]
fn delete_oauth_tokens_removes_all_storage() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let serialized = serde_json::to_string(&tokens)?;
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.save(KEYRING_SERVICE, &key, &serialized)?;
super::save_oauth_tokens_to_file(&tokens)?;
let removed = super::delete_oauth_tokens_from_keyring_and_file(
&store,
OAuthCredentialsStoreMode::Auto,
&tokens.server_name,
&tokens.url,
)?;
assert!(removed);
assert!(!store.contains(&key));
assert!(!super::fallback_file_path()?.exists());
Ok(())
}
#[test]
fn delete_oauth_tokens_file_mode_removes_keyring_only_entry() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let serialized = serde_json::to_string(&tokens)?;
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.save(KEYRING_SERVICE, &key, &serialized)?;
assert!(store.contains(&key));
let removed = super::delete_oauth_tokens_from_keyring_and_file(
&store,
OAuthCredentialsStoreMode::Auto,
&tokens.server_name,
&tokens.url,
)?;
assert!(removed);
assert!(!store.contains(&key));
assert!(!super::fallback_file_path()?.exists());
Ok(())
}
#[test]
fn delete_oauth_tokens_propagates_keyring_errors() -> Result<()> {
let _env = TempCodexHome::new();
let store = MockKeyringStore::default();
let tokens = sample_tokens();
let key = super::compute_store_key(&tokens.server_name, &tokens.url)?;
store.set_error(&key, KeyringError::Invalid("error".into(), "delete".into()));
super::save_oauth_tokens_to_file(&tokens).unwrap();
let result = super::delete_oauth_tokens_from_keyring_and_file(
&store,
OAuthCredentialsStoreMode::Auto,
&tokens.server_name,
&tokens.url,
);
assert!(result.is_err());
assert!(super::fallback_file_path().unwrap().exists());
Ok(())
}
#[test]
fn refresh_expires_in_from_timestamp_restores_future_durations() {
let mut tokens = sample_tokens();
let expires_at = tokens.expires_at.expect("expires_at should be set");
tokens.token_response.0.set_expires_in(None);
super::refresh_expires_in_from_timestamp(&mut tokens);
let actual = tokens
.token_response
.0
.expires_in()
.expect("expires_in should be restored")
.as_secs();
let expected = super::expires_in_from_timestamp(expires_at)
.expect("expires_at should still be in the future");
let diff = actual.abs_diff(expected);
assert!(diff <= 1, "expires_in drift too large: diff={diff}");
}
#[test]
fn refresh_expires_in_from_timestamp_clears_expired_tokens() {
let mut tokens = sample_tokens();
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_else(|_| Duration::from_secs(0));
let expired_at = now.as_millis() as u64;
tokens.expires_at = Some(expired_at.saturating_sub(1000));
let duration = Duration::from_secs(600);
tokens.token_response.0.set_expires_in(Some(&duration));
super::refresh_expires_in_from_timestamp(&mut tokens);
assert!(tokens.token_response.0.expires_in().is_none());
}
fn assert_tokens_match_without_expiry(
actual: &StoredOAuthTokens,
expected: &StoredOAuthTokens,
) {
assert_eq!(actual.server_name, expected.server_name);
assert_eq!(actual.url, expected.url);
assert_eq!(actual.client_id, expected.client_id);
assert_eq!(actual.expires_at, expected.expires_at);
assert_token_response_match_without_expiry(
&actual.token_response,
&expected.token_response,
);
}
fn assert_token_response_match_without_expiry(
actual: &WrappedOAuthTokenResponse,
expected: &WrappedOAuthTokenResponse,
) {
let actual_response = &actual.0;
let expected_response = &expected.0;
assert_eq!(
actual_response.access_token().secret(),
expected_response.access_token().secret()
);
assert_eq!(actual_response.token_type(), expected_response.token_type());
assert_eq!(
actual_response.refresh_token().map(RefreshToken::secret),
expected_response.refresh_token().map(RefreshToken::secret),
);
assert_eq!(actual_response.scopes(), expected_response.scopes());
assert_eq!(
actual_response.extra_fields(),
expected_response.extra_fields()
);
assert_eq!(
actual_response.expires_in().is_some(),
expected_response.expires_in().is_some()
);
}
fn sample_tokens() -> StoredOAuthTokens {
let mut response = OAuthTokenResponse::new(
AccessToken::new("access-token".to_string()),
BasicTokenType::Bearer,
EmptyExtraTokenFields {},
);
response.set_refresh_token(Some(RefreshToken::new("refresh-token".to_string())));
response.set_scopes(Some(vec![
Scope::new("scope-a".to_string()),
Scope::new("scope-b".to_string()),
]));
let expires_in = Duration::from_secs(3600);
response.set_expires_in(Some(&expires_in));
let expires_at = super::compute_expires_at_millis(&response);
StoredOAuthTokens {
server_name: "test-server".to_string(),
url: "https://example.test".to_string(),
client_id: "client-id".to_string(),
token_response: WrappedOAuthTokenResponse(response),
expires_at,
}
}
}
//! Platform-specific program resolution for MCP server execution.
//!
//! This module provides a unified interface for resolving executable paths
//! across different operating systems. The key challenge it addresses is that
//! Windows cannot execute script files (e.g., `.cmd`, `.bat`) directly through
//! `Command::new()` without their file extensions, while Unix systems handle
//! scripts natively through shebangs.
//!
//! The `resolve` function abstracts these platform differences:
//! - On Unix: Returns the program unchanged (OS handles script execution)
//! - On Windows: Uses the `which` crate to resolve full paths including extensions
use std::collections::HashMap;
use std::ffi::OsString;
#[cfg(windows)]
use std::env;
#[cfg(windows)]
use tracing::debug;
/// Resolves a program to its executable path on Unix systems.
///
/// Unix systems handle PATH resolution and script execution natively through
/// the kernel's shebang (`#!`) mechanism, so this function simply returns
/// the program name unchanged.
#[cfg(unix)]
pub fn resolve(program: OsString, _env: &HashMap<String, String>) -> std::io::Result<OsString> {
    // No lookup needed: the OS performs PATH search and shebang handling at
    // exec time, so the caller-provided environment map is unused here.
    Ok(program)
}
/// Resolves a program to its executable path on Windows systems.
///
/// Windows requires explicit file extensions for script execution. This function
/// uses the `which` crate to search the `PATH` environment variable and find
/// the full path to the executable, including necessary script extensions
/// (`.cmd`, `.bat`, etc.) defined in `PATHEXT`.
///
/// This enables tools like `npx`, `pnpm`, and `yarn` to work correctly on Windows
/// without requiring users to specify full paths or extensions in their configuration.
///
/// # Errors
///
/// Only fails when the current working directory cannot be determined; a
/// failed PATH lookup falls back to returning the original program name so
/// that `Command::new` reports the spawn error instead.
#[cfg(windows)]
pub fn resolve(program: OsString, env: &HashMap<String, String>) -> std::io::Result<OsString> {
    // Get current directory for relative path resolution
    let cwd = env::current_dir()
        .map_err(|e| std::io::Error::other(format!("Failed to get current directory: {e}")))?;
    // Extract PATH from environment for search locations
    // (the caller-provided env map, not the process environment).
    let search_path = env.get("PATH");
    // Attempt resolution via which crate
    match which::which_in(&program, search_path, &cwd) {
        Ok(resolved) => {
            debug!("Resolved {:?} to {:?}", program, resolved);
            Ok(resolved.into_os_string())
        }
        Err(e) => {
            debug!(
                "Failed to resolve {:?}: {}. Using original path",
                program, e
            );
            // Fallback to original program - let Command::new() handle the error
            Ok(program)
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::create_env_for_mcp_server;
use anyhow::Result;
use std::fs;
use std::path::Path;
use tempfile::TempDir;
use tokio::process::Command;
/// Unix: Verifies the OS handles script execution without file extensions.
#[cfg(unix)]
#[tokio::test]
async fn test_unix_executes_script_without_extension() -> Result<()> {
let env = TestExecutableEnv::new()?;
let mut cmd = Command::new(&env.program_name);
cmd.envs(&env.mcp_env);
let output = cmd.output().await;
assert!(output.is_ok(), "Unix should execute scripts directly");
Ok(())
}
/// Windows: Verifies scripts fail to execute without the proper extension.
#[cfg(windows)]
#[tokio::test]
async fn test_windows_fails_without_extension() -> Result<()> {
let env = TestExecutableEnv::new()?;
let mut cmd = Command::new(&env.program_name);
cmd.envs(&env.mcp_env);
let output = cmd.output().await;
assert!(
output.is_err(),
"Windows requires .cmd/.bat extension for direct execution"
);
Ok(())
}
/// Windows: Verifies scripts with an explicit extension execute correctly.
#[cfg(windows)]
#[tokio::test]
async fn test_windows_succeeds_with_extension() -> Result<()> {
let env = TestExecutableEnv::new()?;
// Append the `.cmd` extension to the program name
let program_with_ext = format!("{}.cmd", env.program_name);
let mut cmd = Command::new(&program_with_ext);
cmd.envs(&env.mcp_env);
let output = cmd.output().await;
assert!(
output.is_ok(),
"Windows should execute scripts when the extension is provided"
);
Ok(())
}
/// Verifies program resolution enables successful execution on all platforms.
#[tokio::test]
async fn test_resolved_program_executes_successfully() -> Result<()> {
let env = TestExecutableEnv::new()?;
let program = OsString::from(&env.program_name);
// Apply platform-specific resolution
let resolved = resolve(program, &env.mcp_env)?;
// Verify resolved path executes successfully
let mut cmd = Command::new(resolved);
cmd.envs(&env.mcp_env);
let output = cmd.output().await;
assert!(
output.is_ok(),
"Resolved program should execute successfully"
);
Ok(())
}
// Test fixture for creating temporary executables in a controlled environment.
struct TestExecutableEnv {
// Held to prevent the temporary directory from being deleted.
_temp_dir: TempDir,
program_name: String,
mcp_env: HashMap<String, String>,
}
impl TestExecutableEnv {
const TEST_PROGRAM: &'static str = "test_mcp_server";
fn new() -> Result<Self> {
let temp_dir = TempDir::new()?;
let dir_path = temp_dir.path();
Self::create_executable(dir_path)?;
// Build a clean environment with the temp dir in the PATH.
let mut extra_env = HashMap::new();
extra_env.insert("PATH".to_string(), Self::build_path(dir_path));
#[cfg(windows)]
extra_env.insert("PATHEXT".to_string(), Self::ensure_cmd_extension());
let mcp_env = create_env_for_mcp_server(Some(extra_env), &[]);
Ok(Self {
_temp_dir: temp_dir,
program_name: Self::TEST_PROGRAM.to_string(),
mcp_env,
})
}
/// Creates a simple, platform-specific executable script.
fn create_executable(dir: &Path) -> Result<()> {
#[cfg(windows)]
{
let file = dir.join(format!("{}.cmd", Self::TEST_PROGRAM));
fs::write(&file, "@echo off\nexit 0")?;
}
#[cfg(unix)]
{
let file = dir.join(Self::TEST_PROGRAM);
fs::write(&file, "#!/bin/sh\nexit 0")?;
Self::set_executable(&file)?;
}
Ok(())
}
#[cfg(unix)]
fn set_executable(path: &Path) -> Result<()> {
use std::os::unix::fs::PermissionsExt;
let mut perms = fs::metadata(path)?.permissions();
perms.set_mode(0o755);
fs::set_permissions(path, perms)?;
Ok(())
}
/// Prepends the given directory to the system's PATH variable.
fn build_path(dir: &Path) -> String {
let current = std::env::var("PATH").unwrap_or_default();
let sep = if cfg!(windows) { ";" } else { ":" };
format!("{}{sep}{current}", dir.to_string_lossy())
}
/// Ensures `.CMD` is in the `PATHEXT` variable on Windows for script discovery.
#[cfg(windows)]
fn ensure_cmd_extension() -> String {
let current = std::env::var("PATHEXT").unwrap_or_default();
if current.to_uppercase().contains(".CMD") {
current
} else {
format!(".CMD;{current}")
}
}
}
}
use std::collections::HashMap;
use std::env;
use std::time::Duration;
use anyhow::Context;
use anyhow::Result;
use anyhow::anyhow;
use mcp_types::CallToolResult;
use reqwest::ClientBuilder;
use reqwest::header::HeaderMap;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
use rmcp::model::CallToolResult as RmcpCallToolResult;
use rmcp::service::ServiceError;
use serde_json::Value;
use tokio::time;
/// Await `fut`, optionally bounding it with `timeout`.
///
/// `label` names the operation in error messages. A `None` timeout awaits the
/// future indefinitely; an elapsed timeout and a `ServiceError` both surface
/// as `anyhow` errors tagged with `label`.
pub(crate) async fn run_with_timeout<F, T>(
    fut: F,
    timeout: Option<Duration>,
    label: &str,
) -> Result<T>
where
    F: std::future::Future<Output = Result<T, ServiceError>>,
{
    let outcome = match timeout {
        None => fut.await,
        Some(duration) => time::timeout(duration, fut)
            .await
            .with_context(|| anyhow!("timed out awaiting {label} after {duration:?}"))?,
    };
    outcome.map_err(|err| anyhow!("{label} failed: {err}"))
}
/// Convert an rmcp `CallToolResult` into the mcp-types `CallToolResult`.
///
/// Round-trips through `serde_json::Value`, inserting an empty `content`
/// array when the field is absent or `null` — presumably because the
/// mcp-types deserializer requires `content` to be present; verify against
/// `mcp_types::CallToolResult`.
pub(crate) fn convert_call_tool_result(result: RmcpCallToolResult) -> Result<CallToolResult> {
    let mut value = serde_json::to_value(result)?;
    if let Some(obj) = value.as_object_mut()
        && (obj.get("content").is_none()
            || obj.get("content").is_some_and(serde_json::Value::is_null))
    {
        obj.insert("content".to_string(), Value::Array(Vec::new()));
    }
    serde_json::from_value(value).context("failed to convert call tool result")
}
/// Convert from mcp-types to Rust SDK types.
///
/// Both type families are generated from the same MCP specification, so a
/// JSON round-trip is a safe structural conversion between them.
pub(crate) fn convert_to_rmcp<T, U>(value: T) -> Result<U>
where
    T: serde::Serialize,
    U: serde::de::DeserializeOwned,
{
    serde_json::to_value(value)
        .and_then(serde_json::from_value)
        .map_err(|err| anyhow!(err))
}
/// Convert from Rust SDK types to mcp-types.
///
/// Both type families are generated from the same MCP specification, so a
/// JSON round-trip is a safe structural conversion between them.
pub(crate) fn convert_to_mcp<T, U>(value: T) -> Result<U>
where
    T: serde::Serialize,
    U: serde::de::DeserializeOwned,
{
    serde_json::to_value(value)
        .and_then(serde_json::from_value)
        .map_err(|err| anyhow!(err))
}
/// Build the environment map handed to a spawned MCP server.
///
/// Starts from the platform allow-list (`DEFAULT_ENV_VARS`) plus any
/// caller-supplied `env_vars`, copying values from the current process
/// environment when present, then layers `extra_env` overrides on top
/// (overrides win on key collisions).
pub(crate) fn create_env_for_mcp_server(
    extra_env: Option<HashMap<String, String>>,
    env_vars: &[String],
) -> HashMap<String, String> {
    let mut result = HashMap::new();
    for var in DEFAULT_ENV_VARS
        .iter()
        .copied()
        .chain(env_vars.iter().map(String::as_str))
    {
        if let Ok(value) = env::var(var) {
            result.insert(var.to_string(), value);
        }
    }
    if let Some(overrides) = extra_env {
        result.extend(overrides);
    }
    result
}
/// Assemble the default HTTP header map for a streamable MCP server.
///
/// `http_headers` supplies static name/value pairs; `env_http_headers` maps a
/// header name to an environment variable whose value is read at call time.
/// Invalid header names/values are logged and skipped rather than failing the
/// whole map; env-sourced values that are unset or blank are silently ignored.
pub(crate) fn build_default_headers(
    http_headers: Option<HashMap<String, String>>,
    env_http_headers: Option<HashMap<String, String>>,
) -> Result<HeaderMap> {
    let mut headers = HeaderMap::new();
    if let Some(static_headers) = http_headers {
        for (name, value) in static_headers {
            insert_header(&mut headers, &name, &value, None);
        }
    }
    if let Some(env_headers) = env_http_headers {
        for (name, env_var) in env_headers {
            if let Ok(value) = env::var(&env_var) {
                // Skip unset-or-blank values; blank headers are never useful.
                if value.trim().is_empty() {
                    continue;
                }
                insert_header(&mut headers, &name, &value, Some(&env_var));
            }
        }
    }
    Ok(headers)
}

/// Parse and insert a single header, logging (and skipping) invalid names or
/// values. `env_var` is the environment variable the value came from, if any;
/// it is included in the log message to aid debugging.
fn insert_header(headers: &mut HeaderMap, name: &str, value: &str, env_var: Option<&str>) {
    let header_name = match HeaderName::from_bytes(name.as_bytes()) {
        Ok(name) => name,
        Err(err) => {
            tracing::warn!("invalid HTTP header name `{name}`: {err}");
            return;
        }
    };
    let header_value = match HeaderValue::from_str(value) {
        Ok(value) => value,
        Err(err) => {
            match env_var {
                Some(env_var) => tracing::warn!(
                    "invalid HTTP header value read from {env_var} for `{name}`: {err}"
                ),
                None => tracing::warn!("invalid HTTP header value for `{name}`: {err}"),
            }
            return;
        }
    };
    headers.insert(header_name, header_value);
}
/// Attach `default_headers` to `builder`, returning the builder untouched
/// when the map is empty.
pub(crate) fn apply_default_headers(
    builder: ClientBuilder,
    default_headers: &HeaderMap,
) -> ClientBuilder {
    if default_headers.is_empty() {
        return builder;
    }
    builder.default_headers(default_headers.clone())
}
/// Environment variables forwarded to spawned MCP servers by default (Unix).
#[cfg(unix)]
pub(crate) const DEFAULT_ENV_VARS: &[&str] = &[
    // User identity and home directory
    "HOME",
    "LOGNAME",
    // Executable lookup and shell
    "PATH",
    "SHELL",
    "USER",
    "__CF_USER_TEXT_ENCODING",
    // Locale, terminal, temp dir, and timezone
    "LANG",
    "LC_ALL",
    "TERM",
    "TMPDIR",
    "TZ",
];
/// Environment variables forwarded to spawned MCP servers by default (Windows).
#[cfg(windows)]
pub(crate) const DEFAULT_ENV_VARS: &[&str] = &[
    // Core path resolution
    "PATH",
    "PATHEXT",
    // Shell and system roots
    "COMSPEC",
    "SYSTEMROOT",
    "SYSTEMDRIVE",
    // User context and profiles
    "USERNAME",
    "USERDOMAIN",
    "USERPROFILE",
    "HOMEDRIVE",
    "HOMEPATH",
    // Program locations
    "PROGRAMFILES",
    "PROGRAMFILES(X86)",
    "PROGRAMW6432",
    "PROGRAMDATA",
    // App data and caches
    "LOCALAPPDATA",
    "APPDATA",
    // Temp locations
    "TEMP",
    "TMP",
    // Common shells/pwsh hints
    "POWERSHELL",
    "PWSH",
];
#[cfg(test)]
mod tests {
    use super::*;
    use mcp_types::ContentBlock;
    use pretty_assertions::assert_eq;
    use rmcp::model::CallToolResult as RmcpCallToolResult;
    use serde_json::json;
    use serial_test::serial;
    use std::ffi::OsString;
    /// RAII guard that sets an environment variable for the duration of a
    /// test and restores (or removes) it on drop.
    struct EnvVarGuard {
        // Variable name being guarded.
        key: String,
        // Value before the guard was installed, if any.
        original: Option<OsString>,
    }
    impl EnvVarGuard {
        /// Set `key` to `value`, remembering the previous value for restore.
        fn set(key: &str, value: &str) -> Self {
            let original = std::env::var_os(key);
            // set_var is `unsafe` on this toolchain: mutating the process
            // environment is not thread-safe. Tests using this guard are
            // expected to serialize access (see `#[serial]` below).
            unsafe {
                std::env::set_var(key, value);
            }
            Self {
                key: key.to_string(),
                original,
            }
        }
    }
    impl Drop for EnvVarGuard {
        // Restore the previous value, or remove the variable entirely if it
        // was not set before the guard.
        fn drop(&mut self) {
            if let Some(value) = &self.original {
                unsafe {
                    std::env::set_var(&self.key, value);
                }
            } else {
                unsafe {
                    std::env::remove_var(&self.key);
                }
            }
        }
    }
    /// `extra_env` entries must override values inherited from the process
    /// environment.
    #[tokio::test]
    async fn create_env_honors_overrides() {
        let value = "custom".to_string();
        let env =
            create_env_for_mcp_server(Some(HashMap::from([("TZ".into(), value.clone())])), &[]);
        assert_eq!(env.get("TZ"), Some(&value));
    }
    /// Variables named in `env_vars` (beyond the default allow-list) must be
    /// forwarded when present in the environment.
    #[test]
    #[serial(extra_rmcp_env)]
    fn create_env_includes_additional_whitelisted_variables() {
        let custom_var = "EXTRA_RMCP_ENV";
        let value = "from-env";
        let _guard = EnvVarGuard::set(custom_var, value);
        let env = create_env_for_mcp_server(None, &[custom_var.to_string()]);
        assert_eq!(env.get(custom_var), Some(&value.to_string()));
    }
    /// A result with no content blocks must deserialize with an empty
    /// `content` array rather than failing.
    #[test]
    fn convert_call_tool_result_defaults_missing_content() -> Result<()> {
        let structured_content = json!({ "key": "value" });
        let rmcp_result = RmcpCallToolResult {
            content: vec![],
            structured_content: Some(structured_content.clone()),
            is_error: Some(true),
            meta: None,
        };
        let result = convert_call_tool_result(rmcp_result)?;
        assert!(result.content.is_empty());
        assert_eq!(result.structured_content, Some(structured_content));
        assert_eq!(result.is_error, Some(true));
        Ok(())
    }
    /// Existing content blocks must survive the conversion untouched.
    #[test]
    fn convert_call_tool_result_preserves_existing_content() -> Result<()> {
        let rmcp_result = RmcpCallToolResult::success(vec![rmcp::model::Content::text("hello")]);
        let result = convert_call_tool_result(rmcp_result)?;
        assert_eq!(result.content.len(), 1);
        match &result.content[0] {
            ContentBlock::TextContent(text_content) => {
                assert_eq!(text_content.text, "hello");
                assert_eq!(text_content.r#type, "text");
            }
            other => panic!("expected text content got {other:?}"),
        }
        assert_eq!(result.structured_content, None);
        assert_eq!(result.is_error, Some(false));
        Ok(())
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/find_codex_home.rs | codex-rs/rmcp-client/src/find_codex_home.rs | use dirs::home_dir;
use std::path::PathBuf;
/// This was copied from codex-core but codex-core depends on this crate.
/// TODO: move this to a shared crate lower in the dependency tree.
///
///
/// Returns the path to the Codex configuration directory, which can be
/// specified by the `CODEX_HOME` environment variable. If not set, defaults to
/// `~/.codex`.
///
/// - If `CODEX_HOME` is set, the value will be canonicalized and this
/// function will Err if the path does not exist.
/// - If `CODEX_HOME` is not set, this function does not verify that the
/// directory exists.
pub(crate) fn find_codex_home() -> std::io::Result<PathBuf> {
// Honor the `CODEX_HOME` environment variable when it is set to allow users
// (and tests) to override the default location.
if let Ok(val) = std::env::var("CODEX_HOME")
&& !val.is_empty()
{
return PathBuf::from(val).canonicalize();
}
let mut p = home_dir().ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::NotFound,
"Could not find home directory",
)
})?;
p.push(".codex");
Ok(p)
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/rmcp_client.rs | codex-rs/rmcp-client/src/rmcp_client.rs | use std::collections::HashMap;
use std::ffi::OsString;
use std::io;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use futures::FutureExt;
use futures::future::BoxFuture;
use mcp_types::CallToolRequestParams;
use mcp_types::CallToolResult;
use mcp_types::InitializeRequestParams;
use mcp_types::InitializeResult;
use mcp_types::ListResourceTemplatesRequestParams;
use mcp_types::ListResourceTemplatesResult;
use mcp_types::ListResourcesRequestParams;
use mcp_types::ListResourcesResult;
use mcp_types::ListToolsRequestParams;
use mcp_types::ListToolsResult;
use mcp_types::ReadResourceRequestParams;
use mcp_types::ReadResourceResult;
use mcp_types::RequestId;
use reqwest::header::HeaderMap;
use rmcp::model::CallToolRequestParam;
use rmcp::model::ClientNotification;
use rmcp::model::ClientRequest;
use rmcp::model::CreateElicitationRequestParam;
use rmcp::model::CreateElicitationResult;
use rmcp::model::CustomNotification;
use rmcp::model::CustomRequest;
use rmcp::model::Extensions;
use rmcp::model::InitializeRequestParam;
use rmcp::model::PaginatedRequestParam;
use rmcp::model::ReadResourceRequestParam;
use rmcp::model::ServerResult;
use rmcp::service::RoleClient;
use rmcp::service::RunningService;
use rmcp::service::{self};
use rmcp::transport::StreamableHttpClientTransport;
use rmcp::transport::auth::AuthClient;
use rmcp::transport::auth::OAuthState;
use rmcp::transport::child_process::TokioChildProcess;
use rmcp::transport::streamable_http_client::StreamableHttpClientTransportConfig;
use tokio::io::AsyncBufReadExt;
use tokio::io::BufReader;
use tokio::process::Command;
use tokio::sync::Mutex;
use tokio::time;
use tracing::info;
use tracing::warn;
use crate::load_oauth_tokens;
use crate::logging_client_handler::LoggingClientHandler;
use crate::oauth::OAuthCredentialsStoreMode;
use crate::oauth::OAuthPersistor;
use crate::oauth::StoredOAuthTokens;
use crate::program_resolver;
use crate::utils::apply_default_headers;
use crate::utils::build_default_headers;
use crate::utils::convert_call_tool_result;
use crate::utils::convert_to_mcp;
use crate::utils::convert_to_rmcp;
use crate::utils::create_env_for_mcp_server;
use crate::utils::run_with_timeout;
/// Transport created by a constructor and held until `initialize` consumes it
/// during the MCP handshake.
enum PendingTransport {
    /// Spawned stdio MCP server (child process).
    ChildProcess(TokioChildProcess),
    /// Streamable HTTP transport, optionally carrying a static bearer token.
    StreamableHttp {
        transport: StreamableHttpClientTransport<reqwest::Client>,
    },
    /// Streamable HTTP transport whose requests are authenticated via OAuth;
    /// the persistor saves refreshed tokens back to the credential store.
    StreamableHttpWithOAuth {
        transport: StreamableHttpClientTransport<AuthClient<reqwest::Client>>,
        oauth_persistor: OAuthPersistor,
    },
}
/// Connection lifecycle of an `RmcpClient`.
enum ClientState {
    /// Transport created but the handshake has not completed. `transport` is
    /// `take()`n by `initialize`; `None` means a handshake is already in
    /// flight (or failed after consuming the transport).
    Connecting {
        transport: Option<PendingTransport>,
    },
    /// Handshake complete; requests are issued through `service`.
    Ready {
        service: Arc<RunningService<RoleClient, LoggingClientHandler>>,
        // Present only for OAuth-backed HTTP transports.
        oauth: Option<OAuthPersistor>,
    },
}
/// Parameters of a server-initiated elicitation request forwarded to the UI.
pub type Elicitation = CreateElicitationRequestParam;
/// UI response to an elicitation request.
pub type ElicitationResponse = CreateElicitationResult;
/// Interface for sending elicitation requests to the UI and awaiting a response.
pub type SendElicitation = Box<
    dyn Fn(RequestId, Elicitation) -> BoxFuture<'static, Result<ElicitationResponse>> + Send + Sync,
>;
/// MCP client implemented on top of the official `rmcp` SDK.
/// https://github.com/modelcontextprotocol/rust-sdk
pub struct RmcpClient {
    // Guarded so `initialize` can atomically swap Connecting -> Ready.
    state: Mutex<ClientState>,
}
impl RmcpClient {
    /// Spawn `program` as a stdio MCP server and prepare (but do not perform)
    /// the handshake; call [`RmcpClient::initialize`] to complete setup.
    ///
    /// The child runs with a cleared environment rebuilt from the allow-list
    /// plus `env`/`env_vars`, and its stderr is drained to tracing logs.
    pub async fn new_stdio_client(
        program: OsString,
        args: Vec<OsString>,
        env: Option<HashMap<String, String>>,
        env_vars: &[String],
        cwd: Option<PathBuf>,
    ) -> io::Result<Self> {
        let program_name = program.to_string_lossy().into_owned();
        // Build environment for program resolution and subprocess
        let envs = create_env_for_mcp_server(env, env_vars);
        // Resolve program to executable path (platform-specific)
        let resolved_program = program_resolver::resolve(program, &envs)?;
        let mut command = Command::new(resolved_program);
        command
            .kill_on_drop(true)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .env_clear()
            .envs(envs)
            .args(&args);
        if let Some(cwd) = cwd {
            command.current_dir(cwd);
        }
        let (transport, stderr) = TokioChildProcess::builder(command)
            .stderr(Stdio::piped())
            .spawn()?;
        // Forward the child's stderr lines to our logs so server-side
        // diagnostics are not lost.
        if let Some(stderr) = stderr {
            tokio::spawn(async move {
                let mut reader = BufReader::new(stderr).lines();
                loop {
                    match reader.next_line().await {
                        Ok(Some(line)) => {
                            info!("MCP server stderr ({program_name}): {line}");
                        }
                        Ok(None) => break,
                        Err(error) => {
                            warn!("Failed to read MCP server stderr ({program_name}): {error}");
                            break;
                        }
                    }
                }
            });
        }
        Ok(Self {
            state: Mutex::new(ClientState::Connecting {
                transport: Some(PendingTransport::ChildProcess(transport)),
            }),
        })
    }
    /// Prepare a streamable-HTTP MCP client for `url`.
    ///
    /// If no `bearer_token` is given, previously persisted OAuth tokens for
    /// this server are loaded (failures are logged and treated as "no
    /// tokens") and, when present, the transport is wrapped with OAuth token
    /// management. Otherwise a plain transport is built, optionally carrying
    /// the bearer token.
    #[allow(clippy::too_many_arguments)]
    pub async fn new_streamable_http_client(
        server_name: &str,
        url: &str,
        bearer_token: Option<String>,
        http_headers: Option<HashMap<String, String>>,
        env_http_headers: Option<HashMap<String, String>>,
        store_mode: OAuthCredentialsStoreMode,
    ) -> Result<Self> {
        let default_headers = build_default_headers(http_headers, env_http_headers)?;
        let initial_oauth_tokens = match bearer_token {
            Some(_) => None,
            None => match load_oauth_tokens(server_name, url, store_mode) {
                Ok(tokens) => tokens,
                Err(err) => {
                    warn!("failed to read tokens for server `{server_name}`: {err}");
                    None
                }
            },
        };
        let transport = if let Some(initial_tokens) = initial_oauth_tokens.clone() {
            let (transport, oauth_persistor) = create_oauth_transport_and_runtime(
                server_name,
                url,
                initial_tokens,
                store_mode,
                default_headers.clone(),
            )
            .await?;
            PendingTransport::StreamableHttpWithOAuth {
                transport,
                oauth_persistor,
            }
        } else {
            let mut http_config = StreamableHttpClientTransportConfig::with_uri(url.to_string());
            if let Some(bearer_token) = bearer_token.clone() {
                http_config = http_config.auth_header(bearer_token);
            }
            let http_client =
                apply_default_headers(reqwest::Client::builder(), &default_headers).build()?;
            let transport = StreamableHttpClientTransport::with_client(http_client, http_config);
            PendingTransport::StreamableHttp { transport }
        };
        Ok(Self {
            state: Mutex::new(ClientState::Connecting {
                transport: Some(transport),
            }),
        })
    }
    /// Perform the initialization handshake with the MCP server.
    /// https://modelcontextprotocol.io/specification/2025-06-18/basic/lifecycle#initialization
    pub async fn initialize(
        &self,
        params: InitializeRequestParams,
        timeout: Option<Duration>,
        send_elicitation: SendElicitation,
    ) -> Result<InitializeResult> {
        let rmcp_params: InitializeRequestParam = convert_to_rmcp(params.clone())?;
        let client_handler = LoggingClientHandler::new(rmcp_params, send_elicitation);
        // Take the pending transport out of the state under the lock; the
        // handshake itself runs with the lock released.
        let (transport, oauth_persistor) = {
            let mut guard = self.state.lock().await;
            match &mut *guard {
                ClientState::Connecting { transport } => match transport.take() {
                    Some(PendingTransport::ChildProcess(transport)) => (
                        service::serve_client(client_handler.clone(), transport).boxed(),
                        None,
                    ),
                    Some(PendingTransport::StreamableHttp { transport }) => (
                        service::serve_client(client_handler.clone(), transport).boxed(),
                        None,
                    ),
                    Some(PendingTransport::StreamableHttpWithOAuth {
                        transport,
                        oauth_persistor,
                    }) => (
                        service::serve_client(client_handler.clone(), transport).boxed(),
                        Some(oauth_persistor),
                    ),
                    None => return Err(anyhow!("client already initializing")),
                },
                ClientState::Ready { .. } => return Err(anyhow!("client already initialized")),
            }
        };
        let service = match timeout {
            Some(duration) => time::timeout(duration, transport)
                .await
                .map_err(|_| anyhow!("timed out handshaking with MCP server after {duration:?}"))?
                .map_err(|err| anyhow!("handshaking with MCP server failed: {err}"))?,
            None => transport
                .await
                .map_err(|err| anyhow!("handshaking with MCP server failed: {err}"))?,
        };
        let initialize_result_rmcp = service
            .peer()
            .peer_info()
            .ok_or_else(|| anyhow!("handshake succeeded but server info was missing"))?;
        let initialize_result = convert_to_mcp(initialize_result_rmcp)?;
        // Transition to Ready before persisting, so concurrent callers see a
        // usable client as early as possible.
        {
            let mut guard = self.state.lock().await;
            *guard = ClientState::Ready {
                service: Arc::new(service),
                oauth: oauth_persistor.clone(),
            };
        }
        if let Some(runtime) = oauth_persistor
            && let Err(error) = runtime.persist_if_needed().await
        {
            warn!("failed to persist OAuth tokens after initialize: {error}");
        }
        Ok(initialize_result)
    }
    /// Issue `tools/list`, refreshing OAuth tokens before and persisting any
    /// rotated tokens after the call.
    pub async fn list_tools(
        &self,
        params: Option<ListToolsRequestParams>,
        timeout: Option<Duration>,
    ) -> Result<ListToolsResult> {
        self.refresh_oauth_if_needed().await;
        let service = self.service().await?;
        let rmcp_params = params
            .map(convert_to_rmcp::<_, PaginatedRequestParam>)
            .transpose()?;
        let fut = service.list_tools(rmcp_params);
        let result = run_with_timeout(fut, timeout, "tools/list").await?;
        let converted = convert_to_mcp(result)?;
        self.persist_oauth_tokens().await;
        Ok(converted)
    }
    /// Issue `resources/list` (OAuth refresh/persist handled as in
    /// [`RmcpClient::list_tools`]).
    pub async fn list_resources(
        &self,
        params: Option<ListResourcesRequestParams>,
        timeout: Option<Duration>,
    ) -> Result<ListResourcesResult> {
        self.refresh_oauth_if_needed().await;
        let service = self.service().await?;
        let rmcp_params = params
            .map(convert_to_rmcp::<_, PaginatedRequestParam>)
            .transpose()?;
        let fut = service.list_resources(rmcp_params);
        let result = run_with_timeout(fut, timeout, "resources/list").await?;
        let converted = convert_to_mcp(result)?;
        self.persist_oauth_tokens().await;
        Ok(converted)
    }
    /// Issue `resources/templates/list`.
    pub async fn list_resource_templates(
        &self,
        params: Option<ListResourceTemplatesRequestParams>,
        timeout: Option<Duration>,
    ) -> Result<ListResourceTemplatesResult> {
        self.refresh_oauth_if_needed().await;
        let service = self.service().await?;
        let rmcp_params = params
            .map(convert_to_rmcp::<_, PaginatedRequestParam>)
            .transpose()?;
        let fut = service.list_resource_templates(rmcp_params);
        let result = run_with_timeout(fut, timeout, "resources/templates/list").await?;
        let converted = convert_to_mcp(result)?;
        self.persist_oauth_tokens().await;
        Ok(converted)
    }
    /// Issue `resources/read` for a single resource URI.
    pub async fn read_resource(
        &self,
        params: ReadResourceRequestParams,
        timeout: Option<Duration>,
    ) -> Result<ReadResourceResult> {
        self.refresh_oauth_if_needed().await;
        let service = self.service().await?;
        let rmcp_params: ReadResourceRequestParam = convert_to_rmcp(params)?;
        let fut = service.read_resource(rmcp_params);
        let result = run_with_timeout(fut, timeout, "resources/read").await?;
        let converted = convert_to_mcp(result)?;
        self.persist_oauth_tokens().await;
        Ok(converted)
    }
    /// Issue `tools/call` for tool `name` with optional JSON `arguments`.
    pub async fn call_tool(
        &self,
        name: String,
        arguments: Option<serde_json::Value>,
        timeout: Option<Duration>,
    ) -> Result<CallToolResult> {
        self.refresh_oauth_if_needed().await;
        let service = self.service().await?;
        let params = CallToolRequestParams { arguments, name };
        let rmcp_params: CallToolRequestParam = convert_to_rmcp(params)?;
        let fut = service.call_tool(rmcp_params);
        let rmcp_result = run_with_timeout(fut, timeout, "tools/call").await?;
        let converted = convert_call_tool_result(rmcp_result)?;
        self.persist_oauth_tokens().await;
        Ok(converted)
    }
    /// Send a notification with an arbitrary (non-standard) method name.
    pub async fn send_custom_notification(
        &self,
        method: &str,
        params: Option<serde_json::Value>,
    ) -> Result<()> {
        let service: Arc<RunningService<RoleClient, LoggingClientHandler>> = self.service().await?;
        service
            .send_notification(ClientNotification::CustomNotification(CustomNotification {
                method: method.to_string(),
                params,
                extensions: Extensions::new(),
            }))
            .await?;
        Ok(())
    }
    /// Send a request with an arbitrary (non-standard) method name and return
    /// the raw server result.
    pub async fn send_custom_request(
        &self,
        method: &str,
        params: Option<serde_json::Value>,
    ) -> Result<ServerResult> {
        let service: Arc<RunningService<RoleClient, LoggingClientHandler>> = self.service().await?;
        let response = service
            .send_request(ClientRequest::CustomRequest(CustomRequest::new(
                method, params,
            )))
            .await?;
        Ok(response)
    }
    /// Return the running service, or an error if `initialize` has not
    /// completed yet.
    async fn service(&self) -> Result<Arc<RunningService<RoleClient, LoggingClientHandler>>> {
        let guard = self.state.lock().await;
        match &*guard {
            ClientState::Ready { service, .. } => Ok(Arc::clone(service)),
            ClientState::Connecting { .. } => Err(anyhow!("MCP client not initialized")),
        }
    }
    /// Return the OAuth persistor, if this client uses OAuth and is Ready.
    async fn oauth_persistor(&self) -> Option<OAuthPersistor> {
        let guard = self.state.lock().await;
        match &*guard {
            ClientState::Ready {
                oauth: Some(runtime),
                service: _,
            } => Some(runtime.clone()),
            _ => None,
        }
    }
    /// This should be called after every tool call so that if a given tool call triggered
    /// a refresh of the OAuth tokens, they are persisted.
    async fn persist_oauth_tokens(&self) {
        if let Some(runtime) = self.oauth_persistor().await
            && let Err(error) = runtime.persist_if_needed().await
        {
            warn!("failed to persist OAuth tokens: {error}");
        }
    }
    /// Proactively refresh OAuth tokens before a request; failures are logged
    /// and the request proceeds (the server will reject if truly expired).
    async fn refresh_oauth_if_needed(&self) {
        if let Some(runtime) = self.oauth_persistor().await
            && let Err(error) = runtime.refresh_if_needed().await
        {
            warn!("failed to refresh OAuth tokens: {error}");
        }
    }
}
/// Build a streamable-HTTP transport whose requests are authenticated via
/// OAuth, seeded with previously persisted tokens.
///
/// Returns the transport together with an `OAuthPersistor` that can refresh
/// and re-persist tokens as the auth manager rotates them.
async fn create_oauth_transport_and_runtime(
    server_name: &str,
    url: &str,
    initial_tokens: StoredOAuthTokens,
    credentials_store: OAuthCredentialsStoreMode,
    default_headers: HeaderMap,
) -> Result<(
    StreamableHttpClientTransport<AuthClient<reqwest::Client>>,
    OAuthPersistor,
)> {
    let http_client =
        apply_default_headers(reqwest::Client::builder(), &default_headers).build()?;
    let mut oauth_state = OAuthState::new(url.to_string(), Some(http_client.clone())).await?;
    // Seed the OAuth state with the stored client id + token response.
    oauth_state
        .set_credentials(
            &initial_tokens.client_id,
            initial_tokens.token_response.0.clone(),
        )
        .await?;
    // Either Authorized or Unauthorized yields a usable auth manager; the
    // other states should not occur at this point in setup.
    let manager = match oauth_state {
        OAuthState::Authorized(manager) => manager,
        OAuthState::Unauthorized(manager) => manager,
        OAuthState::Session(_) | OAuthState::AuthorizedHttpClient(_) => {
            return Err(anyhow!("unexpected OAuth state during client setup"));
        }
    };
    let auth_client = AuthClient::new(http_client, manager);
    // Keep a handle to the auth manager so the persistor can observe token
    // refreshes performed by the transport.
    let auth_manager = auth_client.auth_manager.clone();
    let transport = StreamableHttpClientTransport::with_client(
        auth_client,
        StreamableHttpClientTransportConfig::with_uri(url.to_string()),
    );
    let runtime = OAuthPersistor::new(
        server_name.to_string(),
        url.to_string(),
        auth_manager,
        credentials_store,
        Some(initial_tokens),
    );
    Ok((transport, runtime))
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/logging_client_handler.rs | codex-rs/rmcp-client/src/logging_client_handler.rs | use std::sync::Arc;
use rmcp::ClientHandler;
use rmcp::RoleClient;
use rmcp::model::CancelledNotificationParam;
use rmcp::model::ClientInfo;
use rmcp::model::CreateElicitationRequestParam;
use rmcp::model::CreateElicitationResult;
use rmcp::model::LoggingLevel;
use rmcp::model::LoggingMessageNotificationParam;
use rmcp::model::ProgressNotificationParam;
use rmcp::model::RequestId;
use rmcp::model::ResourceUpdatedNotificationParam;
use rmcp::service::NotificationContext;
use rmcp::service::RequestContext;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
use crate::rmcp_client::SendElicitation;
/// rmcp `ClientHandler` that logs server notifications via `tracing` and
/// forwards elicitation requests to a caller-supplied callback.
#[derive(Clone)]
pub(crate) struct LoggingClientHandler {
    // Client info advertised to the server during the handshake.
    client_info: ClientInfo,
    // Shared so the handler stays `Clone` while owning a boxed callback.
    send_elicitation: Arc<SendElicitation>,
}
impl LoggingClientHandler {
    /// Create a handler advertising `client_info` that routes elicitation
    /// requests through `send_elicitation`.
    pub(crate) fn new(client_info: ClientInfo, send_elicitation: SendElicitation) -> Self {
        let send_elicitation = Arc::new(send_elicitation);
        Self {
            client_info,
            send_elicitation,
        }
    }
}
impl ClientHandler for LoggingClientHandler {
    /// Forward a server-initiated elicitation to the UI callback and return
    /// its response; callback failures surface as MCP internal errors.
    async fn create_elicitation(
        &self,
        request: CreateElicitationRequestParam,
        context: RequestContext<RoleClient>,
    ) -> Result<CreateElicitationResult, rmcp::ErrorData> {
        // Translate the rmcp request id into the mcp-types representation.
        let id = match context.id {
            RequestId::String(id) => mcp_types::RequestId::String(id.to_string()),
            RequestId::Number(id) => mcp_types::RequestId::Integer(id),
        };
        (self.send_elicitation)(id, request)
            .await
            .map_err(|err| rmcp::ErrorData::internal_error(err.to_string(), None))
    }
    /// Log server-side cancellation of an in-flight request.
    async fn on_cancelled(
        &self,
        params: CancelledNotificationParam,
        _context: NotificationContext<RoleClient>,
    ) {
        info!(
            "MCP server cancelled request (request_id: {}, reason: {:?})",
            params.request_id, params.reason
        );
    }
    /// Log progress updates for long-running server operations.
    async fn on_progress(
        &self,
        params: ProgressNotificationParam,
        _context: NotificationContext<RoleClient>,
    ) {
        info!(
            "MCP server progress notification (token: {:?}, progress: {}, total: {:?}, message: {:?})",
            params.progress_token, params.progress, params.total, params.message
        );
    }
    /// Log a resource-updated notification.
    async fn on_resource_updated(
        &self,
        params: ResourceUpdatedNotificationParam,
        _context: NotificationContext<RoleClient>,
    ) {
        info!("MCP server resource updated (uri: {})", params.uri);
    }
    async fn on_resource_list_changed(&self, _context: NotificationContext<RoleClient>) {
        info!("MCP server resource list changed");
    }
    async fn on_tool_list_changed(&self, _context: NotificationContext<RoleClient>) {
        info!("MCP server tool list changed");
    }
    async fn on_prompt_list_changed(&self, _context: NotificationContext<RoleClient>) {
        info!("MCP server prompt list changed");
    }
    /// Client info advertised to the server during the handshake.
    fn get_info(&self) -> ClientInfo {
        self.client_info.clone()
    }
    /// Map MCP logging levels onto the corresponding `tracing` macros so
    /// server log messages respect the host's log filtering.
    async fn on_logging_message(
        &self,
        params: LoggingMessageNotificationParam,
        _context: NotificationContext<RoleClient>,
    ) {
        let LoggingMessageNotificationParam {
            level,
            logger,
            data,
        } = params;
        let logger = logger.as_deref();
        match level {
            // All severities at error-and-above collapse to `error!`.
            LoggingLevel::Emergency
            | LoggingLevel::Alert
            | LoggingLevel::Critical
            | LoggingLevel::Error => {
                error!(
                    "MCP server log message (level: {:?}, logger: {:?}, data: {})",
                    level, logger, data
                );
            }
            LoggingLevel::Warning => {
                warn!(
                    "MCP server log message (level: {:?}, logger: {:?}, data: {})",
                    level, logger, data
                );
            }
            LoggingLevel::Notice | LoggingLevel::Info => {
                info!(
                    "MCP server log message (level: {:?}, logger: {:?}, data: {})",
                    level, logger, data
                );
            }
            LoggingLevel::Debug => {
                debug!(
                    "MCP server log message (level: {:?}, logger: {:?}, data: {})",
                    level, logger, data
                );
            }
        }
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/bin/rmcp_test_server.rs | codex-rs/rmcp-client/src/bin/rmcp_test_server.rs | use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::Arc;
use rmcp::ErrorData as McpError;
use rmcp::ServiceExt;
use rmcp::handler::server::ServerHandler;
use rmcp::model::CallToolRequestParam;
use rmcp::model::CallToolResult;
use rmcp::model::JsonObject;
use rmcp::model::ListToolsResult;
use rmcp::model::PaginatedRequestParam;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use serde::Deserialize;
use serde_json::json;
use tokio::task;
/// Minimal MCP test server exposing a single `echo` tool over stdio.
#[derive(Clone)]
struct TestToolServer {
    // Static tool list returned by `list_tools`.
    tools: Arc<Vec<Tool>>,
}
pub fn stdio() -> (tokio::io::Stdin, tokio::io::Stdout) {
(tokio::io::stdin(), tokio::io::stdout())
}
impl TestToolServer {
    /// Build a server whose tool list contains only `echo`.
    fn new() -> Self {
        Self {
            tools: Arc::new(vec![Self::echo_tool()]),
        }
    }

    /// Describe the `echo` tool: `message` is required, `env_var` optional.
    fn echo_tool() -> Tool {
        let schema_json = json!({
            "type": "object",
            "properties": {
                "message": { "type": "string" },
                "env_var": { "type": "string" }
            },
            "required": ["message"],
            "additionalProperties": false
        });
        // The schema literal is static, so failure here is a programmer error.
        #[expect(clippy::expect_used)]
        let schema: JsonObject =
            serde_json::from_value(schema_json).expect("echo tool schema should deserialize");
        Tool::new(
            Cow::Borrowed("echo"),
            Cow::Borrowed("Echo back the provided message and include environment data."),
            Arc::new(schema),
        )
    }
}
/// Arguments accepted by the `echo` tool.
#[derive(Deserialize)]
struct EchoArgs {
    // Text echoed back in the structured result.
    message: String,
    // Accepted for schema compatibility but not read by this server.
    #[allow(dead_code)]
    env_var: Option<String>,
}
impl ServerHandler for TestToolServer {
    /// Advertise tool support with list-changed notifications.
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
            capabilities: ServerCapabilities::builder()
                .enable_tools()
                .enable_tool_list_changed()
                .build(),
            ..ServerInfo::default()
        }
    }
    /// Return the static tool list; pagination is not supported.
    ///
    /// Written as `async fn` for consistency with `call_tool` instead of the
    /// manual `impl Future` form.
    async fn list_tools(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<ListToolsResult, McpError> {
        Ok(ListToolsResult {
            tools: (*self.tools).clone(),
            next_cursor: None,
            meta: None,
        })
    }
    /// Dispatch a tool call. Only `echo` is supported; it returns the caller's
    /// `message` plus the current value of the `MCP_TEST_VALUE` env var so
    /// tests can verify environment propagation.
    async fn call_tool(
        &self,
        request: CallToolRequestParam,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<CallToolResult, McpError> {
        match request.name.as_ref() {
            "echo" => {
                let args: EchoArgs = match request.arguments {
                    Some(arguments) => serde_json::from_value(serde_json::Value::Object(
                        arguments.into_iter().collect(),
                    ))
                    .map_err(|err| McpError::invalid_params(err.to_string(), None))?,
                    None => {
                        return Err(McpError::invalid_params(
                            "missing arguments for echo tool",
                            None,
                        ));
                    }
                };
                // Read just the variable we need instead of snapshotting the
                // entire environment; serializes to the same JSON (string or
                // null).
                let structured_content = json!({
                    "echo": args.message,
                    "env": std::env::var("MCP_TEST_VALUE").ok(),
                });
                Ok(CallToolResult {
                    content: Vec::new(),
                    structured_content: Some(structured_content),
                    is_error: Some(false),
                    meta: None,
                })
            }
            other => Err(McpError::invalid_params(
                format!("unknown tool: {other}"),
                None,
            )),
        }
    }
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    eprintln!("starting rmcp test server");
    // Serve over STDIO; if the client disconnects the error propagates and
    // the process exits.
    let running = TestToolServer::new().serve(stdio()).await?;
    // Block until the client finishes the session.
    running.waiting().await?;
    // Give background tasks a chance to wind down before exiting.
    task::yield_now().await;
    Ok(())
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/bin/test_stdio_server.rs | codex-rs/rmcp-client/src/bin/test_stdio_server.rs | use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::Arc;
use rmcp::ErrorData as McpError;
use rmcp::ServiceExt;
use rmcp::handler::server::ServerHandler;
use rmcp::model::CallToolRequestParam;
use rmcp::model::CallToolResult;
use rmcp::model::JsonObject;
use rmcp::model::ListResourceTemplatesResult;
use rmcp::model::ListResourcesResult;
use rmcp::model::ListToolsResult;
use rmcp::model::PaginatedRequestParam;
use rmcp::model::RawResource;
use rmcp::model::RawResourceTemplate;
use rmcp::model::ReadResourceRequestParam;
use rmcp::model::ReadResourceResult;
use rmcp::model::Resource;
use rmcp::model::ResourceContents;
use rmcp::model::ResourceTemplate;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use serde::Deserialize;
use serde_json::json;
use tokio::task;
/// MCP test server exposing `echo`/`image` tools plus a sample memo resource.
#[derive(Clone)]
struct TestToolServer {
    // Static tool list returned by `list_tools`.
    tools: Arc<Vec<Tool>>,
    // Static resource list returned by `list_resources`.
    resources: Arc<Vec<Resource>>,
    // Static template list returned by `list_resource_templates`.
    resource_templates: Arc<Vec<ResourceTemplate>>,
}
/// URI of the single readable memo resource served by this test server.
const MEMO_URI: &str = "memo://codex/example-note";
/// Text body returned when the memo resource is read.
const MEMO_CONTENT: &str = "This is a sample MCP resource served by the rmcp test server.";
pub fn stdio() -> (tokio::io::Stdin, tokio::io::Stdout) {
(tokio::io::stdin(), tokio::io::stdout())
}
impl TestToolServer {
    /// Build a server with the `echo` and `image` tools and one memo
    /// resource/template.
    fn new() -> Self {
        let tools = vec![Self::echo_tool(), Self::image_tool()];
        let resources = vec![Self::memo_resource()];
        let resource_templates = vec![Self::memo_template()];
        Self {
            tools: Arc::new(tools),
            resources: Arc::new(resources),
            resource_templates: Arc::new(resource_templates),
        }
    }
    /// Describe the `echo` tool: `message` is required, `env_var` optional.
    fn echo_tool() -> Tool {
        // Static schema literal; failure to deserialize is a programmer error.
        #[expect(clippy::expect_used)]
        let schema: JsonObject = serde_json::from_value(json!({
            "type": "object",
            "properties": {
                "message": { "type": "string" },
                "env_var": { "type": "string" }
            },
            "required": ["message"],
            "additionalProperties": false
        }))
        .expect("echo tool schema should deserialize");
        Tool::new(
            Cow::Borrowed("echo"),
            Cow::Borrowed("Echo back the provided message and include environment data."),
            Arc::new(schema),
        )
    }
    /// Describe the `image` tool: takes no arguments.
    fn image_tool() -> Tool {
        #[expect(clippy::expect_used)]
        let schema: JsonObject = serde_json::from_value(serde_json::json!({
            "type": "object",
            "properties": {},
            "additionalProperties": false
        }))
        .expect("image tool schema should deserialize");
        Tool::new(
            Cow::Borrowed("image"),
            Cow::Borrowed("Return a single image content block."),
            Arc::new(schema),
        )
    }
    /// Static plain-text resource served at `MEMO_URI`.
    fn memo_resource() -> Resource {
        let raw = RawResource {
            uri: MEMO_URI.to_string(),
            name: "example-note".to_string(),
            title: Some("Example Note".to_string()),
            description: Some("A sample MCP resource exposed for integration tests.".to_string()),
            mime_type: Some("text/plain".to_string()),
            size: None,
            icons: None,
            meta: None,
        };
        Resource::new(raw, None)
    }
    /// Resource template covering `memo://codex/{slug}` URIs.
    fn memo_template() -> ResourceTemplate {
        let raw = RawResourceTemplate {
            uri_template: "memo://codex/{slug}".to_string(),
            name: "codex-memo".to_string(),
            title: Some("Codex Memo".to_string()),
            description: Some(
                "Template for memo://codex/{slug} resources used in tests.".to_string(),
            ),
            mime_type: Some("text/plain".to_string()),
        };
        ResourceTemplate::new(raw, None)
    }
    /// Body of the memo resource.
    fn memo_text() -> &'static str {
        MEMO_CONTENT
    }
}
/// Arguments accepted by the `echo` tool.
#[derive(Deserialize)]
struct EchoArgs {
    // Text echoed back (prefixed) in the structured result.
    message: String,
    // Accepted for schema compatibility but not read by this server.
    #[allow(dead_code)]
    env_var: Option<String>,
}
impl ServerHandler for TestToolServer {
    /// Advertise capabilities: tools (with list-changed notifications) and
    /// resources.
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
            capabilities: ServerCapabilities::builder()
                .enable_tools()
                .enable_tool_list_changed()
                .enable_resources()
                .build(),
            ..ServerInfo::default()
        }
    }
    /// Return the fixed tool catalog; the pagination cursor is ignored.
    fn list_tools(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> impl std::future::Future<Output = Result<ListToolsResult, McpError>> + Send + '_ {
        // Clone the Arc up front so the returned future does not borrow self.
        let tools = self.tools.clone();
        async move {
            Ok(ListToolsResult {
                tools: (*tools).clone(),
                next_cursor: None,
                meta: None,
            })
        }
    }
    /// Return the fixed resource catalog; the pagination cursor is ignored.
    fn list_resources(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> impl std::future::Future<Output = Result<ListResourcesResult, McpError>> + Send + '_ {
        let resources = self.resources.clone();
        async move {
            Ok(ListResourcesResult {
                resources: (*resources).clone(),
                next_cursor: None,
                meta: None,
            })
        }
    }
    /// Return the fixed resource-template catalog.
    async fn list_resource_templates(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<ListResourceTemplatesResult, McpError> {
        Ok(ListResourceTemplatesResult {
            resource_templates: (*self.resource_templates).clone(),
            next_cursor: None,
            meta: None,
        })
    }
    /// Serve the memo body for `MEMO_URI`; any other URI yields a
    /// `resource_not_found` error that carries the requested URI.
    async fn read_resource(
        &self,
        ReadResourceRequestParam { uri }: ReadResourceRequestParam,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<ReadResourceResult, McpError> {
        if uri == MEMO_URI {
            Ok(ReadResourceResult {
                contents: vec![ResourceContents::TextResourceContents {
                    uri,
                    mime_type: Some("text/plain".to_string()),
                    text: Self::memo_text().to_string(),
                    meta: None,
                }],
            })
        } else {
            Err(McpError::resource_not_found(
                "resource_not_found",
                Some(json!({ "uri": uri })),
            ))
        }
    }
    /// Dispatch tool calls. `echo` returns structured content with the message
    /// and the `MCP_TEST_VALUE` env var; `image` returns an image content
    /// block decoded from `MCP_TEST_IMAGE_DATA_URL`; anything else is an
    /// invalid-params error.
    async fn call_tool(
        &self,
        request: CallToolRequestParam,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<CallToolResult, McpError> {
        match request.name.as_ref() {
            "echo" => {
                let args: EchoArgs = match request.arguments {
                    Some(arguments) => serde_json::from_value(serde_json::Value::Object(
                        arguments.into_iter().collect(),
                    ))
                    .map_err(|err| McpError::invalid_params(err.to_string(), None))?,
                    None => {
                        return Err(McpError::invalid_params(
                            "missing arguments for echo tool",
                            None,
                        ));
                    }
                };
                // Snapshot the process environment so callers can observe
                // MCP_TEST_VALUE through the structured response.
                let env_snapshot: HashMap<String, String> = std::env::vars().collect();
                let structured_content = json!({
                    "echo": format!("ECHOING: {}", args.message),
                    "env": env_snapshot.get("MCP_TEST_VALUE"),
                });
                Ok(CallToolResult {
                    content: Vec::new(),
                    structured_content: Some(structured_content),
                    is_error: Some(false),
                    meta: None,
                })
            }
            "image" => {
                // Read a data URL (e.g. data:image/png;base64,AAA...) from env and convert to
                // an MCP image content block. Tests set MCP_TEST_IMAGE_DATA_URL.
                let data_url = std::env::var("MCP_TEST_IMAGE_DATA_URL").map_err(|_| {
                    McpError::invalid_params(
                        "missing MCP_TEST_IMAGE_DATA_URL env var for image tool",
                        None,
                    )
                })?;
                // Split a data URL into (mime type, base64 payload); None when
                // the "data:" scheme or the comma separator is missing.
                fn parse_data_url(url: &str) -> Option<(String, String)> {
                    let rest = url.strip_prefix("data:")?;
                    let (mime_and_opts, data) = rest.split_once(',')?;
                    let (mime, _opts) =
                        mime_and_opts.split_once(';').unwrap_or((mime_and_opts, ""));
                    Some((mime.to_string(), data.to_string()))
                }
                let (mime_type, data_b64) = parse_data_url(&data_url).ok_or_else(|| {
                    McpError::invalid_params(
                        format!("invalid data URL for image tool: {data_url}"),
                        None,
                    )
                })?;
                Ok(CallToolResult::success(vec![rmcp::model::Content::image(
                    data_b64, mime_type,
                )]))
            }
            other => Err(McpError::invalid_params(
                format!("unknown tool: {other}"),
                None,
            )),
        }
    }
}
/// Entry point: serve `TestToolServer` over stdio until the client hangs up.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    eprintln!("starting rmcp test server");
    // Run the server with STDIO transport. If the client disconnects we simply
    // bubble up the error so the process exits.
    let service = TestToolServer::new();
    let running = service.serve(stdio()).await?;
    // Wait for the client to finish interacting with the server.
    running.waiting().await?;
    // Drain background tasks to ensure clean shutdown.
    task::yield_now().await;
    Ok(())
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/src/bin/test_streamable_http_server.rs | codex-rs/rmcp-client/src/bin/test_streamable_http_server.rs | use std::borrow::Cow;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::net::SocketAddr;
use std::sync::Arc;
use axum::Router;
use axum::body::Body;
use axum::extract::State;
use axum::http::Request;
use axum::http::StatusCode;
use axum::http::header::AUTHORIZATION;
use axum::http::header::CONTENT_TYPE;
use axum::middleware;
use axum::middleware::Next;
use axum::response::Response;
use axum::routing::get;
use rmcp::ErrorData as McpError;
use rmcp::handler::server::ServerHandler;
use rmcp::model::CallToolRequestParam;
use rmcp::model::CallToolResult;
use rmcp::model::JsonObject;
use rmcp::model::ListResourceTemplatesResult;
use rmcp::model::ListResourcesResult;
use rmcp::model::ListToolsResult;
use rmcp::model::PaginatedRequestParam;
use rmcp::model::RawResource;
use rmcp::model::RawResourceTemplate;
use rmcp::model::ReadResourceRequestParam;
use rmcp::model::ReadResourceResult;
use rmcp::model::Resource;
use rmcp::model::ResourceContents;
use rmcp::model::ResourceTemplate;
use rmcp::model::ServerCapabilities;
use rmcp::model::ServerInfo;
use rmcp::model::Tool;
use rmcp::transport::StreamableHttpServerConfig;
use rmcp::transport::StreamableHttpService;
use rmcp::transport::streamable_http_server::session::local::LocalSessionManager;
use serde::Deserialize;
use serde_json::json;
use tokio::task;
/// In-process MCP server used by integration tests; the fixed tool and
/// resource catalogs live behind `Arc`s so clones share them.
#[derive(Clone)]
struct TestToolServer {
    tools: Arc<Vec<Tool>>,
    resources: Arc<Vec<Resource>>,
    resource_templates: Arc<Vec<ResourceTemplate>>,
}
const MEMO_URI: &str = "memo://codex/example-note";
const MEMO_CONTENT: &str = "This is a sample MCP resource served by the rmcp test server.";
impl TestToolServer {
    /// Build the server with its fixed tool, resource, and template catalogs.
    fn new() -> Self {
        let tools = vec![Self::echo_tool()];
        let resources = vec![Self::memo_resource()];
        let resource_templates = vec![Self::memo_template()];
        Self {
            tools: Arc::new(tools),
            resources: Arc::new(resources),
            resource_templates: Arc::new(resource_templates),
        }
    }
    /// Describe the `echo` tool: requires `message`, optionally `env_var`.
    fn echo_tool() -> Tool {
        #[expect(clippy::expect_used)]
        let schema: JsonObject = serde_json::from_value(json!({
            "type": "object",
            "properties": {
                "message": { "type": "string" },
                "env_var": { "type": "string" }
            },
            "required": ["message"],
            "additionalProperties": false
        }))
        .expect("echo tool schema should deserialize");
        Tool::new(
            Cow::Borrowed("echo"),
            Cow::Borrowed("Echo back the provided message and include environment data."),
            Arc::new(schema),
        )
    }
    /// Build the static test resource advertised at `MEMO_URI`.
    fn memo_resource() -> Resource {
        let raw = RawResource {
            uri: MEMO_URI.to_string(),
            name: "example-note".to_string(),
            title: Some("Example Note".to_string()),
            description: Some("A sample MCP resource exposed for integration tests.".to_string()),
            mime_type: Some("text/plain".to_string()),
            size: None,
            icons: None,
            meta: None,
        };
        // Second argument is the (absent) annotations for this resource.
        Resource::new(raw, None)
    }
    /// Build the `memo://codex/{slug}` resource template advertised to clients.
    fn memo_template() -> ResourceTemplate {
        let raw = RawResourceTemplate {
            uri_template: "memo://codex/{slug}".to_string(),
            name: "codex-memo".to_string(),
            title: Some("Codex Memo".to_string()),
            description: Some(
                "Template for memo://codex/{slug} resources used in tests.".to_string(),
            ),
            mime_type: Some("text/plain".to_string()),
        };
        ResourceTemplate::new(raw, None)
    }
    /// Text body returned when the memo resource is read.
    fn memo_text() -> &'static str {
        MEMO_CONTENT
    }
}
/// Arguments for the `echo` tool, deserialized from the `call_tool` request.
#[derive(Deserialize)]
struct EchoArgs {
    /// Message echoed back to the caller.
    message: String,
    /// Accepted for schema compatibility but not read by the handler.
    #[allow(dead_code)]
    env_var: Option<String>,
}
impl ServerHandler for TestToolServer {
    /// Advertise capabilities: tools (with list-changed notifications) and
    /// resources.
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
            capabilities: ServerCapabilities::builder()
                .enable_tools()
                .enable_tool_list_changed()
                .enable_resources()
                .build(),
            ..ServerInfo::default()
        }
    }
    /// Return the fixed tool catalog; the pagination cursor is ignored.
    fn list_tools(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> impl std::future::Future<Output = Result<ListToolsResult, McpError>> + Send + '_ {
        // Clone the Arc up front so the returned future does not borrow self.
        let tools = self.tools.clone();
        async move {
            Ok(ListToolsResult {
                tools: (*tools).clone(),
                next_cursor: None,
                meta: None,
            })
        }
    }
    /// Return the fixed resource catalog; the pagination cursor is ignored.
    fn list_resources(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> impl std::future::Future<Output = Result<ListResourcesResult, McpError>> + Send + '_ {
        let resources = self.resources.clone();
        async move {
            Ok(ListResourcesResult {
                resources: (*resources).clone(),
                next_cursor: None,
                meta: None,
            })
        }
    }
    /// Return the fixed resource-template catalog.
    async fn list_resource_templates(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<ListResourceTemplatesResult, McpError> {
        Ok(ListResourceTemplatesResult {
            resource_templates: (*self.resource_templates).clone(),
            next_cursor: None,
            meta: None,
        })
    }
    /// Serve the memo body for `MEMO_URI`; any other URI yields a
    /// `resource_not_found` error that carries the requested URI.
    async fn read_resource(
        &self,
        ReadResourceRequestParam { uri }: ReadResourceRequestParam,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<ReadResourceResult, McpError> {
        if uri == MEMO_URI {
            Ok(ReadResourceResult {
                contents: vec![ResourceContents::TextResourceContents {
                    uri,
                    mime_type: Some("text/plain".to_string()),
                    text: Self::memo_text().to_string(),
                    meta: None,
                }],
            })
        } else {
            Err(McpError::resource_not_found(
                "resource_not_found",
                Some(json!({ "uri": uri })),
            ))
        }
    }
    /// Dispatch tool calls. Only `echo` is supported here: it returns
    /// structured content containing the message and the `MCP_TEST_VALUE`
    /// env var; anything else is an invalid-params error.
    async fn call_tool(
        &self,
        request: CallToolRequestParam,
        _context: rmcp::service::RequestContext<rmcp::service::RoleServer>,
    ) -> Result<CallToolResult, McpError> {
        match request.name.as_ref() {
            "echo" => {
                let args: EchoArgs = match request.arguments {
                    Some(arguments) => serde_json::from_value(serde_json::Value::Object(
                        arguments.into_iter().collect(),
                    ))
                    .map_err(|err| McpError::invalid_params(err.to_string(), None))?,
                    None => {
                        return Err(McpError::invalid_params(
                            "missing arguments for echo tool",
                            None,
                        ));
                    }
                };
                // Snapshot the process environment so callers can observe
                // MCP_TEST_VALUE through the structured response.
                let env_snapshot: HashMap<String, String> = std::env::vars().collect();
                let structured_content = json!({
                    "echo": format!("ECHOING: {}", args.message),
                    "env": env_snapshot.get("MCP_TEST_VALUE"),
                });
                Ok(CallToolResult {
                    content: Vec::new(),
                    structured_content: Some(structured_content),
                    is_error: Some(false),
                    meta: None,
                })
            }
            other => Err(McpError::invalid_params(
                format!("unknown tool: {other}"),
                None,
            )),
        }
    }
}
/// Resolve the socket address the test server should bind to.
///
/// Precedence: `MCP_STREAMABLE_HTTP_BIND_ADDR`, then `BIND_ADDR`, then the
/// built-in default `127.0.0.1:3920`. Returns an error only if the chosen
/// value fails to parse as a `SocketAddr`.
fn parse_bind_addr() -> Result<SocketAddr, Box<dyn std::error::Error>> {
    let candidate = ["MCP_STREAMABLE_HTTP_BIND_ADDR", "BIND_ADDR"]
        .iter()
        .find_map(|key| std::env::var(key).ok())
        .unwrap_or_else(|| "127.0.0.1:3920".to_string());
    let addr: SocketAddr = candidate.parse()?;
    Ok(addr)
}
/// Entry point: serve `TestToolServer` over streamable HTTP, with an OAuth
/// metadata discovery route and optional bearer-token gating.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bind_addr = parse_bind_addr()?;
    let listener = match tokio::net::TcpListener::bind(&bind_addr).await {
        Ok(listener) => listener,
        // Sandboxed environments may deny binding; exit cleanly instead of
        // failing the harness.
        Err(err) if err.kind() == ErrorKind::PermissionDenied => {
            eprintln!(
                "failed to bind to {bind_addr}: {err}. make sure the process has network access"
            );
            return Ok(());
        }
        Err(err) => return Err(err.into()),
    };
    eprintln!("starting rmcp streamable http test server on http://{bind_addr}/mcp");
    let router = Router::new()
        // Minimal OAuth authorization-server metadata used by auth tests.
        .route(
            "/.well-known/oauth-authorization-server/mcp",
            get({
                move || async move {
                    let metadata_base = format!("http://{bind_addr}");
                    #[expect(clippy::expect_used)]
                    Response::builder()
                        .status(StatusCode::OK)
                        .header(CONTENT_TYPE, "application/json")
                        .body(Body::from(
                            serde_json::to_vec(&json!({
                                "authorization_endpoint": format!("{metadata_base}/oauth/authorize"),
                                "token_endpoint": format!("{metadata_base}/oauth/token"),
                                "scopes_supported": [""],
                            })).expect("failed to serialize metadata"),
                        ))
                        .expect("valid metadata response")
                }
            }),
        )
        .nest_service(
            "/mcp",
            StreamableHttpService::new(
                || Ok(TestToolServer::new()),
                Arc::new(LocalSessionManager::default()),
                StreamableHttpServerConfig::default(),
            ),
        );
    // When MCP_EXPECT_BEARER is set, require a matching Authorization header
    // on every non-discovery request.
    let router = if let Ok(token) = std::env::var("MCP_EXPECT_BEARER") {
        let expected = Arc::new(format!("Bearer {token}"));
        router.layer(middleware::from_fn_with_state(expected, require_bearer))
    } else {
        router
    };
    axum::serve(listener, router).await?;
    task::yield_now().await;
    Ok(())
}
/// Axum middleware that rejects requests lacking the expected bearer token.
///
/// Paths containing `/.well-known/` are exempt so unauthenticated clients can
/// still fetch OAuth discovery metadata. Every other request must carry an
/// `Authorization` header whose bytes match `expected` exactly; otherwise a
/// 401 is returned without invoking the inner service.
async fn require_bearer(
    State(expected): State<Arc<String>>,
    request: Request<Body>,
    next: Next,
) -> Result<Response, StatusCode> {
    // Discovery endpoints stay open; everything else is gated.
    let exempt = request.uri().path().contains("/.well-known/");
    let authorized = request
        .headers()
        .get(AUTHORIZATION)
        .is_some_and(|header| header.as_bytes() == expected.as_bytes());
    if exempt || authorized {
        Ok(next.run(request).await)
    } else {
        Err(StatusCode::UNAUTHORIZED)
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/rmcp-client/tests/resources.rs | codex-rs/rmcp-client/tests/resources.rs | use std::ffi::OsString;
use std::path::PathBuf;
use std::time::Duration;
use codex_rmcp_client::ElicitationAction;
use codex_rmcp_client::ElicitationResponse;
use codex_rmcp_client::RmcpClient;
use codex_utils_cargo_bin::CargoBinError;
use futures::FutureExt as _;
use mcp_types::ClientCapabilities;
use mcp_types::Implementation;
use mcp_types::InitializeRequestParams;
use mcp_types::ListResourceTemplatesResult;
use mcp_types::ReadResourceRequestParams;
use mcp_types::ReadResourceResultContents;
use mcp_types::Resource;
use mcp_types::ResourceTemplate;
use mcp_types::TextResourceContents;
use serde_json::json;
const RESOURCE_URI: &str = "memo://codex/example-note";
/// Locate the compiled `test_stdio_server` helper binary for this workspace.
fn stdio_server_bin() -> Result<PathBuf, CargoBinError> {
    codex_utils_cargo_bin::cargo_bin("test_stdio_server")
}
/// Build MCP initialize params identifying this test client and advertising
/// elicitation support (empty capability object).
fn init_params() -> InitializeRequestParams {
    InitializeRequestParams {
        capabilities: ClientCapabilities {
            experimental: None,
            roots: None,
            sampling: None,
            elicitation: Some(json!({})),
        },
        client_info: Implementation {
            name: "codex-test".into(),
            version: "0.0.0-test".into(),
            title: Some("Codex rmcp resource test".into()),
            user_agent: None,
        },
        protocol_version: mcp_types::MCP_SCHEMA_VERSION.to_string(),
    }
}
/// End-to-end check that the stdio test server's memo resource round-trips
/// through list_resources, list_resource_templates, and read_resource.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn rmcp_client_can_list_and_read_resources() -> anyhow::Result<()> {
    // Spawn the stdio test server binary and connect to it.
    let client = RmcpClient::new_stdio_client(
        stdio_server_bin()?.into(),
        Vec::<OsString>::new(),
        None,
        &[],
        None,
    )
    .await?;
    client
        .initialize(
            init_params(),
            Some(Duration::from_secs(5)),
            // Elicitation handler: auto-accept with empty content.
            Box::new(|_, _| {
                async {
                    Ok(ElicitationResponse {
                        action: ElicitationAction::Accept,
                        content: Some(json!({})),
                    })
                }
                .boxed()
            }),
        )
        .await?;
    // The memo resource must appear in the listing with the exact metadata
    // the server declares.
    let list = client
        .list_resources(None, Some(Duration::from_secs(5)))
        .await?;
    let memo = list
        .resources
        .iter()
        .find(|resource| resource.uri == RESOURCE_URI)
        .expect("memo resource present");
    assert_eq!(
        memo,
        &Resource {
            annotations: None,
            description: Some("A sample MCP resource exposed for integration tests.".to_string()),
            mime_type: Some("text/plain".to_string()),
            name: "example-note".to_string(),
            size: None,
            title: Some("Example Note".to_string()),
            uri: RESOURCE_URI.to_string(),
        }
    );
    // The template listing must contain exactly the memo template.
    let templates = client
        .list_resource_templates(None, Some(Duration::from_secs(5)))
        .await?;
    assert_eq!(
        templates,
        ListResourceTemplatesResult {
            next_cursor: None,
            resource_templates: vec![ResourceTemplate {
                annotations: None,
                description: Some(
                    "Template for memo://codex/{slug} resources used in tests.".to_string()
                ),
                mime_type: Some("text/plain".to_string()),
                name: "codex-memo".to_string(),
                title: Some("Codex Memo".to_string()),
                uri_template: "memo://codex/{slug}".to_string(),
            }],
        }
    );
    // Reading the resource must yield its text contents verbatim.
    let read = client
        .read_resource(
            ReadResourceRequestParams {
                uri: RESOURCE_URI.to_string(),
            },
            Some(Duration::from_secs(5)),
        )
        .await?;
    let ReadResourceResultContents::TextResourceContents(text) =
        read.contents.first().expect("resource contents present")
    else {
        panic!("expected text resource");
    };
    assert_eq!(
        text,
        &TextResourceContents {
            text: "This is a sample MCP resource served by the rmcp test server.".to_string(),
            uri: RESOURCE_URI.to_string(),
            mime_type: Some("text/plain".to_string()),
        }
    );
    Ok(())
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/ansi-escape/src/lib.rs | codex-rs/ansi-escape/src/lib.rs | use ansi_to_tui::Error;
use ansi_to_tui::IntoText;
use ratatui::text::Line;
use ratatui::text::Text;
/// Replace every tab in `s` with four spaces, borrowing when no tab exists.
///
/// Tabs can interact poorly with left-gutter prefixes in our TUI and CLI
/// transcript views (e.g., `nl` separates line numbers from content with a
/// tab). A fixed four-space substitution is used instead of tab-stop math:
/// most usages look acceptable and it avoids stateful math across spans.
fn expand_tabs(s: &str) -> std::borrow::Cow<'_, str> {
    match s.find('\t') {
        // No tab present: hand back the input unchanged, no allocation.
        None => std::borrow::Cow::Borrowed(s),
        // At least one tab: build an owned copy with each tab widened.
        Some(_) => std::borrow::Cow::Owned(s.replace('\t', "    ")),
    }
}
/// Parse ANSI-escaped text that is expected to contain a single line.
///
/// Tabs are normalized to spaces first to avoid gutter collisions in
/// transcript mode. If parsing yields more than one line, a warning is
/// logged and only the first line is returned; an empty parse yields an
/// empty line.
pub fn ansi_escape_line(s: &str) -> Line<'static> {
    // Widen tabs before parsing so they cannot collide with gutter prefixes.
    let normalized = expand_tabs(s);
    let text = ansi_escape(&normalized);
    let Some((head, tail)) = text.lines.split_first() else {
        return "".into();
    };
    if !tail.is_empty() {
        tracing::warn!(
            "ansi_escape_line: expected a single line, got {:?} and {:?}",
            head,
            tail
        );
    }
    head.clone()
}
/// Convert a string containing ANSI escape sequences into styled ratatui
/// [`Text`].
///
/// # Panics
///
/// Panics if `ansi_to_tui` fails to parse the input. Both error variants are
/// treated as bugs: they are logged via `tracing` and then the process panics
/// with a descriptive message (the previous bare `panic!()` produced only
/// "explicit panic", which was undiagnosable when tracing was uninitialized).
pub fn ansi_escape(s: &str) -> Text<'static> {
    // to_text() claims to be faster, but introduces complex lifetime issues
    // such that it's not worth it.
    match s.into_text() {
        Ok(text) => text,
        Err(err) => match err {
            Error::NomError(message) => {
                tracing::error!(
                    "ansi_to_tui NomError docs claim should never happen when parsing `{s}`: {message}"
                );
                // Carry the parser message in the panic payload so the
                // failure is visible even without a tracing subscriber.
                panic!("ansi_to_tui NomError while parsing ANSI input: {message}");
            }
            Error::Utf8Error(utf8error) => {
                tracing::error!("Utf8Error: {utf8error}");
                panic!("invalid UTF-8 while parsing ANSI input: {utf8error}");
            }
        },
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/app.rs | codex-rs/cloud-tasks/src/app.rs | use std::time::Duration;
use std::time::Instant;
// Environment filter data models for the TUI
/// One selectable environment row shown in the environment-filter modal.
#[derive(Clone, Debug, Default)]
pub struct EnvironmentRow {
    /// Environment identifier passed to the backend when filtering tasks.
    pub id: String,
    /// Optional human-readable label for display.
    pub label: Option<String>,
    /// Whether this environment is pinned.
    pub is_pinned: bool,
    pub repo_hints: Option<String>, // e.g., "openai/codex"
}
/// Transient UI state of the environment-picker modal.
#[derive(Clone, Debug, Default)]
pub struct EnvModalState {
    /// Current filter query text.
    pub query: String,
    /// Index of the highlighted row.
    pub selected: usize,
}
/// Transient UI state of the best-of-N picker modal.
#[derive(Clone, Debug, Default)]
pub struct BestOfModalState {
    /// Index of the highlighted option.
    pub selected: usize,
}
/// Severity of an apply/preflight result shown in the apply modal.
#[derive(Clone, Debug, Copy, PartialEq, Eq)]
pub enum ApplyResultLevel {
    Success,
    Partial,
    Error,
}
/// State backing the "apply task" modal for a single task.
#[derive(Clone, Debug)]
pub struct ApplyModalState {
    /// Task whose diff is being applied.
    pub task_id: TaskId,
    /// Title shown in the modal header.
    pub title: String,
    /// Outcome message from the last apply/preflight run, if any.
    pub result_message: Option<String>,
    /// Severity of that outcome, if any.
    pub result_level: Option<ApplyResultLevel>,
    /// Paths skipped during apply/preflight.
    pub skipped_paths: Vec<String>,
    /// Paths that conflicted during apply/preflight.
    pub conflict_paths: Vec<String>,
    /// Diff text to apply instead of the task's own diff, when set.
    pub diff_override: Option<String>,
}
use crate::scrollable_diff::ScrollableDiff;
use codex_cloud_tasks_client::CloudBackend;
use codex_cloud_tasks_client::TaskId;
use codex_cloud_tasks_client::TaskSummary;
/// Top-level TUI state for the cloud-tasks browser.
#[derive(Default)]
pub struct App {
    /// Tasks currently shown in the list (review-only tasks filtered out).
    pub tasks: Vec<TaskSummary>,
    /// Index of the selected task in `tasks`.
    pub selected: usize,
    /// Status line text shown to the user.
    pub status: String,
    /// Task-detail overlay, when open.
    pub diff_overlay: Option<DiffOverlay>,
    pub spinner_start: Option<Instant>,
    pub refresh_inflight: bool,
    pub details_inflight: bool,
    // Environment filter state
    pub env_filter: Option<String>,
    pub env_modal: Option<EnvModalState>,
    pub apply_modal: Option<ApplyModalState>,
    pub best_of_modal: Option<BestOfModalState>,
    pub environments: Vec<EnvironmentRow>,
    pub env_last_loaded: Option<std::time::Instant>,
    pub env_loading: bool,
    pub env_error: Option<String>,
    // New Task page
    pub new_task: Option<crate::new_task::NewTaskPage>,
    /// Number of attempts requested for new tasks (best-of-N).
    pub best_of_n: usize,
    // Apply preflight spinner state
    pub apply_preflight_inflight: bool,
    // Apply action spinner state
    pub apply_inflight: bool,
    // Background enrichment coordination
    pub list_generation: u64,
    pub in_flight: std::collections::HashSet<String>,
    // Background enrichment caches were planned; currently unused.
}
impl App {
    /// Initial application state: empty task list, default filters, a
    /// best-of-N of 1, and the "Press r to refresh" status hint.
    pub fn new() -> Self {
        // Every field other than `status` and `best_of_n` starts at its
        // derived Default value, which matches the explicit literal exactly.
        Self {
            status: "Press r to refresh".to_string(),
            best_of_n: 1,
            ..Self::default()
        }
    }
    /// Move the selection down one row, clamped to the last task.
    pub fn next(&mut self) {
        let Some(last) = self.tasks.len().checked_sub(1) else {
            return;
        };
        self.selected = (self.selected + 1).min(last);
    }
    /// Move the selection up one row, clamped to the first task.
    pub fn prev(&mut self) {
        if self.tasks.is_empty() {
            return;
        }
        self.selected = self.selected.saturating_sub(1);
    }
}
/// Fetch the task list from `backend`, optionally scoped to environment
/// `env`, with a five-second timeout. Review-only tasks are filtered out.
pub async fn load_tasks(
    backend: &dyn CloudBackend,
    env: Option<&str>,
) -> anyhow::Result<Vec<TaskSummary>> {
    // In later milestones, add a small debounce, spinner, and error display.
    let tasks = tokio::time::timeout(Duration::from_secs(5), backend.list_tasks(env)).await??;
    // Hide review-only tasks from the main list.
    let filtered: Vec<TaskSummary> = tasks.into_iter().filter(|t| !t.is_review).collect();
    Ok(filtered)
}
/// State of the task-detail overlay: the scrollable diff/text view plus the
/// per-attempt data and selection for best-of-N tasks.
pub struct DiffOverlay {
    /// Overlay title shown in the header.
    pub title: String,
    /// Task this overlay describes.
    pub task_id: TaskId,
    /// Scrollable content widget rendering the current view.
    pub sd: ScrollableDiff,
    pub base_can_apply: bool,
    /// Rendered diff lines of the currently selected attempt.
    pub diff_lines: Vec<String>,
    /// Rendered output lines of the currently selected attempt.
    pub text_lines: Vec<String>,
    /// Prompt text of the currently selected attempt, if any.
    pub prompt: Option<String>,
    /// One entry per attempt; index 0 is the base attempt.
    pub attempts: Vec<AttemptView>,
    /// Index into `attempts` of the attempt being shown.
    pub selected_attempt: usize,
    /// Which pane (diff or prompt/output) is displayed.
    pub current_view: DetailView,
    pub base_turn_id: Option<String>,
    pub sibling_turn_ids: Vec<String>,
    /// Expected number of attempts, when known before all have loaded.
    pub attempt_total_hint: Option<usize>,
}
/// Data for a single attempt (turn) of a task shown in the detail overlay.
#[derive(Clone, Debug, Default)]
pub struct AttemptView {
    /// Identifier of the turn backing this attempt, when known.
    pub turn_id: Option<String>,
    /// Completion status of the attempt.
    pub status: codex_cloud_tasks_client::AttemptStatus,
    pub attempt_placement: Option<i64>,
    /// Rendered diff lines for display.
    pub diff_lines: Vec<String>,
    /// Rendered output lines for display.
    pub text_lines: Vec<String>,
    /// Prompt text, if available.
    pub prompt: Option<String>,
    /// Raw (unrendered) diff, used when applying.
    pub diff_raw: Option<String>,
}
impl AttemptView {
    /// Whether any rendered diff lines are available for this attempt.
    pub fn has_diff(&self) -> bool {
        self.diff_lines.first().is_some()
    }
    /// Whether any textual content (output lines or a prompt) is available.
    pub fn has_text(&self) -> bool {
        self.prompt.is_some() || !self.text_lines.is_empty()
    }
}
impl DiffOverlay {
    /// Create an overlay with one empty placeholder attempt, starting on the
    /// prompt view.
    pub fn new(task_id: TaskId, title: String, attempt_total_hint: Option<usize>) -> Self {
        let mut sd = ScrollableDiff::new();
        sd.set_content(Vec::new());
        Self {
            title,
            task_id,
            sd,
            base_can_apply: false,
            diff_lines: Vec::new(),
            text_lines: Vec::new(),
            prompt: None,
            attempts: vec![AttemptView::default()],
            selected_attempt: 0,
            current_view: DetailView::Prompt,
            base_turn_id: None,
            sibling_turn_ids: Vec::new(),
            attempt_total_hint,
        }
    }
    /// The attempt currently selected, if the index is valid.
    pub fn current_attempt(&self) -> Option<&AttemptView> {
        self.attempts.get(self.selected_attempt)
    }
    /// Mutable access to the base attempt (index 0), creating it if the
    /// attempts list is somehow empty.
    pub fn base_attempt_mut(&mut self) -> &mut AttemptView {
        if self.attempts.is_empty() {
            self.attempts.push(AttemptView::default());
        }
        &mut self.attempts[0]
    }
    /// Switch between the diff and prompt views and refresh the rendered
    /// content accordingly.
    pub fn set_view(&mut self, view: DetailView) {
        self.current_view = view;
        self.apply_selection_to_fields();
    }
    /// Expected number of attempts: the hint when present, otherwise the
    /// count of loaded attempts (None if none are loaded).
    pub fn expected_attempts(&self) -> Option<usize> {
        self.attempt_total_hint.or({
            if self.attempts.is_empty() {
                None
            } else {
                Some(self.attempts.len())
            }
        })
    }
    /// Number of attempts loaded so far.
    pub fn attempt_count(&self) -> usize {
        self.attempts.len()
    }
    /// Attempt total to display; at least 1 even before anything loads.
    pub fn attempt_display_total(&self) -> usize {
        self.expected_attempts()
            .unwrap_or_else(|| self.attempts.len().max(1))
    }
    /// Move the attempt selection by `delta`, wrapping around. Returns false
    /// when there is nothing to cycle through (0 or 1 attempts).
    pub fn step_attempt(&mut self, delta: isize) -> bool {
        let total = self.attempts.len();
        if total <= 1 {
            return false;
        }
        let total_isize = total as isize;
        let current = self.selected_attempt as isize;
        let mut next = current + delta;
        // Euclidean modulo so negative deltas wrap to the end.
        next = ((next % total_isize) + total_isize) % total_isize;
        let next = next as usize;
        self.selected_attempt = next;
        self.apply_selection_to_fields();
        true
    }
    /// Whether the currently shown content can be applied: must be the diff
    /// view with a non-empty raw diff for the selected attempt.
    pub fn current_can_apply(&self) -> bool {
        matches!(self.current_view, DetailView::Diff)
            && self
                .current_attempt()
                .and_then(|attempt| attempt.diff_raw.as_ref())
                .map(|diff| !diff.is_empty())
                .unwrap_or(false)
    }
    /// Copy the selected attempt's content into the overlay's display fields
    /// and load the appropriate pane (diff or prompt/output) into the
    /// scrollable widget, with placeholder text for empty content.
    pub fn apply_selection_to_fields(&mut self) {
        let (diff_lines, text_lines, prompt) = if let Some(attempt) = self.current_attempt() {
            (
                attempt.diff_lines.clone(),
                attempt.text_lines.clone(),
                attempt.prompt.clone(),
            )
        } else {
            // Selection points past the loaded attempts: clear everything and
            // show a loading placeholder.
            self.diff_lines.clear();
            self.text_lines.clear();
            self.prompt = None;
            self.sd.set_content(vec!["<loading attempt>".to_string()]);
            return;
        };
        self.diff_lines = diff_lines.clone();
        self.text_lines = text_lines.clone();
        self.prompt = prompt;
        match self.current_view {
            DetailView::Diff => {
                if diff_lines.is_empty() {
                    self.sd.set_content(vec!["<no diff available>".to_string()]);
                } else {
                    self.sd.set_content(diff_lines);
                }
            }
            DetailView::Prompt => {
                if text_lines.is_empty() {
                    self.sd.set_content(vec!["<no output>".to_string()]);
                } else {
                    self.sd.set_content(text_lines);
                }
            }
        }
    }
}
/// Which pane of the task detail overlay is shown.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DetailView {
    /// The task's diff.
    Diff,
    /// The task's prompt/output text.
    Prompt,
}
/// Internal app events delivered from background tasks.
/// These let the UI event loop remain responsive and keep the spinner animating.
#[derive(Debug)]
pub enum AppEvent {
    /// Task list fetch finished for the given environment filter.
    TasksLoaded {
        env: Option<String>,
        result: anyhow::Result<Vec<TaskSummary>>,
    },
    // Background diff summary events were planned; removed for now to keep code minimal.
    /// Autodetection of a likely environment id finished
    EnvironmentAutodetected(anyhow::Result<crate::env_detect::AutodetectSelection>),
    /// Background completion of environment list fetch
    EnvironmentsLoaded(anyhow::Result<Vec<EnvironmentRow>>),
    /// A task's diff finished loading for the detail overlay.
    DetailsDiffLoaded {
        id: TaskId,
        title: String,
        diff: String,
    },
    /// A task's messages/prompt finished loading for the detail overlay.
    DetailsMessagesLoaded {
        id: TaskId,
        title: String,
        messages: Vec<String>,
        prompt: Option<String>,
        turn_id: Option<String>,
        sibling_turn_ids: Vec<String>,
        attempt_placement: Option<i64>,
        attempt_status: codex_cloud_tasks_client::AttemptStatus,
    },
    /// Loading task details failed with the given error text.
    DetailsFailed {
        id: TaskId,
        title: String,
        error: String,
    },
    /// Sibling attempts for a best-of-N task finished loading.
    AttemptsLoaded {
        id: TaskId,
        attempts: Vec<codex_cloud_tasks_client::TurnAttempt>,
    },
    /// Background completion of new task submission
    NewTaskSubmitted(Result<codex_cloud_tasks_client::CreatedTask, String>),
    /// Background completion of apply preflight when opening modal or on demand
    ApplyPreflightFinished {
        id: TaskId,
        title: String,
        message: String,
        level: ApplyResultLevel,
        skipped: Vec<String>,
        conflicts: Vec<String>,
    },
    /// Background completion of apply action (actual patch application)
    ApplyFinished {
        id: TaskId,
        result: std::result::Result<codex_cloud_tasks_client::ApplyOutcome, String>,
    },
}
// Convenience aliases; currently unused.
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use codex_cloud_tasks_client::CloudTaskError;
    /// Backend stub that serves canned task titles keyed by environment.
    struct FakeBackend {
        // maps env key to titles
        by_env: std::collections::HashMap<Option<String>, Vec<&'static str>>,
    }
    #[async_trait::async_trait]
    impl codex_cloud_tasks_client::CloudBackend for FakeBackend {
        /// Build one Ready, non-review task per configured title; unknown
        /// environments fall back to two default titles.
        async fn list_tasks(
            &self,
            env: Option<&str>,
        ) -> codex_cloud_tasks_client::Result<Vec<TaskSummary>> {
            let key = env.map(str::to_string);
            let titles = self
                .by_env
                .get(&key)
                .cloned()
                .unwrap_or_else(|| vec!["default-a", "default-b"]);
            let mut out = Vec::new();
            for (i, t) in titles.into_iter().enumerate() {
                out.push(TaskSummary {
                    id: TaskId(format!("T-{i}")),
                    title: t.to_string(),
                    status: codex_cloud_tasks_client::TaskStatus::Ready,
                    updated_at: Utc::now(),
                    environment_id: env.map(str::to_string),
                    environment_label: None,
                    summary: codex_cloud_tasks_client::DiffSummary::default(),
                    is_review: false,
                    attempt_total: Some(1),
                });
            }
            Ok(out)
        }
        /// Look the task up in the unfiltered list by id.
        async fn get_task_summary(
            &self,
            id: TaskId,
        ) -> codex_cloud_tasks_client::Result<TaskSummary> {
            self.list_tasks(None)
                .await?
                .into_iter()
                .find(|t| t.id == id)
                .ok_or_else(|| CloudTaskError::Msg(format!("Task {} not found", id.0)))
        }
        async fn get_task_diff(
            &self,
            _id: TaskId,
        ) -> codex_cloud_tasks_client::Result<Option<String>> {
            Err(codex_cloud_tasks_client::CloudTaskError::Unimplemented(
                "not used in test",
            ))
        }
        async fn get_task_messages(
            &self,
            _id: TaskId,
        ) -> codex_cloud_tasks_client::Result<Vec<String>> {
            Ok(vec![])
        }
        /// Return a fixed completed attempt with an example prompt.
        async fn get_task_text(
            &self,
            _id: TaskId,
        ) -> codex_cloud_tasks_client::Result<codex_cloud_tasks_client::TaskText> {
            Ok(codex_cloud_tasks_client::TaskText {
                prompt: Some("Example prompt".to_string()),
                messages: Vec::new(),
                turn_id: Some("fake-turn".to_string()),
                sibling_turn_ids: Vec::new(),
                attempt_placement: Some(0),
                attempt_status: codex_cloud_tasks_client::AttemptStatus::Completed,
            })
        }
        async fn list_sibling_attempts(
            &self,
            _task: TaskId,
            _turn_id: String,
        ) -> codex_cloud_tasks_client::Result<Vec<codex_cloud_tasks_client::TurnAttempt>> {
            Ok(Vec::new())
        }
        async fn apply_task(
            &self,
            _id: TaskId,
            _diff_override: Option<String>,
        ) -> codex_cloud_tasks_client::Result<codex_cloud_tasks_client::ApplyOutcome> {
            Err(codex_cloud_tasks_client::CloudTaskError::Unimplemented(
                "not used in test",
            ))
        }
        async fn apply_task_preflight(
            &self,
            _id: TaskId,
            _diff_override: Option<String>,
        ) -> codex_cloud_tasks_client::Result<codex_cloud_tasks_client::ApplyOutcome> {
            Err(codex_cloud_tasks_client::CloudTaskError::Unimplemented(
                "not used in test",
            ))
        }
        async fn create_task(
            &self,
            _env_id: &str,
            _prompt: &str,
            _git_ref: &str,
            _qa_mode: bool,
            _best_of_n: usize,
        ) -> codex_cloud_tasks_client::Result<codex_cloud_tasks_client::CreatedTask> {
            Err(codex_cloud_tasks_client::CloudTaskError::Unimplemented(
                "not used in test",
            ))
        }
    }
    /// `load_tasks` must pass the env filter through to the backend and
    /// return that environment's tasks.
    #[tokio::test]
    async fn load_tasks_uses_env_parameter() {
        // Arrange: env-specific task titles
        let mut by_env = std::collections::HashMap::new();
        by_env.insert(None, vec!["root-1", "root-2"]);
        by_env.insert(Some("env-A".to_string()), vec!["A-1"]);
        by_env.insert(Some("env-B".to_string()), vec!["B-1", "B-2", "B-3"]);
        let backend = FakeBackend { by_env };
        // Act + Assert
        let root = load_tasks(&backend, None).await.unwrap();
        assert_eq!(root.len(), 2);
        assert_eq!(root[0].title, "root-1");
        let a = load_tasks(&backend, Some("env-A")).await.unwrap();
        assert_eq!(a.len(), 1);
        assert_eq!(a[0].title, "A-1");
        let b = load_tasks(&backend, Some("env-B")).await.unwrap();
        assert_eq!(b.len(), 3);
        assert_eq!(b[2].title, "B-3");
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/lib.rs | codex-rs/cloud-tasks/src/lib.rs | mod app;
mod cli;
pub mod env_detect;
mod new_task;
pub mod scrollable_diff;
mod ui;
pub mod util;
pub use cli::Cli;
use anyhow::anyhow;
use chrono::Utc;
use codex_cloud_tasks_client::TaskStatus;
use codex_login::AuthManager;
use owo_colors::OwoColorize;
use owo_colors::Stream;
use std::cmp::Ordering;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use supports_color::Stream as SupportStream;
use tokio::sync::mpsc::UnboundedSender;
use tracing::info;
use tracing_subscriber::EnvFilter;
use util::append_error_log;
use util::format_relative_time;
use util::set_user_agent_suffix;
/// A pending apply request: which task to apply and an optional diff that
/// overrides the task's own diff.
struct ApplyJob {
    task_id: codex_cloud_tasks_client::TaskId,
    diff_override: Option<String>,
}
/// A configured cloud backend together with the base URL it talks to.
struct BackendContext {
    backend: Arc<dyn codex_cloud_tasks_client::CloudBackend>,
    base_url: String,
}
/// Build the cloud backend used by all subcommands.
///
/// Honors `CODEX_CLOUD_TASKS_MODE=mock` (returns the mock client) and
/// `CODEX_CLOUD_TASKS_BASE_URL` (defaults to the ChatGPT backend API). For
/// the real client it loads ChatGPT auth and exits the process with status 1
/// when no usable login/token is available.
async fn init_backend(user_agent_suffix: &str) -> anyhow::Result<BackendContext> {
    let use_mock = matches!(
        std::env::var("CODEX_CLOUD_TASKS_MODE").ok().as_deref(),
        Some("mock") | Some("MOCK")
    );
    let base_url = std::env::var("CODEX_CLOUD_TASKS_BASE_URL")
        .unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string());
    set_user_agent_suffix(user_agent_suffix);
    if use_mock {
        return Ok(BackendContext {
            backend: Arc::new(codex_cloud_tasks_client::MockClient),
            base_url,
        });
    }
    let ua = codex_core::default_client::get_codex_user_agent();
    let mut http = codex_cloud_tasks_client::HttpClient::new(base_url.clone())?.with_user_agent(ua);
    // Log which URL path style is in play for easier debugging.
    let style = if base_url.contains("/backend-api") {
        "wham"
    } else {
        "codex-api"
    };
    append_error_log(format!("startup: base_url={base_url} path_style={style}"));
    let auth_manager = util::load_auth_manager().await;
    let auth = match auth_manager.as_ref().and_then(AuthManager::auth) {
        Some(auth) => auth,
        None => {
            eprintln!(
                "Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
            );
            std::process::exit(1);
        }
    };
    if let Some(acc) = auth.get_account_id() {
        append_error_log(format!("auth: mode=ChatGPT account_id={acc}"));
    }
    // An empty token is treated the same as a missing one.
    let token = match auth.get_token().await {
        Ok(t) if !t.is_empty() => t,
        _ => {
            eprintln!(
                "Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'."
            );
            std::process::exit(1);
        }
    };
    http = http.with_bearer_token(token.clone());
    // Prefer the account id from auth; fall back to extracting it from the token.
    if let Some(acc) = auth
        .get_account_id()
        .or_else(|| util::extract_chatgpt_account_id(&token))
    {
        append_error_log(format!("auth: set ChatGPT-Account-Id header: {acc}"));
        http = http.with_chatgpt_account_id(acc);
    }
    Ok(BackendContext {
        backend: Arc::new(http),
        base_url,
    })
}
/// Minimal abstraction over git branch queries so tests can stub them out.
#[async_trait::async_trait]
trait GitInfoProvider {
    /// Name of the repository's default branch at `path`, if detectable.
    async fn default_branch_name(&self, path: &std::path::Path) -> Option<String>;
    /// Name of the currently checked-out branch at `path`, if detectable.
    async fn current_branch_name(&self, path: &std::path::Path) -> Option<String>;
}
/// `GitInfoProvider` backed by the real git helpers in `codex_core::git_info`.
struct RealGitInfo;
#[async_trait::async_trait]
impl GitInfoProvider for RealGitInfo {
    async fn default_branch_name(&self, path: &std::path::Path) -> Option<String> {
        codex_core::git_info::default_branch_name(path).await
    }
    async fn current_branch_name(&self, path: &std::path::Path) -> Option<String> {
        codex_core::git_info::current_branch_name(path).await
    }
}
/// Resolve the git ref a new cloud task should target, using the real git
/// helpers. See `resolve_git_ref_with_git_info` for the resolution order.
async fn resolve_git_ref(branch_override: Option<&String>) -> String {
    resolve_git_ref_with_git_info(branch_override, &RealGitInfo).await
}
async fn resolve_git_ref_with_git_info(
branch_override: Option<&String>,
git_info: &impl GitInfoProvider,
) -> String {
if let Some(branch) = branch_override {
let branch = branch.trim();
if !branch.is_empty() {
return branch.to_string();
}
}
if let Ok(cwd) = std::env::current_dir() {
if let Some(branch) = git_info.current_branch_name(&cwd).await {
branch
} else if let Some(branch) = git_info.default_branch_name(&cwd).await {
branch
} else {
"main".to_string()
}
} else {
"main".to_string()
}
}
/// `codex cloud exec`: create a new cloud task and print its URL.
///
/// The query comes from the CLI argument or stdin, the environment argument is
/// resolved to a concrete id, and the git ref is derived from the optional
/// branch override / local checkout.
async fn run_exec_command(args: crate::cli::ExecCommand) -> anyhow::Result<()> {
    let crate::cli::ExecCommand {
        query,
        environment,
        branch,
        attempts,
    } = args;
    let ctx = init_backend("codex_cloud_tasks_exec").await?;
    let prompt = resolve_query_input(query)?;
    let env_id = resolve_environment_id(&ctx, &environment).await?;
    let git_ref = resolve_git_ref(branch.as_ref()).await;
    let created = codex_cloud_tasks_client::CloudBackend::create_task(
        &*ctx.backend,
        &env_id,
        &prompt,
        &git_ref,
        false,
        attempts,
    )
    .await?;
    // Print only the task URL so the command is easy to script against.
    println!("{}", util::task_url(&ctx.base_url, &created.id.0));
    Ok(())
}
/// Resolve a user-supplied environment argument to a concrete environment id.
///
/// Matches by exact id first, then by case-insensitive label. A label shared
/// by rows with *different* ids is rejected as ambiguous; duplicate rows that
/// share one id are tolerated. Errors when the input is empty, the workspace
/// has no environments, or nothing matches.
async fn resolve_environment_id(ctx: &BackendContext, requested: &str) -> anyhow::Result<String> {
    let trimmed = requested.trim();
    if trimmed.is_empty() {
        return Err(anyhow!("environment id must not be empty"));
    }
    let normalized = util::normalize_base_url(&ctx.base_url);
    let headers = util::build_chatgpt_headers().await;
    let environments = crate::env_detect::list_environments(&normalized, &headers).await?;
    if environments.is_empty() {
        return Err(anyhow!(
            "no cloud environments are available for this workspace"
        ));
    }
    // Exact id match wins outright.
    if let Some(row) = environments.iter().find(|row| row.id == trimmed) {
        return Ok(row.id.clone());
    }
    // Otherwise fall back to case-insensitive label matching.
    let label_matches = environments
        .iter()
        .filter(|row| {
            row.label
                .as_deref()
                .map(|label| label.eq_ignore_ascii_case(trimmed))
                .unwrap_or(false)
        })
        .collect::<Vec<_>>();
    match label_matches.as_slice() {
        [] => Err(anyhow!(
            "environment '{trimmed}' not found; run `codex cloud` to list available environments"
        )),
        [single] => Ok(single.id.clone()),
        [first, rest @ ..] => {
            // Several rows may legitimately carry the same id; only distinct
            // ids make the label ambiguous.
            let first_id = &first.id;
            if rest.iter().all(|row| row.id == *first_id) {
                Ok(first_id.clone())
            } else {
                Err(anyhow!(
                    "environment label '{trimmed}' is ambiguous; run `codex cloud` to pick the desired environment id"
                ))
            }
        }
    }
}
/// Determine the task query text.
///
/// A non-"-" argument is used verbatim. `"-"` or no argument means read the
/// query from stdin; reading from an interactive terminal without an explicit
/// `"-"` is rejected so the command does not hang silently. Empty stdin input
/// is also an error.
fn resolve_query_input(query_arg: Option<String>) -> anyhow::Result<String> {
    let force_stdin = query_arg.as_deref() == Some("-");
    if let Some(q) = query_arg {
        if q != "-" {
            return Ok(q);
        }
    }
    if std::io::stdin().is_terminal() && !force_stdin {
        return Err(anyhow!(
            "no query provided. Pass one as an argument or pipe it via stdin."
        ));
    }
    if !force_stdin {
        // The user piped data without asking for stdin explicitly; mention
        // what is happening on stderr.
        eprintln!("Reading query from stdin...");
    }
    let mut buffer = String::new();
    std::io::stdin()
        .read_to_string(&mut buffer)
        .map_err(|e| anyhow!("failed to read query from stdin: {e}"))?;
    if buffer.trim().is_empty() {
        return Err(anyhow!(
            "no query provided via stdin (received empty input)."
        ));
    }
    Ok(buffer)
}
/// Parse a task id from either a bare id or a task URL.
///
/// Fragments (`#...`) and query strings (`?...`) are stripped, then the last
/// path segment is taken, so pasting a full task URL works.
fn parse_task_id(raw: &str) -> anyhow::Result<codex_cloud_tasks_client::TaskId> {
    let mut candidate = raw.trim();
    if candidate.is_empty() {
        anyhow::bail!("task id must not be empty");
    }
    // Drop fragment first, then the query string.
    for sep in ['#', '?'] {
        candidate = candidate.split(sep).next().unwrap_or(candidate);
    }
    // Keep only the last path segment.
    let id = candidate.rsplit('/').next().unwrap_or(candidate).trim();
    if id.is_empty() {
        anyhow::bail!("task id must not be empty");
    }
    Ok(codex_cloud_tasks_client::TaskId(id.to_string()))
}
/// One attempt's diff plus the metadata used to order attempts
/// (see `cmp_attempt`).
#[derive(Clone, Debug)]
struct AttemptDiffData {
    // Attempt position reported by the backend, when known; attempts with a
    // placement sort ahead of those without.
    placement: Option<i64>,
    // Creation time; used as a tie-breaker only when neither attempt has a
    // placement.
    created_at: Option<chrono::DateTime<Utc>>,
    // Diff text for this attempt.
    diff: String,
}
/// Order attempts: known placements first (ascending); when *both* placements
/// are missing, fall back to creation time with the same Some-first rule.
fn cmp_attempt(lhs: &AttemptDiffData, rhs: &AttemptDiffData) -> Ordering {
    // Some-first ascending comparison for Options; both-None compares Equal.
    fn some_first<T: Ord>(a: &Option<T>, b: &Option<T>) -> Ordering {
        match (a, b) {
            (Some(x), Some(y)) => x.cmp(y),
            (Some(_), None) => Ordering::Less,
            (None, Some(_)) => Ordering::Greater,
            (None, None) => Ordering::Equal,
        }
    }
    if lhs.placement.is_none() && rhs.placement.is_none() {
        some_first(&lhs.created_at, &rhs.created_at)
    } else {
        some_first(&lhs.placement, &rhs.placement)
    }
}
/// Collect the diffs for every attempt of `task_id`, sorted by `cmp_attempt`.
///
/// The task's own diff (when present) is included with the task's attempt
/// placement; further attempts are discovered through the task's sibling
/// attempts when a turn id is available. Errors when no attempt produced a
/// diff (the error suggests the task may still be running).
async fn collect_attempt_diffs(
    backend: &dyn codex_cloud_tasks_client::CloudBackend,
    task_id: &codex_cloud_tasks_client::TaskId,
) -> anyhow::Result<Vec<AttemptDiffData>> {
    let text =
        codex_cloud_tasks_client::CloudBackend::get_task_text(backend, task_id.clone()).await?;
    let mut attempts = Vec::new();
    if let Some(diff) =
        codex_cloud_tasks_client::CloudBackend::get_task_diff(backend, task_id.clone()).await?
    {
        attempts.push(AttemptDiffData {
            placement: text.attempt_placement,
            // The task's own diff carries no creation timestamp here.
            created_at: None,
            diff,
        });
    }
    if let Some(turn_id) = text.turn_id {
        let siblings = codex_cloud_tasks_client::CloudBackend::list_sibling_attempts(
            backend,
            task_id.clone(),
            turn_id,
        )
        .await?;
        // Only siblings that actually produced a diff count as attempts.
        for sibling in siblings {
            if let Some(diff) = sibling.diff {
                attempts.push(AttemptDiffData {
                    placement: sibling.attempt_placement,
                    created_at: sibling.created_at,
                    diff,
                });
            }
        }
    }
    attempts.sort_by(cmp_attempt);
    if attempts.is_empty() {
        anyhow::bail!(
            "No diff available for task {}; it may still be running.",
            task_id.0
        );
    }
    Ok(attempts)
}
/// Select one attempt by 1-based index (`None` means the first attempt).
///
/// Errors when the slice is empty, the index is 0, or the index exceeds the
/// number of attempts.
fn select_attempt(
    attempts: &[AttemptDiffData],
    attempt: Option<usize>,
) -> anyhow::Result<&AttemptDiffData> {
    if attempts.is_empty() {
        anyhow::bail!("No attempts available");
    }
    let desired = attempt.unwrap_or(1);
    if desired == 0 {
        return Err(anyhow!("attempt must be at least 1"));
    }
    // 1-based on the CLI, 0-based in the slice.
    attempts.get(desired - 1).ok_or_else(|| {
        anyhow!(
            "Attempt {desired} not available; only {} attempt(s) found",
            attempts.len()
        )
    })
}
/// Uppercase label for a task status, used in list/status output.
fn task_status_label(status: &TaskStatus) -> &'static str {
    match status {
        TaskStatus::Pending => "PENDING",
        TaskStatus::Ready => "READY",
        TaskStatus::Applied => "APPLIED",
        TaskStatus::Error => "ERROR",
    }
}
/// Render a one-line diff summary: `+A/-D • N file(s)`, or `no diff` when the
/// summary is entirely zero. With `colorize`, additions are green, deletions
/// red, and decorations dimmed (subject to terminal color support).
fn summary_line(summary: &codex_cloud_tasks_client::DiffSummary, colorize: bool) -> String {
    let files = summary.files_changed;
    let adds = summary.lines_added;
    let dels = summary.lines_removed;
    // Dimmed rendering for decorative fragments.
    let dim = |s: &str| -> String {
        s.if_supports_color(Stream::Stdout, |t| t.dimmed())
            .to_string()
    };
    if files == 0 && adds == 0 && dels == 0 {
        return if colorize {
            dim("no diff")
        } else {
            "no diff".to_string()
        };
    }
    let plural = if files == 1 { "" } else { "s" };
    if !colorize {
        return format!("+{adds}/-{dels} • {files} file{plural}");
    }
    let added = format!("+{adds}");
    let added = added
        .as_str()
        .if_supports_color(Stream::Stdout, |t| t.green())
        .to_string();
    let removed = format!("-{dels}");
    let removed = removed
        .as_str()
        .if_supports_color(Stream::Stdout, |t| t.red())
        .to_string();
    format!(
        "{added}/{removed} {} {files} {}{plural}",
        dim("•"),
        dim("file")
    )
}
fn format_task_status_lines(
task: &codex_cloud_tasks_client::TaskSummary,
now: chrono::DateTime<Utc>,
colorize: bool,
) -> Vec<String> {
let mut lines = Vec::new();
let status = task_status_label(&task.status);
let status = if colorize {
match task.status {
TaskStatus::Ready => status
.if_supports_color(Stream::Stdout, |t| t.green())
.to_string(),
TaskStatus::Pending => status
.if_supports_color(Stream::Stdout, |t| t.magenta())
.to_string(),
TaskStatus::Applied => status
.if_supports_color(Stream::Stdout, |t| t.blue())
.to_string(),
TaskStatus::Error => status
.if_supports_color(Stream::Stdout, |t| t.red())
.to_string(),
}
} else {
status.to_string()
};
lines.push(format!("[{status}] {}", task.title));
let mut meta_parts = Vec::new();
if let Some(label) = task.environment_label.as_deref().filter(|s| !s.is_empty()) {
if colorize {
meta_parts.push(
label
.if_supports_color(Stream::Stdout, |t| t.dimmed())
.to_string(),
);
} else {
meta_parts.push(label.to_string());
}
} else if let Some(id) = task.environment_id.as_deref() {
if colorize {
meta_parts.push(
id.if_supports_color(Stream::Stdout, |t| t.dimmed())
.to_string(),
);
} else {
meta_parts.push(id.to_string());
}
}
let when = format_relative_time(now, task.updated_at);
meta_parts.push(if colorize {
when.as_str()
.if_supports_color(Stream::Stdout, |t| t.dimmed())
.to_string()
} else {
when
});
let sep = if colorize {
" • "
.if_supports_color(Stream::Stdout, |t| t.dimmed())
.to_string()
} else {
" • ".to_string()
};
lines.push(meta_parts.join(&sep));
lines.push(summary_line(&task.summary, colorize));
lines
}
/// `codex cloud status`: print a short status block for one task.
///
/// Exits with status 1 when the task is not `Ready`, so the command can be
/// used as a polling predicate in scripts.
async fn run_status_command(args: crate::cli::StatusCommand) -> anyhow::Result<()> {
    let ctx = init_backend("codex_cloud_tasks_status").await?;
    let id = parse_task_id(&args.task_id)?;
    let summary =
        codex_cloud_tasks_client::CloudBackend::get_task_summary(&*ctx.backend, id).await?;
    let colorize = supports_color::on(SupportStream::Stdout).is_some();
    for line in format_task_status_lines(&summary, Utc::now(), colorize) {
        println!("{line}");
    }
    if !matches!(summary.status, TaskStatus::Ready) {
        std::process::exit(1);
    }
    Ok(())
}
/// `codex cloud diff`: print the raw diff of the selected attempt to stdout.
async fn run_diff_command(args: crate::cli::DiffCommand) -> anyhow::Result<()> {
    let ctx = init_backend("codex_cloud_tasks_diff").await?;
    let id = parse_task_id(&args.task_id)?;
    let attempts = collect_attempt_diffs(&*ctx.backend, &id).await?;
    let chosen = select_attempt(&attempts, args.attempt)?;
    // No trailing newline beyond what the diff itself carries.
    print!("{}", chosen.diff);
    Ok(())
}
/// `codex cloud apply`: apply the selected attempt's diff via the backend.
///
/// Prints the outcome message; any non-`Success` status exits with code 1.
async fn run_apply_command(args: crate::cli::ApplyCommand) -> anyhow::Result<()> {
    let ctx = init_backend("codex_cloud_tasks_apply").await?;
    let id = parse_task_id(&args.task_id)?;
    let attempts = collect_attempt_diffs(&*ctx.backend, &id).await?;
    let chosen = select_attempt(&attempts, args.attempt)?;
    // Pass the already-fetched diff so the backend applies exactly what the
    // user selected.
    let outcome = codex_cloud_tasks_client::CloudBackend::apply_task(
        &*ctx.backend,
        id,
        Some(chosen.diff.clone()),
    )
    .await?;
    println!("{}", outcome.message);
    if !matches!(
        outcome.status,
        codex_cloud_tasks_client::ApplyStatus::Success
    ) {
        std::process::exit(1);
    }
    Ok(())
}
/// Map a backend apply status onto the UI's result-severity level.
fn level_from_status(status: codex_cloud_tasks_client::ApplyStatus) -> app::ApplyResultLevel {
    match status {
        codex_cloud_tasks_client::ApplyStatus::Success => app::ApplyResultLevel::Success,
        codex_cloud_tasks_client::ApplyStatus::Partial => app::ApplyResultLevel::Partial,
        codex_cloud_tasks_client::ApplyStatus::Error => app::ApplyResultLevel::Error,
    }
}
/// Start an apply preflight in the background.
///
/// Returns `false` without spawning when another apply or preflight is already
/// running (the status line explains why). On success, marks the preflight as
/// in flight, requests a near-term frame so the spinner animates, and posts an
/// `ApplyPreflightFinished` event when the backend call completes.
fn spawn_preflight(
    app: &mut app::App,
    backend: &Arc<dyn codex_cloud_tasks_client::CloudBackend>,
    tx: &UnboundedSender<app::AppEvent>,
    frame_tx: &UnboundedSender<Instant>,
    title: String,
    job: ApplyJob,
) -> bool {
    // Apply and preflight are mutually exclusive; never stack them.
    if app.apply_inflight {
        app.status = "An apply is already running; wait for it to finish first.".to_string();
        return false;
    }
    if app.apply_preflight_inflight {
        app.status = "A preflight is already running; wait for it to finish first.".to_string();
        return false;
    }
    app.apply_preflight_inflight = true;
    let _ = frame_tx.send(Instant::now() + Duration::from_millis(100));
    let backend = Arc::clone(backend);
    let event_tx = tx.clone();
    tokio::spawn(async move {
        let ApplyJob {
            task_id,
            diff_override,
        } = job;
        let outcome = codex_cloud_tasks_client::CloudBackend::apply_task_preflight(
            &*backend,
            task_id.clone(),
            diff_override,
        )
        .await;
        // Both success and failure surface through the same event variant so
        // the UI has a single completion path.
        let event = match outcome {
            Ok(res) => app::AppEvent::ApplyPreflightFinished {
                id: task_id,
                title,
                message: res.message,
                level: level_from_status(res.status),
                skipped: res.skipped_paths,
                conflicts: res.conflict_paths,
            },
            Err(err) => app::AppEvent::ApplyPreflightFinished {
                id: task_id,
                title,
                message: format!("Preflight failed: {err}"),
                level: app::ApplyResultLevel::Error,
                skipped: Vec::new(),
                conflicts: Vec::new(),
            },
        };
        let _ = event_tx.send(event);
    });
    true
}
/// Start an apply in the background.
///
/// Returns `false` without spawning when an apply or preflight is already in
/// flight (the status line explains why). Otherwise marks the apply in flight,
/// requests a near-term frame for the spinner, and posts `ApplyFinished` with
/// either the backend outcome or the stringified error.
fn spawn_apply(
    app: &mut app::App,
    backend: &Arc<dyn codex_cloud_tasks_client::CloudBackend>,
    tx: &UnboundedSender<app::AppEvent>,
    frame_tx: &UnboundedSender<Instant>,
    job: ApplyJob,
) -> bool {
    if app.apply_inflight {
        app.status = "An apply is already running; wait for it to finish first.".to_string();
        return false;
    }
    if app.apply_preflight_inflight {
        app.status = "Finish the current preflight before starting another apply.".to_string();
        return false;
    }
    app.apply_inflight = true;
    let _ = frame_tx.send(Instant::now() + Duration::from_millis(100));
    let backend = Arc::clone(backend);
    let event_tx = tx.clone();
    tokio::spawn(async move {
        let ApplyJob {
            task_id,
            diff_override,
        } = job;
        // Errors are carried to the UI as strings inside the event.
        let outcome = codex_cloud_tasks_client::CloudBackend::apply_task(
            &*backend,
            task_id.clone(),
            diff_override,
        )
        .await
        .map_err(|e| format!("{e}"));
        let _ = event_tx.send(app::AppEvent::ApplyFinished {
            id: task_id,
            result: outcome,
        });
    });
    true
}
// logging helper lives in util module
// (no standalone patch summarizer needed – UI displays raw diffs)
/// Entry point for the `codex cloud` subcommand.
pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> {
if let Some(command) = cli.command {
return match command {
crate::cli::Command::Exec(args) => run_exec_command(args).await,
crate::cli::Command::Status(args) => run_status_command(args).await,
crate::cli::Command::Apply(args) => run_apply_command(args).await,
crate::cli::Command::Diff(args) => run_diff_command(args).await,
};
}
let Cli { .. } = cli;
// Very minimal logging setup; mirrors other crates' pattern.
let default_level = "error";
let _ = tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new(default_level))
.unwrap_or_else(|_| EnvFilter::new(default_level)),
)
.with_ansi(std::io::stderr().is_terminal())
.with_writer(std::io::stderr)
.try_init();
info!("Launching Cloud Tasks list UI");
let BackendContext { backend, .. } = init_backend("codex_cloud_tasks_tui").await?;
let backend = backend;
// Terminal setup
use crossterm::ExecutableCommand;
use crossterm::event::DisableBracketedPaste;
use crossterm::event::EnableBracketedPaste;
use crossterm::event::KeyboardEnhancementFlags;
use crossterm::event::PopKeyboardEnhancementFlags;
use crossterm::event::PushKeyboardEnhancementFlags;
use crossterm::terminal::EnterAlternateScreen;
use crossterm::terminal::LeaveAlternateScreen;
use crossterm::terminal::disable_raw_mode;
use crossterm::terminal::enable_raw_mode;
use ratatui::Terminal;
use ratatui::backend::CrosstermBackend;
let mut stdout = std::io::stdout();
enable_raw_mode()?;
stdout.execute(EnterAlternateScreen)?;
stdout.execute(EnableBracketedPaste)?;
// Enable enhanced key reporting so Shift+Enter is distinguishable from Enter.
// Some terminals may not support these flags; ignore errors if enabling fails.
let _ = crossterm::execute!(
std::io::stdout(),
PushKeyboardEnhancementFlags(
KeyboardEnhancementFlags::DISAMBIGUATE_ESCAPE_CODES
| KeyboardEnhancementFlags::REPORT_EVENT_TYPES
| KeyboardEnhancementFlags::REPORT_ALTERNATE_KEYS
)
);
let backend_ui = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend_ui)?;
terminal.clear()?;
// App state
let mut app = app::App::new();
// Initial load
let force_internal = matches!(
std::env::var("CODEX_CLOUD_TASKS_FORCE_INTERNAL")
.ok()
.as_deref(),
Some("1") | Some("true") | Some("TRUE")
);
append_error_log(format!(
"startup: wham_force_internal={} ua={}",
force_internal,
codex_core::default_client::get_codex_user_agent()
));
// Non-blocking initial load so the in-box spinner can animate
app.status = "Loading tasks…".to_string();
app.refresh_inflight = true;
// New list generation; reset background enrichment coordination
app.list_generation = app.list_generation.saturating_add(1);
app.in_flight.clear();
// reset any in-flight enrichment state
// Event stream
use crossterm::event::Event;
use crossterm::event::EventStream;
use crossterm::event::KeyCode;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use tokio_stream::StreamExt;
let mut events = EventStream::new();
// Channel for non-blocking background loads
use tokio::sync::mpsc::unbounded_channel;
let (tx, mut rx) = unbounded_channel::<app::AppEvent>();
// Kick off the initial load in background
{
let backend = Arc::clone(&backend);
let tx = tx.clone();
tokio::spawn(async move {
let res = app::load_tasks(&*backend, None).await;
let _ = tx.send(app::AppEvent::TasksLoaded {
env: None,
result: res,
});
});
}
// Fetch environment list in parallel so the header can show friendly names quickly.
{
let tx = tx.clone();
tokio::spawn(async move {
let base_url = util::normalize_base_url(
&std::env::var("CODEX_CLOUD_TASKS_BASE_URL")
.unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string()),
);
let headers = util::build_chatgpt_headers().await;
let res = crate::env_detect::list_environments(&base_url, &headers).await;
let _ = tx.send(app::AppEvent::EnvironmentsLoaded(res));
});
}
// Try to auto-detect a likely environment id on startup and refresh if found.
// Do this concurrently so the initial list shows quickly; on success we refetch with filter.
{
let tx = tx.clone();
tokio::spawn(async move {
let base_url = util::normalize_base_url(
&std::env::var("CODEX_CLOUD_TASKS_BASE_URL")
.unwrap_or_else(|_| "https://chatgpt.com/backend-api".to_string()),
);
// Build headers: UA + ChatGPT auth if available
let headers = util::build_chatgpt_headers().await;
// Run autodetect. If it fails, we keep using "All".
let res = crate::env_detect::autodetect_environment_id(&base_url, &headers, None).await;
let _ = tx.send(app::AppEvent::EnvironmentAutodetected(res));
});
}
// Event-driven redraws with a tiny coalescing scheduler (snappy UI, no fixed 250ms tick).
let mut needs_redraw = true;
use std::time::Instant;
use tokio::time::Instant as TokioInstant;
use tokio::time::sleep_until;
let (frame_tx, mut frame_rx) = tokio::sync::mpsc::unbounded_channel::<Instant>();
let (redraw_tx, mut redraw_rx) = tokio::sync::mpsc::unbounded_channel::<()>();
// Coalesce frame requests to the earliest deadline; emit a single redraw signal.
tokio::spawn(async move {
let mut next_deadline: Option<Instant> = None;
loop {
let target =
next_deadline.unwrap_or_else(|| Instant::now() + Duration::from_secs(24 * 60 * 60));
let sleeper = sleep_until(TokioInstant::from_std(target));
tokio::pin!(sleeper);
tokio::select! {
recv = frame_rx.recv() => {
match recv {
Some(at) => {
if next_deadline.is_none_or(|cur| at < cur) {
next_deadline = Some(at);
}
continue; // recompute sleep target
}
None => break,
}
}
_ = &mut sleeper => {
if next_deadline.take().is_some() {
let _ = redraw_tx.send(());
}
}
}
}
});
// Kick an initial draw so the UI appears immediately.
let _ = frame_tx.send(Instant::now());
// Render helper to centralize immediate redraws after handling events.
let render_if_needed = |terminal: &mut Terminal<CrosstermBackend<std::io::Stdout>>,
app: &mut app::App,
needs_redraw: &mut bool|
-> anyhow::Result<()> {
if *needs_redraw {
terminal.draw(|f| ui::draw(f, app))?;
*needs_redraw = false;
}
Ok(())
};
let exit_code = loop {
tokio::select! {
// Coalesced redraw requests: spinner animation and paste-burst micro‑flush.
Some(()) = redraw_rx.recv() => {
// Micro‑flush pending first key held by paste‑burst.
if let Some(page) = app.new_task.as_mut() {
if page.composer.flush_paste_burst_if_due() { needs_redraw = true; }
if page.composer.is_in_paste_burst() {
let _ = frame_tx.send(Instant::now() + codex_tui::ComposerInput::recommended_flush_delay());
}
}
// Keep spinner pulsing only while loading.
if app.refresh_inflight
|| app.details_inflight
|| app.env_loading
|| app.apply_preflight_inflight
|| app.apply_inflight
{
if app.spinner_start.is_none() {
app.spinner_start = Some(Instant::now());
}
needs_redraw = true;
let _ = frame_tx.send(Instant::now() + Duration::from_millis(600));
} else {
app.spinner_start = None;
}
render_if_needed(&mut terminal, &mut app, &mut needs_redraw)?;
}
maybe_app_event = rx.recv() => {
if let Some(ev) = maybe_app_event {
match ev {
app::AppEvent::TasksLoaded { env, result } => {
// Only apply results for the current filter to avoid races.
if env.as_deref() != app.env_filter.as_deref() {
append_error_log(format!(
"refresh.drop: env={} current={}",
env.clone().unwrap_or_else(|| "<all>".to_string()),
app.env_filter.clone().unwrap_or_else(|| "<all>".to_string())
));
continue;
}
app.refresh_inflight = false;
match result {
Ok(tasks) => {
append_error_log(format!(
"refresh.apply: env={} count={}",
env.clone().unwrap_or_else(|| "<all>".to_string()),
tasks.len()
));
app.tasks = tasks;
if app.selected >= app.tasks.len() { app.selected = app.tasks.len().saturating_sub(1); }
app.status = "Loaded tasks".to_string();
}
Err(e) => {
append_error_log(format!("refresh load_tasks failed: {e}"));
app.status = format!("Failed to load tasks: {e}");
}
}
needs_redraw = true;
let _ = frame_tx.send(Instant::now());
}
app::AppEvent::NewTaskSubmitted(result) => {
match result {
Ok(created) => {
append_error_log(format!("new-task: created id={}", created.id.0));
app.status = format!("Submitted as {}", created.id.0);
app.new_task = None;
// Refresh tasks in background for current filter
app.status = format!("Submitted as {} — refreshing…", created.id.0);
app.refresh_inflight = true;
app.list_generation = app.list_generation.saturating_add(1);
needs_redraw = true;
let backend = Arc::clone(&backend);
let tx = tx.clone();
let env_sel = app.env_filter.clone();
tokio::spawn(async move {
let res = app::load_tasks(&*backend, env_sel.as_deref()).await;
let _ = tx.send(app::AppEvent::TasksLoaded { env: env_sel, result: res });
});
let _ = frame_tx.send(Instant::now());
}
Err(msg) => {
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/scrollable_diff.rs | codex-rs/cloud-tasks/src/scrollable_diff.rs | use unicode_width::UnicodeWidthChar;
use unicode_width::UnicodeWidthStr;
/// Scroll position and geometry for a vertical scroll view.
#[derive(Clone, Copy, Debug, Default)]
pub struct ScrollViewState {
    /// First visible wrapped row.
    pub scroll: u16,
    /// Viewport height in rows.
    pub viewport_h: u16,
    /// Total content height in wrapped rows.
    pub content_h: u16,
}

impl ScrollViewState {
    /// Clamp `scroll` so the viewport never runs past the end of the content.
    pub fn clamp(&mut self) {
        self.scroll = self
            .scroll
            .min(self.content_h.saturating_sub(self.viewport_h));
    }
}
/// A simple, local scrollable view for diffs or message text.
///
/// Owns raw lines, caches wrapped lines for a given width, and maintains
/// a small scroll state that is clamped whenever geometry shrinks.
#[derive(Clone, Debug, Default)]
pub struct ScrollableDiff {
    // Original, unwrapped content lines.
    raw: Vec<String>,
    // Cache of `raw` wrapped to `wrap_cols` columns.
    wrapped: Vec<String>,
    // For each wrapped line, the index of the `raw` line it came from.
    wrapped_src_idx: Vec<usize>,
    // Width the cache was built for; `None` forces a rewrap on the next
    // `set_width` call.
    wrap_cols: Option<u16>,
    pub state: ScrollViewState,
}
impl ScrollableDiff {
    /// Create an empty view with no content and zeroed geometry.
    pub fn new() -> Self {
        Self::default()
    }
    /// Replace the raw content lines. Does not rewrap immediately; call `set_width` next.
    pub fn set_content(&mut self, lines: Vec<String>) {
        self.raw = lines;
        self.wrapped.clear();
        self.wrapped_src_idx.clear();
        self.state.content_h = 0;
        // Force rewrap on next set_width even if width is unchanged
        self.wrap_cols = None;
    }
    /// Set the wrap width. If changed, rebuild wrapped lines and clamp scroll.
    pub fn set_width(&mut self, width: u16) {
        if self.wrap_cols == Some(width) {
            return;
        }
        self.wrap_cols = Some(width);
        self.rewrap(width);
        self.state.clamp();
    }
    /// Update viewport height and clamp scroll if needed.
    pub fn set_viewport(&mut self, height: u16) {
        self.state.viewport_h = height;
        self.state.clamp();
    }
    /// Return the cached wrapped lines. Call `set_width` first when area changes.
    pub fn wrapped_lines(&self) -> &[String] {
        &self.wrapped
    }
    /// Map from each wrapped line to the index of its source line in `raw`.
    pub fn wrapped_src_indices(&self) -> &[usize] {
        &self.wrapped_src_idx
    }
    /// Raw (unwrapped) line at `idx`; empty string when out of range.
    pub fn raw_line_at(&self, idx: usize) -> &str {
        self.raw.get(idx).map(String::as_str).unwrap_or("")
    }
    /// Scroll by a signed delta; clamps to content.
    pub fn scroll_by(&mut self, delta: i16) {
        let s = self.state.scroll as i32 + delta as i32;
        self.state.scroll = s.clamp(0, self.max_scroll() as i32) as u16;
    }
    /// Page by a signed delta; typically viewport_h - 1.
    pub fn page_by(&mut self, delta: i16) {
        self.scroll_by(delta);
    }
    /// Jump to the first line.
    pub fn to_top(&mut self) {
        self.state.scroll = 0;
    }
    /// Jump so the end of the content sits at the bottom of the viewport.
    pub fn to_bottom(&mut self) {
        self.state.scroll = self.max_scroll();
    }
    /// Optional percent scrolled; None when not enough geometry is known.
    /// The percentage tracks the bottom edge of the viewport, so 100 means
    /// the end of the content is visible.
    pub fn percent_scrolled(&self) -> Option<u8> {
        if self.state.content_h == 0 || self.state.viewport_h == 0 {
            return None;
        }
        // Content that fits entirely in the viewport has nothing to scroll.
        if self.state.content_h <= self.state.viewport_h {
            return None;
        }
        let visible_bottom = self.state.scroll.saturating_add(self.state.viewport_h) as f32;
        let pct = (visible_bottom / self.state.content_h as f32 * 100.0).round();
        Some(pct.clamp(0.0, 100.0) as u8)
    }
    // Largest scroll offset that still keeps the viewport within the content.
    fn max_scroll(&self) -> u16 {
        self.state.content_h.saturating_sub(self.state.viewport_h)
    }
    // Rebuild `wrapped`/`wrapped_src_idx` for `width` columns and refresh
    // `content_h`. Wrapping prefers the most recent soft break (whitespace or
    // common punctuation) on the current line; otherwise it hard-breaks.
    fn rewrap(&mut self, width: u16) {
        // Width 0 means "unknown": pass lines through unwrapped.
        if width == 0 {
            self.wrapped = self.raw.clone();
            self.state.content_h = self.wrapped.len() as u16;
            return;
        }
        let max_cols = width as usize;
        let mut out: Vec<String> = Vec::new();
        let mut out_idx: Vec<usize> = Vec::new();
        for (raw_idx, raw) in self.raw.iter().enumerate() {
            // Normalize tabs for width accounting (MVP: 4 spaces).
            let raw = raw.replace('\t', "    ");
            if raw.is_empty() {
                out.push(String::new());
                out_idx.push(raw_idx);
                continue;
            }
            let mut line = String::new();
            let mut line_cols = 0usize;
            let mut last_soft_idx: Option<usize> = None; // last whitespace or punctuation break
            for (_i, ch) in raw.char_indices() {
                // Embedded newlines end the current wrapped line directly.
                if ch == '\n' {
                    out.push(std::mem::take(&mut line));
                    out_idx.push(raw_idx);
                    line_cols = 0;
                    last_soft_idx = None;
                    continue;
                }
                let w = UnicodeWidthChar::width(ch).unwrap_or(0);
                if line_cols.saturating_add(w) > max_cols {
                    if let Some(split) = last_soft_idx {
                        // Break at the last soft point; whitespace around the
                        // break is trimmed away.
                        let (prefix, rest) = line.split_at(split);
                        out.push(prefix.trim_end().to_string());
                        out_idx.push(raw_idx);
                        line = rest.trim_start().to_string();
                        last_soft_idx = None;
                        // retry add current ch now that line may be shorter
                        // NOTE(review): there is no actual retry loop here —
                        // the current char is appended below without
                        // re-checking the width, so a wrapped line can exceed
                        // max_cols slightly in rare cases. Confirm intended.
                    } else if !line.is_empty() {
                        out.push(std::mem::take(&mut line));
                        out_idx.push(raw_idx);
                    }
                }
                // Record soft-break opportunities for the next overflow; the
                // recorded offset is the position *before* this char.
                if ch.is_whitespace()
                    || matches!(
                        ch,
                        ',' | ';' | '.' | ':' | ')' | ']' | '}' | '|' | '/' | '?' | '!' | '-' | '_'
                    )
                {
                    last_soft_idx = Some(line.len());
                }
                line.push(ch);
                // Recompute from the whole line so display width stays exact.
                line_cols = UnicodeWidthStr::width(line.as_str());
            }
            if !line.is_empty() {
                out.push(line);
                out_idx.push(raw_idx);
            }
        }
        self.wrapped = out;
        self.wrapped_src_idx = out_idx;
        self.state.content_h = self.wrapped.len() as u16;
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/env_detect.rs | codex-rs/cloud-tasks/src/env_detect.rs | use reqwest::header::CONTENT_TYPE;
use reqwest::header::HeaderMap;
use std::collections::HashMap;
use tracing::info;
use tracing::warn;
/// Subset of the backend's environment record needed by selection logic.
#[derive(Debug, Clone, serde::Deserialize)]
struct CodeEnvironment {
    id: String,
    // Human-friendly name, when one is set.
    #[serde(default)]
    label: Option<String>,
    // Pinned flag from the backend; pinned environments are preferred during
    // autodetection (see `pick_environment_row`).
    #[serde(default)]
    is_pinned: Option<bool>,
    // Task count used as a popularity heuristic when nothing else
    // disambiguates.
    #[serde(default)]
    task_count: Option<i64>,
}
/// Result of environment autodetection: the chosen environment id plus its
/// label, when one is known.
#[derive(Debug, Clone)]
pub struct AutodetectSelection {
    pub id: String,
    pub label: Option<String>,
}
/// Pick the most likely environment id for the current checkout.
///
/// Strategy: first query environments scoped to the local GitHub remotes,
/// then fall back to the workspace-wide environment list. In both passes
/// `pick_environment_row` chooses among candidates (label match > single
/// candidate > pinned > highest task count). Requests and responses are
/// mirrored into the debug error log; errors only when no environment exists.
pub async fn autodetect_environment_id(
    base_url: &str,
    headers: &HeaderMap,
    desired_label: Option<String>,
) -> anyhow::Result<AutodetectSelection> {
    // 1) Try repo-specific environments based on local git origins (GitHub only, like VSCode)
    let origins = get_git_origins();
    crate::append_error_log(format!("env: git origins: {origins:?}"));
    let mut by_repo_envs: Vec<CodeEnvironment> = Vec::new();
    for origin in &origins {
        if let Some((owner, repo)) = parse_owner_repo(origin) {
            // Path style depends on host: "wham" on the ChatGPT backend-api,
            // "codex-api" elsewhere.
            let url = if base_url.contains("/backend-api") {
                format!(
                    "{}/wham/environments/by-repo/{}/{}/{}",
                    base_url, "github", owner, repo
                )
            } else {
                format!(
                    "{}/api/codex/environments/by-repo/{}/{}/{}",
                    base_url, "github", owner, repo
                )
            };
            crate::append_error_log(format!("env: GET {url}"));
            match get_json::<Vec<CodeEnvironment>>(&url, headers).await {
                Ok(mut list) => {
                    crate::append_error_log(format!(
                        "env: by-repo returned {} env(s) for {owner}/{repo}",
                        list.len(),
                    ));
                    by_repo_envs.append(&mut list);
                }
                // A failed per-repo fetch is logged but not fatal; we still
                // have the workspace-wide fallback below.
                Err(e) => crate::append_error_log(format!(
                    "env: by-repo fetch failed for {owner}/{repo}: {e}"
                )),
            }
        }
    }
    if let Some(env) = pick_environment_row(&by_repo_envs, desired_label.as_deref()) {
        return Ok(AutodetectSelection {
            id: env.id.clone(),
            label: env.label.as_deref().map(str::to_owned),
        });
    }
    // 2) Fallback to the full list
    let list_url = if base_url.contains("/backend-api") {
        format!("{base_url}/wham/environments")
    } else {
        format!("{base_url}/api/codex/environments")
    };
    crate::append_error_log(format!("env: GET {list_url}"));
    // Fetch and log the full environments JSON for debugging
    let http = reqwest::Client::builder().build()?;
    let res = http.get(&list_url).headers(headers.clone()).send().await?;
    let status = res.status();
    let ct = res
        .headers()
        .get(CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("")
        .to_string();
    let body = res.text().await.unwrap_or_default();
    crate::append_error_log(format!("env: status={status} content-type={ct}"));
    match serde_json::from_str::<serde_json::Value>(&body) {
        Ok(v) => {
            let pretty = serde_json::to_string_pretty(&v).unwrap_or(body.clone());
            crate::append_error_log(format!("env: /environments JSON (pretty):\n{pretty}"));
        }
        Err(_) => crate::append_error_log(format!("env: /environments (raw):\n{body}")),
    }
    if !status.is_success() {
        anyhow::bail!("GET {list_url} failed: {status}; content-type={ct}; body={body}");
    }
    let all_envs: Vec<CodeEnvironment> = serde_json::from_str(&body).map_err(|e| {
        anyhow::anyhow!("Decode error for {list_url}: {e}; content-type={ct}; body={body}")
    })?;
    if let Some(env) = pick_environment_row(&all_envs, desired_label.as_deref()) {
        return Ok(AutodetectSelection {
            id: env.id.clone(),
            label: env.label.as_deref().map(str::to_owned),
        });
    }
    anyhow::bail!("no environments available")
}
/// Choose one environment from `envs`, or `None` when the slice is empty.
///
/// Selection priority:
/// 1. case-insensitive match against `desired_label`,
/// 2. the only candidate when exactly one exists,
/// 3. the first pinned environment,
/// 4. the environment with the highest `task_count` (missing counts as 0).
///
/// Each selection path is recorded in the debug error log.
fn pick_environment_row(
    envs: &[CodeEnvironment],
    desired_label: Option<&str>,
) -> Option<CodeEnvironment> {
    if envs.is_empty() {
        return None;
    }
    if let Some(label) = desired_label {
        let lc = label.to_lowercase();
        if let Some(e) = envs
            .iter()
            .find(|e| e.label.as_deref().unwrap_or("").to_lowercase() == lc)
        {
            crate::append_error_log(format!("env: matched by label: {label} -> {}", e.id));
            return Some(e.clone());
        }
    }
    if envs.len() == 1 {
        crate::append_error_log("env: single environment available; selecting it");
        return Some(envs[0].clone());
    }
    if let Some(e) = envs.iter().find(|e| e.is_pinned.unwrap_or(false)) {
        crate::append_error_log(format!("env: selecting pinned environment: {}", e.id));
        return Some(e.clone());
    }
    // Highest task_count as heuristic. `envs` is non-empty here, so
    // `max_by_key` always yields Some; the previous `.or_else(|| envs.first())`
    // fallback and trailing `None` were unreachable and have been removed.
    let e = envs
        .iter()
        .max_by_key(|e| e.task_count.unwrap_or(0))
        .expect("envs checked non-empty above");
    crate::append_error_log(format!("env: selecting by task_count/first: {}", e.id));
    Some(e.clone())
}
/// GET `url` with the given headers and deserialize the JSON body into `T`.
///
/// The body is read as text up front so both the HTTP-error and the
/// decode-error paths can quote the raw payload and content-type verbatim.
/// NOTE(review): builds a fresh reqwest `Client` per call; fine for rare
/// lookups, but a shared client would reuse connections if this gets hot.
async fn get_json<T: serde::de::DeserializeOwned>(
    url: &str,
    headers: &HeaderMap,
) -> anyhow::Result<T> {
    let http = reqwest::Client::builder().build()?;
    let res = http.get(url).headers(headers.clone()).send().await?;
    let status = res.status();
    // Capture content-type for diagnostics; absent/non-UTF8 values become "".
    let ct = res
        .headers()
        .get(CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("")
        .to_string();
    // Read the body even on failure so errors can include it.
    let body = res.text().await.unwrap_or_default();
    crate::append_error_log(format!("env: status={status} content-type={ct}"));
    if !status.is_success() {
        anyhow::bail!("GET {url} failed: {status}; content-type={ct}; body={body}");
    }
    let parsed = serde_json::from_str::<T>(&body).map_err(|e| {
        anyhow::anyhow!("Decode error for {url}: {e}; content-type={ct}; body={body}")
    })?;
    Ok(parsed)
}
fn get_git_origins() -> Vec<String> {
// Prefer: git config --get-regexp remote\..*\.url
let out = std::process::Command::new("git")
.args(["config", "--get-regexp", "remote\\..*\\.url"])
.output();
if let Ok(ok) = out
&& ok.status.success()
{
let s = String::from_utf8_lossy(&ok.stdout);
let mut urls = Vec::new();
for line in s.lines() {
if let Some((_, url)) = line.split_once(' ') {
urls.push(url.trim().to_string());
}
}
if !urls.is_empty() {
return uniq(urls);
}
}
// Fallback: git remote -v
let out = std::process::Command::new("git")
.args(["remote", "-v"])
.output();
if let Ok(ok) = out
&& ok.status.success()
{
let s = String::from_utf8_lossy(&ok.stdout);
let mut urls = Vec::new();
for line in s.lines() {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 2 {
urls.push(parts[1].to_string());
}
}
if !urls.is_empty() {
return uniq(urls);
}
}
Vec::new()
}
/// Return the input strings sorted and with duplicates removed.
fn uniq(v: Vec<String>) -> Vec<String> {
    // A BTreeSet both orders and de-duplicates in one pass.
    let unique: std::collections::BTreeSet<String> = v.into_iter().collect();
    unique.into_iter().collect()
}
/// Parse a GitHub remote URL into `(owner, repo)`.
///
/// Accepts scp-style SSH (`git@github.com:owner/repo.git`), URL-style SSH
/// (`ssh://git@github.com/owner/repo.git`), and HTTP(S)/git-protocol forms.
/// Returns `None` for anything that is not a recognizable GitHub URL.
/// NOTE(review): `splitn(2, '/')` folds any extra path segments into `repo`
/// (e.g. `owner/repo/extra` -> repo `repo/extra`) — confirm callers expect that.
fn parse_owner_repo(url: &str) -> Option<(String, String)> {
    // Normalize common prefixes and handle multiple SSH/HTTPS variants.
    let mut s = url.trim().to_string();
    // Drop protocol scheme for ssh URLs
    if let Some(rest) = s.strip_prefix("ssh://") {
        s = rest.to_string();
    }
    // Accept any user before @github.com. Fix: `ssh://git@github.com/owner/repo`
    // becomes `git@github.com/owner/repo` after the scheme strip — the host is
    // followed by '/', not ':', so both separators must be tried.
    for sep in ["@github.com:", "@github.com/"] {
        if let Some(idx) = s.find(sep) {
            let rest = &s[idx + sep.len()..];
            let rest = rest.trim_start_matches('/').trim_end_matches(".git");
            let mut parts = rest.splitn(2, '/');
            let owner = parts.next()?.to_string();
            let repo = parts.next()?.to_string();
            crate::append_error_log(format!("env: parsed SSH GitHub origin => {owner}/{repo}"));
            return Some((owner, repo));
        }
    }
    // HTTPS or git protocol
    for prefix in [
        "https://github.com/",
        "http://github.com/",
        "git://github.com/",
        "github.com/",
    ] {
        if let Some(rest) = s.strip_prefix(prefix) {
            let rest = rest.trim_start_matches('/').trim_end_matches(".git");
            let mut parts = rest.splitn(2, '/');
            let owner = parts.next()?.to_string();
            let repo = parts.next()?.to_string();
            crate::append_error_log(format!("env: parsed HTTP GitHub origin => {owner}/{repo}"));
            return Some((owner, repo));
        }
    }
    None
}
/// List environments for the current repo(s) with a fallback to the global list.
/// Returns a de-duplicated, sorted set suitable for the TUI modal.
///
/// Rows are merged by environment id: a label is kept from whichever source
/// supplied one first, `is_pinned` accumulates with OR, and `repo_hints`
/// records the first `owner/repo` that surfaced the row.
pub async fn list_environments(
    base_url: &str,
    headers: &HeaderMap,
) -> anyhow::Result<Vec<crate::app::EnvironmentRow>> {
    let mut map: HashMap<String, crate::app::EnvironmentRow> = HashMap::new();
    // 1) By-repo lookup for each parsed GitHub origin
    let origins = get_git_origins();
    for origin in &origins {
        if let Some((owner, repo)) = parse_owner_repo(origin) {
            // ChatGPT backend hosts expose the endpoint under /wham; other
            // hosts use the public /api/codex prefix.
            let url = if base_url.contains("/backend-api") {
                format!(
                    "{}/wham/environments/by-repo/{}/{}/{}",
                    base_url, "github", owner, repo
                )
            } else {
                format!(
                    "{}/api/codex/environments/by-repo/{}/{}/{}",
                    base_url, "github", owner, repo
                )
            };
            match get_json::<Vec<CodeEnvironment>>(&url, headers).await {
                Ok(list) => {
                    info!("env_tui: by-repo {}:{} -> {} envs", owner, repo, list.len());
                    for e in list {
                        let entry =
                            map.entry(e.id.clone())
                                .or_insert_with(|| crate::app::EnvironmentRow {
                                    id: e.id.clone(),
                                    label: e.label.clone(),
                                    is_pinned: e.is_pinned.unwrap_or(false),
                                    repo_hints: Some(format!("{owner}/{repo}")),
                                });
                        // Merge: keep label if present, or use new; accumulate pinned flag
                        if entry.label.is_none() {
                            entry.label = e.label.clone();
                        }
                        entry.is_pinned = entry.is_pinned || e.is_pinned.unwrap_or(false);
                        if entry.repo_hints.is_none() {
                            entry.repo_hints = Some(format!("{owner}/{repo}"));
                        }
                    }
                }
                Err(e) => {
                    // Best-effort: one failed by-repo lookup must not abort
                    // the whole listing.
                    warn!(
                        "env_tui: by-repo fetch failed for {}/{}: {}",
                        owner, repo, e
                    );
                }
            }
        }
    }
    // 2) Fallback to the full list; on error return what we have if any.
    let list_url = if base_url.contains("/backend-api") {
        format!("{base_url}/wham/environments")
    } else {
        format!("{base_url}/api/codex/environments")
    };
    match get_json::<Vec<CodeEnvironment>>(&list_url, headers).await {
        Ok(list) => {
            info!("env_tui: global list -> {} envs", list.len());
            for e in list {
                let entry = map
                    .entry(e.id.clone())
                    .or_insert_with(|| crate::app::EnvironmentRow {
                        id: e.id.clone(),
                        label: e.label.clone(),
                        is_pinned: e.is_pinned.unwrap_or(false),
                        repo_hints: None,
                    });
                if entry.label.is_none() {
                    entry.label = e.label.clone();
                }
                entry.is_pinned = entry.is_pinned || e.is_pinned.unwrap_or(false);
            }
        }
        Err(e) => {
            // Fail hard only when the by-repo pass also produced nothing.
            if map.is_empty() {
                return Err(e);
            } else {
                warn!(
                    "env_tui: global list failed; using by-repo results only: {}",
                    e
                );
            }
        }
    }
    let mut rows: Vec<crate::app::EnvironmentRow> = map.into_values().collect();
    rows.sort_by(|a, b| {
        // pinned first
        let p = b.is_pinned.cmp(&a.is_pinned);
        if p != std::cmp::Ordering::Equal {
            return p;
        }
        // then label (ci), then id
        let al = a.label.as_deref().unwrap_or("").to_lowercase();
        let bl = b.label.as_deref().unwrap_or("").to_lowercase();
        let l = al.cmp(&bl);
        if l != std::cmp::Ordering::Equal {
            return l;
        }
        a.id.cmp(&b.id)
    });
    Ok(rows)
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/cli.rs | codex-rs/cloud-tasks/src/cli.rs | use clap::Args;
use clap::Parser;
use codex_common::CliConfigOverrides;
// Top-level CLI for the cloud-tasks binary. `config_overrides` is skipped by
// clap and populated by the embedding caller; `command` is optional —
// presumably its absence launches the interactive TUI (confirm in main).
// NOTE: comments here deliberately use `//` — clap turns `///` into help text.
#[derive(Parser, Debug, Default)]
#[command(version)]
pub struct Cli {
    #[clap(skip)]
    pub config_overrides: CliConfigOverrides,
    #[command(subcommand)]
    pub command: Option<Command>,
}
// Non-interactive subcommands; each wraps its own argument struct below.
// (The `///` lines are surfaced by clap as subcommand help text.)
#[derive(Debug, clap::Subcommand)]
pub enum Command {
    /// Submit a new Codex Cloud task without launching the TUI.
    Exec(ExecCommand),
    /// Show the status of a Codex Cloud task.
    Status(StatusCommand),
    /// Apply the diff for a Codex Cloud task locally.
    Apply(ApplyCommand),
    /// Show the unified diff for a Codex Cloud task.
    Diff(DiffCommand),
}
// Arguments for `exec`: prompt, target environment, attempt count and branch.
// `attempts` is validated to 1..=4 by `parse_attempts`.
#[derive(Debug, Args)]
pub struct ExecCommand {
    /// Task prompt to run in Codex Cloud.
    #[arg(value_name = "QUERY")]
    pub query: Option<String>,
    /// Target environment identifier (see `codex cloud` to browse).
    #[arg(long = "env", value_name = "ENV_ID")]
    pub environment: String,
    /// Number of assistant attempts (best-of-N).
    #[arg(
        long = "attempts",
        default_value_t = 1usize,
        value_parser = parse_attempts
    )]
    pub attempts: usize,
    /// Git branch to run in Codex Cloud (defaults to current branch).
    #[arg(long = "branch", value_name = "BRANCH")]
    pub branch: Option<String>,
}
/// Clap value parser for attempt counts: accepts integers in 1..=4.
///
/// Non-numeric input and out-of-range values yield distinct error strings so
/// the user knows which constraint was violated.
fn parse_attempts(input: &str) -> Result<usize, String> {
    match input.parse::<usize>() {
        Ok(n) if (1..=4).contains(&n) => Ok(n),
        Ok(_) => Err("attempts must be between 1 and 4".to_string()),
        Err(_) => Err("attempts must be an integer between 1 and 4".to_string()),
    }
}
// Arguments for `status`: just the task to inspect.
#[derive(Debug, Args)]
pub struct StatusCommand {
    /// Codex Cloud task identifier to inspect.
    #[arg(value_name = "TASK_ID")]
    pub task_id: String,
}
// Arguments for `apply`. `attempt` reuses `parse_attempts`, so it is bounded
// to 1..=4 like the best-of-N setting it selects from.
#[derive(Debug, Args)]
pub struct ApplyCommand {
    /// Codex Cloud task identifier to apply.
    #[arg(value_name = "TASK_ID")]
    pub task_id: String,
    /// Attempt number to apply (1-based).
    #[arg(long = "attempt", value_parser = parse_attempts, value_name = "N")]
    pub attempt: Option<usize>,
}
// Arguments for `diff`; mirrors ApplyCommand (same 1..=4 attempt bound).
#[derive(Debug, Args)]
pub struct DiffCommand {
    /// Codex Cloud task identifier to display.
    #[arg(value_name = "TASK_ID")]
    pub task_id: String,
    /// Attempt number to display (1-based).
    #[arg(long = "attempt", value_parser = parse_attempts, value_name = "N")]
    pub attempt: Option<usize>,
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/util.rs | codex-rs/cloud-tasks/src/util.rs | use base64::Engine as _;
use chrono::DateTime;
use chrono::Local;
use chrono::Utc;
use reqwest::header::HeaderMap;
use codex_core::config::Config;
use codex_login::AuthManager;
/// Record `suffix` in the process-wide user-agent suffix slot.
///
/// Best-effort: a poisoned lock is silently ignored, matching the original
/// behavior of only acting on a successful lock.
pub fn set_user_agent_suffix(suffix: &str) {
    let Ok(mut guard) = codex_core::default_client::USER_AGENT_SUFFIX.lock() else {
        return;
    };
    guard.replace(suffix.to_string());
}
/// Append a timestamped line to `error.log` in the working directory.
///
/// Best-effort: failures to open or write the file are silently ignored so
/// logging can never take down the caller.
pub fn append_error_log(message: impl AsRef<str>) {
    use std::io::Write as _;
    let timestamp = Utc::now().to_rfc3339();
    let file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open("error.log");
    if let Ok(mut f) = file {
        let _ = writeln!(f, "[{timestamp}] {}", message.as_ref());
    }
}
/// Normalize the configured base URL to a canonical form used by the backend client.
///
/// - strips all trailing '/' characters
/// - appends '/backend-api' for known ChatGPT hosts when the URL does not
///   already contain that segment
pub fn normalize_base_url(input: &str) -> String {
    let trimmed = input.trim_end_matches('/');
    let is_chatgpt_host = trimmed.starts_with("https://chatgpt.com")
        || trimmed.starts_with("https://chat.openai.com");
    if is_chatgpt_host && !trimmed.contains("/backend-api") {
        format!("{trimmed}/backend-api")
    } else {
        trimmed.to_string()
    }
}
/// Extract the ChatGPT account id from a JWT token, when present.
///
/// Requires the first three '.'-separated segments to be non-empty (extra
/// segments beyond three are ignored), base64url-decodes the payload, and
/// reads `chatgpt_account_id` from the `https://api.openai.com/auth` claim.
/// No signature validation is performed — this only reads claim data.
pub fn extract_chatgpt_account_id(token: &str) -> Option<String> {
    let mut parts = token.split('.');
    // Destructure header/payload/signature; bail on malformed tokens.
    let (_h, payload_b64, _s) = match (parts.next(), parts.next(), parts.next()) {
        (Some(h), Some(p), Some(s)) if !h.is_empty() && !p.is_empty() && !s.is_empty() => (h, p, s),
        _ => return None,
    };
    // JWT payloads are base64url without padding.
    let payload_bytes = base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(payload_b64)
        .ok()?;
    let v: serde_json::Value = serde_json::from_slice(&payload_bytes).ok()?;
    v.get("https://api.openai.com/auth")
        .and_then(|auth| auth.get("chatgpt_account_id"))
        .and_then(|id| id.as_str())
        .map(str::to_string)
}
/// Load the Codex config and build an [`AuthManager`] from it.
///
/// Returns `None` when the config cannot be loaded.
/// NOTE(review): the meaning of the `false` argument is not visible here —
/// confirm against `AuthManager::new`'s signature.
pub async fn load_auth_manager() -> Option<AuthManager> {
    // TODO: pass in cli overrides once cloud tasks properly support them.
    let config = Config::load_with_cli_overrides(Vec::new()).await.ok()?;
    Some(AuthManager::new(
        config.codex_home,
        false,
        config.cli_auth_credentials_store_mode,
    ))
}
/// Build headers for ChatGPT-backed requests: `User-Agent`, optional `Authorization`,
/// and optional `ChatGPT-Account-Id`.
///
/// Auth headers are added only when an auth manager loads, a token exists,
/// and it is non-empty; the account id comes from the auth record or, as a
/// fallback, from the token's JWT payload.
pub async fn build_chatgpt_headers() -> HeaderMap {
    use reqwest::header::AUTHORIZATION;
    use reqwest::header::HeaderName;
    use reqwest::header::HeaderValue;
    use reqwest::header::USER_AGENT;
    set_user_agent_suffix("codex_cloud_tasks_tui");
    let ua = codex_core::default_client::get_codex_user_agent();
    let mut headers = HeaderMap::new();
    // Fall back to a static UA when the computed one has invalid header chars.
    headers.insert(
        USER_AGENT,
        HeaderValue::from_str(&ua).unwrap_or(HeaderValue::from_static("codex-cli")),
    );
    if let Some(am) = load_auth_manager().await
        && let Some(auth) = am.auth()
        && let Ok(tok) = auth.get_token().await
        && !tok.is_empty()
    {
        let v = format!("Bearer {tok}");
        if let Ok(hv) = HeaderValue::from_str(&v) {
            headers.insert(AUTHORIZATION, hv);
        }
        // Prefer the stored account id; otherwise decode it from the token.
        if let Some(acc) = auth
            .get_account_id()
            .or_else(|| extract_chatgpt_account_id(&tok))
            && let Ok(name) = HeaderName::from_bytes(b"ChatGPT-Account-Id")
            && let Ok(hv) = HeaderValue::from_str(&acc)
        {
            headers.insert(name, hv);
        }
    }
    headers
}
/// Construct a browser-friendly task URL for the given backend base URL.
///
/// Strips a known API suffix (`/backend-api` or `/api/codex`) back to the
/// site root before appending the `/codex/tasks/<id>` path; a base that
/// already ends in `/codex` only gains `/tasks/<id>`.
pub fn task_url(base_url: &str, task_id: &str) -> String {
    let normalized = normalize_base_url(base_url);
    for api_suffix in ["/backend-api", "/api/codex"] {
        if let Some(root) = normalized.strip_suffix(api_suffix) {
            return format!("{root}/codex/tasks/{task_id}");
        }
    }
    if normalized.ends_with("/codex") {
        format!("{normalized}/tasks/{task_id}")
    } else {
        format!("{normalized}/codex/tasks/{task_id}")
    }
}
/// Render `ts` relative to `reference` ("42s ago", "5m ago", "3h ago").
///
/// Future timestamps clamp to "0s ago"; anything a day or more old falls
/// back to an absolute local-time stamp like "Jan  3 14:05".
pub fn format_relative_time(reference: DateTime<Utc>, ts: DateTime<Utc>) -> String {
    let secs = (reference - ts).num_seconds().max(0);
    match secs {
        0..=59 => format!("{secs}s ago"),
        60..=3599 => format!("{}m ago", secs / 60),
        3600..=86399 => format!("{}h ago", secs / 3600),
        _ => ts.with_timezone(&Local).format("%b %e %H:%M").to_string(),
    }
}
/// Convenience wrapper: [`format_relative_time`] measured against `Utc::now()`.
pub fn format_relative_time_now(ts: DateTime<Utc>) -> String {
    format_relative_time(Utc::now(), ts)
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/ui.rs | codex-rs/cloud-tasks/src/ui.rs | use ratatui::layout::Constraint;
use ratatui::layout::Direction;
use ratatui::layout::Layout;
use ratatui::prelude::*;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::widgets::Block;
use ratatui::widgets::BorderType;
use ratatui::widgets::Borders;
use ratatui::widgets::Clear;
use ratatui::widgets::List;
use ratatui::widgets::ListItem;
use ratatui::widgets::ListState;
use ratatui::widgets::Padding;
use ratatui::widgets::Paragraph;
use std::sync::OnceLock;
use std::time::Instant;
use crate::app::App;
use crate::app::AttemptView;
use crate::util::format_relative_time_now;
use codex_cloud_tasks_client::AttemptStatus;
use codex_cloud_tasks_client::TaskStatus;
use codex_tui::render_markdown_text;
/// Top-level render pass: draws the base page (task list or new-task
/// composer) plus a two-line footer, then layers any active overlays —
/// diff, environment picker, best-of-N picker, apply confirmation — on top.
pub fn draw(frame: &mut Frame, app: &mut App) {
    let area = frame.area();
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Min(1),    // list
            Constraint::Length(2), // two-line footer (help + status)
        ])
        .split(area);
    // `new_task` being present switches the main pane to the composer page.
    if app.new_task.is_some() {
        draw_new_task_page(frame, chunks[0], app);
        draw_footer(frame, chunks[1], app);
    } else {
        draw_list(frame, chunks[0], app);
        draw_footer(frame, chunks[1], app);
    }
    // Overlays render last so they sit above the base page.
    if app.diff_overlay.is_some() {
        draw_diff_overlay(frame, area, app);
    }
    if app.env_modal.is_some() {
        draw_env_modal(frame, area, app);
    }
    if app.best_of_modal.is_some() {
        draw_best_of_modal(frame, area, app);
    }
    if app.apply_modal.is_some() {
        draw_apply_modal(frame, area, app);
    }
}
// ===== Overlay helpers (geometry + styling) =====
static ROUNDED: OnceLock<bool> = OnceLock::new();
/// Whether overlay borders use rounded corners. Controlled by the
/// `CODEX_TUI_ROUNDED` env var: unset (or unreadable) and "1" mean rounded;
/// any other value disables it. Computed once and cached for the process.
fn rounded_enabled() -> bool {
    *ROUNDED.get_or_init(|| std::env::var("CODEX_TUI_ROUNDED").map_or(true, |v| v == "1"))
}
fn overlay_outer(area: Rect) -> Rect {
let outer_v = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Percentage(10),
Constraint::Percentage(80),
Constraint::Percentage(10),
])
.split(area)[1];
Layout::default()
.direction(Direction::Horizontal)
.constraints([
Constraint::Percentage(10),
Constraint::Percentage(80),
Constraint::Percentage(10),
])
.split(outer_v)[1]
}
/// Standard overlay frame: full borders (rounded when enabled) with
/// horizontal padding of 2 and vertical padding of 1.
fn overlay_block() -> Block<'static> {
    let mut block = Block::default().borders(Borders::ALL);
    if rounded_enabled() {
        block = block.border_type(BorderType::Rounded);
    }
    block.padding(Padding::new(2, 2, 1, 1))
}
/// Inner rectangle of an overlay drawn with [`overlay_block`]; accounts for
/// the border plus the block's padding.
fn overlay_content(area: Rect) -> Rect {
    overlay_block().inner(area)
}
/// Render the "New Task" composer page: a bordered frame whose title shows
/// the selected environment and attempt count, with the text composer
/// anchored to the bottom of the content area and the cursor positioned
/// where the composer requests.
pub fn draw_new_task_page(frame: &mut Frame, area: Rect, app: &mut App) {
    let title_spans = {
        let mut spans: Vec<ratatui::text::Span> = vec!["New Task".magenta().bold()];
        if let Some(id) = app
            .new_task
            .as_ref()
            .and_then(|p| p.env_id.as_ref())
            .cloned()
        {
            spans.push(" • ".into());
            // Try to map id to label
            let label = app
                .environments
                .iter()
                .find(|r| r.id == id)
                .and_then(|r| r.label.clone())
                .unwrap_or(id);
            spans.push(label.dim());
        } else {
            // No environment chosen yet — surface the fix in red.
            spans.push(" • ".into());
            spans.push("Env: none (press ctrl-o to choose)".red());
        }
        if let Some(page) = app.new_task.as_ref() {
            spans.push(" • ".into());
            let attempts = page.best_of_n;
            let label = format!(
                "{} attempt{}",
                attempts,
                if attempts == 1 { "" } else { "s" }
            );
            spans.push(label.cyan());
        }
        spans
    };
    let block = Block::default()
        .borders(Borders::ALL)
        .title(Line::from(title_spans));
    frame.render_widget(Clear, area);
    frame.render_widget(block.clone(), area);
    let content = block.inner(area);
    // Expand composer height up to (terminal height - 6), with a 3-line minimum.
    let max_allowed = frame.area().height.saturating_sub(6).max(3);
    let desired = app
        .new_task
        .as_ref()
        .map(|p| p.composer.desired_height(content.width))
        .unwrap_or(3)
        .clamp(3, max_allowed);
    // Anchor the composer to the bottom-left by allocating a flexible spacer
    // above it and a fixed `desired`-height area for the composer.
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(1), Constraint::Length(desired)])
        .split(content);
    let composer_area = rows[1];
    if let Some(page) = app.new_task.as_ref() {
        page.composer.render_ref(composer_area, frame.buffer_mut());
        // Composer renders its own footer hints; no extra row here.
    }
    // Place cursor where composer wants it
    if let Some(page) = app.new_task.as_ref()
        && let Some((x, y)) = page.composer.cursor_pos(composer_area)
    {
        frame.set_cursor_position((x, y));
    }
}
/// Render the task list pane: a bordered list whose title shows the active
/// environment filter and scroll percentage. The whole pane is dimmed while
/// a modal/overlay has focus, and a centered spinner covers it during loads.
fn draw_list(frame: &mut Frame, area: Rect, app: &mut App) {
    let items: Vec<ListItem> = app.tasks.iter().map(|t| render_task_item(app, t)).collect();
    // Selection reflects the actual task index (no artificial spacer item).
    let mut state = ListState::default().with_selected(Some(app.selected));
    // Dim task list when a modal/overlay is active to emphasize focus.
    let dim_bg = app.env_modal.is_some()
        || app.apply_modal.is_some()
        || app.best_of_modal.is_some()
        || app.diff_overlay.is_some();
    // Dynamic title includes current environment filter
    let suffix_span = if let Some(ref id) = app.env_filter {
        let label = app
            .environments
            .iter()
            .find(|r| &r.id == id)
            .and_then(|r| r.label.clone())
            .unwrap_or_else(|| "Selected".to_string());
        format!(" • {label}").dim()
    } else {
        " • All".dim()
    };
    // Percent scrolled based on selection position in the list (0% at top, 100% at bottom).
    let percent_span = if app.tasks.len() <= 1 {
        " • 0%".dim()
    } else {
        let p = ((app.selected as f32) / ((app.tasks.len() - 1) as f32) * 100.0).round() as i32;
        format!(" • {}%", p.clamp(0, 100)).dim()
    };
    let title_line = {
        let base = Line::from(vec!["Cloud Tasks".into(), suffix_span, percent_span]);
        if dim_bg {
            base.style(Style::default().add_modifier(Modifier::DIM))
        } else {
            base
        }
    };
    let block = Block::default().borders(Borders::ALL).title(title_line);
    // Render the outer block first
    frame.render_widget(block.clone(), area);
    // Draw list inside with a persistent top spacer row
    let inner = block.inner(area);
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Length(1), Constraint::Min(1)])
        .split(inner);
    let mut list = List::new(items)
        .highlight_symbol("› ")
        .highlight_style(Style::default().bold());
    if dim_bg {
        list = list.style(Style::default().add_modifier(Modifier::DIM));
    }
    frame.render_stateful_widget(list, rows[1], &mut state);
    // In-box spinner during initial/refresh loads
    if app.refresh_inflight {
        draw_centered_spinner(frame, inner, &mut app.spinner_start, "Loading tasks…");
    }
}
/// Render the two-line footer: key-binding help plus an inline activity
/// spinner on the top row, and the status/log message (sanitized to one
/// line, capped at ~2000 chars) on the bottom row. Help entries are
/// context-sensitive to the open overlay and new-task state.
fn draw_footer(frame: &mut Frame, area: Rect, app: &mut App) {
    let mut help = vec![
        "↑/↓".dim(),
        ": Move  ".dim(),
        "r".dim(),
        ": Refresh  ".dim(),
        "Enter".dim(),
        ": Open  ".dim(),
    ];
    // Apply hint; show disabled note when overlay is open without a diff.
    if let Some(ov) = app.diff_overlay.as_ref() {
        if !ov.current_can_apply() {
            help.push("a".dim());
            help.push(": Apply (disabled)  ".dim());
        } else {
            help.push("a".dim());
            help.push(": Apply  ".dim());
        }
        if ov.attempt_count() > 1 {
            help.push("Tab".dim());
            help.push(": Next attempt  ".dim());
            help.push("[ ]".dim());
            help.push(": Cycle attempts  ".dim());
        }
    } else {
        help.push("a".dim());
        help.push(": Apply  ".dim());
    }
    help.push("o : Set Env  ".dim());
    if app.new_task.is_some() {
        help.push("Ctrl+N".dim());
        help.push(format!(": Attempts {}x  ", app.best_of_n).dim());
        help.push("(editing new task)  ".dim());
    } else {
        help.push("n : New Task  ".dim());
    }
    help.extend(vec!["q".dim(), ": Quit  ".dim()]);
    // Split footer area into two rows: help+spinner (top) and status (bottom)
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Length(1), Constraint::Length(1)])
        .split(area);
    // Top row: help text + spinner at right
    let top = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Fill(1), Constraint::Length(18)])
        .split(rows[0]);
    let para = Paragraph::new(Line::from(help));
    // Draw help text; avoid clearing the whole footer area every frame.
    frame.render_widget(para, top[0]);
    // Right side: spinner or clear the spinner area if idle to prevent stale glyphs.
    if app.refresh_inflight
        || app.details_inflight
        || app.env_loading
        || app.apply_preflight_inflight
        || app.apply_inflight
    {
        draw_inline_spinner(frame, top[1], &mut app.spinner_start, "Loading…");
    } else {
        frame.render_widget(Clear, top[1]);
    }
    // Bottom row: status/log text across full width (single-line; sanitize newlines)
    let mut status_line = app.status.replace('\n', " ");
    if status_line.len() > 2000 {
        // hard cap to avoid TUI noise
        status_line.truncate(2000);
        status_line.push('…');
    }
    // Clear the status row to avoid trailing characters when the message shrinks.
    frame.render_widget(Clear, rows[1]);
    let status = Paragraph::new(status_line);
    frame.render_widget(status, rows[1]);
}
/// Render the task-details overlay: a centered framed panel showing either
/// the unified diff or the prompt/conversation view, with an optional
/// status bar (view toggles + attempt cycling) and a loading spinner while
/// details are in flight.
fn draw_diff_overlay(frame: &mut Frame, area: Rect, app: &mut App) {
    let inner = overlay_outer(area);
    if app.diff_overlay.is_none() {
        return;
    }
    let ov_can_apply = app
        .diff_overlay
        .as_ref()
        .map(super::app::DiffOverlay::current_can_apply)
        .unwrap_or(false);
    // Heuristic: a non-applyable overlay whose first line starts with
    // "Task failed:" is rendered with a [FAILED] badge in the title.
    let is_error = app
        .diff_overlay
        .as_ref()
        .and_then(|o| o.sd.wrapped_lines().first().cloned())
        .map(|s| s.trim_start().starts_with("Task failed:"))
        .unwrap_or(false)
        && !ov_can_apply;
    let title = app
        .diff_overlay
        .as_ref()
        .map(|o| o.title.clone())
        .unwrap_or_default();
    // Title block
    let title_ref = title.as_str();
    let mut title_spans: Vec<ratatui::text::Span> = if is_error {
        vec![
            "Details ".magenta(),
            "[FAILED]".red().bold(),
            " ".into(),
            title_ref.magenta(),
        ]
    } else if ov_can_apply {
        vec!["Diff: ".magenta(), title_ref.magenta()]
    } else {
        vec!["Details: ".magenta(), title_ref.magenta()]
    };
    // Append scroll percentage when the scroller can report one.
    if let Some(p) = app
        .diff_overlay
        .as_ref()
        .and_then(|o| o.sd.percent_scrolled())
    {
        title_spans.push(" • ".dim());
        title_spans.push(format!("{p}%").dim());
    }
    frame.render_widget(Clear, inner);
    frame.render_widget(
        overlay_block().title(Line::from(title_spans)).clone(),
        inner,
    );
    // Content area and optional status bar
    let content_full = overlay_content(inner);
    let mut content_area = content_full;
    if let Some(ov) = app.diff_overlay.as_mut() {
        let has_text = ov.current_attempt().is_some_and(AttemptView::has_text);
        let has_diff = ov.current_attempt().is_some_and(AttemptView::has_diff) || ov.base_can_apply;
        if has_diff || has_text {
            // Reserve a one-line status bar above the scrollable content.
            let rows = Layout::default()
                .direction(Direction::Vertical)
                .constraints([Constraint::Length(1), Constraint::Min(1)])
                .split(content_full);
            // Status bar label
            let mut spans: Vec<ratatui::text::Span> = Vec::new();
            if has_diff && has_text {
                // Both views available: show toggle labels with the active
                // one highlighted in brackets.
                let prompt_lbl = if matches!(ov.current_view, crate::app::DetailView::Prompt) {
                    "[Prompt]".magenta().bold()
                } else {
                    "Prompt".dim()
                };
                let diff_lbl = if matches!(ov.current_view, crate::app::DetailView::Diff) {
                    "[Diff]".magenta().bold()
                } else {
                    "Diff".dim()
                };
                spans.extend(vec![
                    prompt_lbl,
                    " ".into(),
                    diff_lbl,
                    "  ".into(),
                    "(← → to switch view)".dim(),
                ]);
            } else if has_text {
                spans.push("Conversation".magenta().bold());
            } else {
                spans.push("Diff".magenta().bold());
            }
            // Attempt counter; expected count takes precedence over loaded count.
            if let Some(total) = ov.expected_attempts().or({
                if ov.attempts.is_empty() {
                    None
                } else {
                    Some(ov.attempts.len())
                }
            }) && total > 1
            {
                spans.extend(vec![
                    "  ".into(),
                    format!("Attempt {}/{}", ov.selected_attempt + 1, total)
                        .bold()
                        .dim(),
                    " ".into(),
                    "(Tab/Shift-Tab or [ ] to cycle attempts)".dim(),
                ]);
            }
            frame.render_widget(Paragraph::new(Line::from(spans)), rows[0]);
            ov.sd.set_width(rows[1].width);
            ov.sd.set_viewport(rows[1].height);
            content_area = rows[1];
        } else {
            ov.sd.set_width(content_full.width);
            ov.sd.set_viewport(content_full.height);
            content_area = content_full;
        }
    }
    // Styled content render
    // Choose styling by the active view, not just presence of a diff
    let is_diff_view = app
        .diff_overlay
        .as_ref()
        .map(|o| matches!(o.current_view, crate::app::DetailView::Diff))
        .unwrap_or(false);
    let styled_lines: Vec<Line<'static>> = if is_diff_view {
        let raw = app.diff_overlay.as_ref().map(|o| o.sd.wrapped_lines());
        raw.unwrap_or(&[])
            .iter()
            .map(|l| style_diff_line(l))
            .collect()
    } else {
        app.diff_overlay
            .as_ref()
            .map(|o| style_conversation_lines(&o.sd, o.current_attempt()))
            .unwrap_or_default()
    };
    let raw_empty = app
        .diff_overlay
        .as_ref()
        .map(|o| o.sd.wrapped_lines().is_empty())
        .unwrap_or(true);
    // Spinner only while loading AND nothing to show yet; otherwise render
    // the scrolled content.
    if app.details_inflight && raw_empty {
        draw_centered_spinner(
            frame,
            content_area,
            &mut app.spinner_start,
            "Loading details…",
        );
    } else {
        let scroll = app
            .diff_overlay
            .as_ref()
            .map(|o| o.sd.state.scroll)
            .unwrap_or(0);
        let content = Paragraph::new(Text::from(styled_lines)).scroll((scroll, 0));
        frame.render_widget(content, content_area);
    }
}
/// Render the "Apply Changes?" confirmation modal: a header naming the task,
/// a body that is either a progress spinner or the apply/preflight result
/// (with conflict and skipped path lists on partial/error outcomes), and a
/// one-line key hint footer.
pub fn draw_apply_modal(frame: &mut Frame, area: Rect, app: &mut App) {
    use ratatui::widgets::Wrap;
    let inner = overlay_outer(area);
    let title = Line::from("Apply Changes?".magenta().bold());
    let block = overlay_block().title(title);
    frame.render_widget(Clear, inner);
    frame.render_widget(block.clone(), inner);
    let content = overlay_content(inner);
    if let Some(m) = &app.apply_modal {
        // Header
        let header = Paragraph::new(Line::from(
            format!("Apply '{}' ?", m.title).magenta().bold(),
        ))
        .wrap(Wrap { trim: true });
        // Footer instructions
        let footer =
            Paragraph::new(Line::from("Press Y to apply, P to preflight, N to cancel.").dim())
                .wrap(Wrap { trim: true });
        // Split into header/body/footer
        let rows = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Length(1),
                Constraint::Min(1),
                Constraint::Length(1),
            ])
            .split(content);
        frame.render_widget(header, rows[0]);
        // Body: spinner while preflight/apply runs; otherwise show result message and path lists
        if app.apply_preflight_inflight {
            draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Checking…");
        } else if app.apply_inflight {
            draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Applying…");
        } else if m.result_message.is_none() {
            draw_centered_spinner(frame, rows[1], &mut app.spinner_start, "Loading…");
        } else if let Some(msg) = &m.result_message {
            let mut body_lines: Vec<Line> = Vec::new();
            // Color the headline by outcome severity.
            let first = match m.result_level {
                Some(crate::app::ApplyResultLevel::Success) => msg.clone().green(),
                Some(crate::app::ApplyResultLevel::Partial) => msg.clone().magenta(),
                Some(crate::app::ApplyResultLevel::Error) => msg.clone().red(),
                None => msg.clone().into(),
            };
            body_lines.push(Line::from(first));
            // On partial or error, show conflicts/skips if present
            if !matches!(m.result_level, Some(crate::app::ApplyResultLevel::Success)) {
                use ratatui::text::Span;
                if !m.conflict_paths.is_empty() {
                    body_lines.push(Line::from(""));
                    body_lines.push(
                        Line::from(format!("Conflicts ({}):", m.conflict_paths.len()))
                            .red()
                            .bold(),
                    );
                    for p in &m.conflict_paths {
                        body_lines
                            .push(Line::from(vec!["  • ".into(), Span::raw(p.clone()).dim()]));
                    }
                }
                if !m.skipped_paths.is_empty() {
                    body_lines.push(Line::from(""));
                    body_lines.push(
                        Line::from(format!("Skipped ({}):", m.skipped_paths.len()))
                            .magenta()
                            .bold(),
                    );
                    for p in &m.skipped_paths {
                        body_lines
                            .push(Line::from(vec!["  • ".into(), Span::raw(p.clone()).dim()]));
                    }
                }
            }
            let body = Paragraph::new(body_lines).wrap(Wrap { trim: true });
            frame.render_widget(body, rows[1]);
        }
        frame.render_widget(footer, rows[2]);
    }
}
/// Which party "owns" a transcript line; drives the gutter glyph color and
/// header styling in the conversation view.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ConversationSpeaker {
    User,
    Assistant,
}
/// Style the pre-wrapped conversation transcript for display.
///
/// Walks the wrapped lines alongside their source-line indices, tracking:
/// - the current speaker ("user:" / "assistant:" sentinel lines switch it),
/// - whether we are inside a fenced code block (``` toggles),
/// - the indent of an active bullet item (for continuation alignment).
/// Sentinel lines are replaced by styled header lines; every other line
/// gains a colored gutter and per-kind text styling. Falls back to the raw
/// wrapped text when no lines were styled at all.
fn style_conversation_lines(
    sd: &crate::scrollable_diff::ScrollableDiff,
    attempt: Option<&AttemptView>,
) -> Vec<Line<'static>> {
    use ratatui::text::Span;
    let wrapped = sd.wrapped_lines();
    if wrapped.is_empty() {
        return Vec::new();
    }
    let indices = sd.wrapped_src_indices();
    let mut styled: Vec<Line<'static>> = Vec::new();
    let mut speaker: Option<ConversationSpeaker> = None;
    let mut in_code = false;
    // Source index of the previous wrapped line; a change marks the first
    // wrapped segment of a new raw line (continuations keep the same index).
    let mut last_src: Option<usize> = None;
    let mut bullet_indent: Option<usize> = None;
    for (display, &src_idx) in wrapped.iter().zip(indices.iter()) {
        let raw = sd.raw_line_at(src_idx);
        let trimmed = raw.trim();
        let is_new_raw = last_src.map(|prev| prev != src_idx).unwrap_or(true);
        // Speaker sentinel lines become section headers and reset state.
        if trimmed.eq_ignore_ascii_case("user:") {
            speaker = Some(ConversationSpeaker::User);
            in_code = false;
            bullet_indent = None;
            styled.push(conversation_header_line(ConversationSpeaker::User, None));
            last_src = Some(src_idx);
            continue;
        }
        if trimmed.eq_ignore_ascii_case("assistant:") {
            speaker = Some(ConversationSpeaker::Assistant);
            in_code = false;
            bullet_indent = None;
            styled.push(conversation_header_line(
                ConversationSpeaker::Assistant,
                attempt,
            ));
            last_src = Some(src_idx);
            continue;
        }
        // Blank lines keep the gutter so the speaker column stays continuous.
        if raw.is_empty() {
            let mut spans: Vec<Span> = Vec::new();
            if let Some(role) = speaker {
                spans.push(conversation_gutter_span(role));
            } else {
                spans.push(Span::raw(String::new()));
            }
            styled.push(Line::from(spans));
            last_src = Some(src_idx);
            bullet_indent = None;
            continue;
        }
        // State transitions only fire on the first segment of a raw line.
        if is_new_raw {
            let trimmed_start = raw.trim_start();
            if trimmed_start.starts_with("```") {
                in_code = !in_code;
                bullet_indent = None;
            } else if !in_code
                && (trimmed_start.starts_with("- ") || trimmed_start.starts_with("* "))
            {
                let indent = raw.chars().take_while(|c| c.is_whitespace()).count();
                bullet_indent = Some(indent);
            } else if !in_code {
                bullet_indent = None;
            }
        }
        let mut spans: Vec<Span> = Vec::new();
        if let Some(role) = speaker {
            spans.push(conversation_gutter_span(role));
        }
        spans.extend(conversation_text_spans(
            display,
            in_code,
            is_new_raw,
            bullet_indent,
        ));
        styled.push(Line::from(spans));
        last_src = Some(src_idx);
    }
    if styled.is_empty() {
        wrapped.iter().map(|l| Line::from(l.to_string())).collect()
    } else {
        styled
    }
}
/// Build the section header line for a conversation segment ("╭ User prompt"
/// or "╭ Assistant response"), appending the attempt's status label for
/// assistant headers when one is available.
fn conversation_header_line(
    speaker: ConversationSpeaker,
    attempt: Option<&AttemptView>,
) -> Line<'static> {
    use ratatui::text::Span;
    let mut spans: Vec<Span> = vec!["╭ ".dim()];
    match speaker {
        ConversationSpeaker::User => {
            spans.extend(["User".cyan().bold(), " prompt".dim()]);
        }
        ConversationSpeaker::Assistant => {
            spans.extend(["Assistant".magenta().bold(), " response".dim()]);
            // Unknown statuses produce no span, so nothing extra is shown.
            let status = attempt.and_then(|a| attempt_status_span(a.status));
            if let Some(status_span) = status {
                spans.push(" • ".dim());
                spans.push(status_span);
            }
        }
    }
    Line::from(spans)
}
/// Dimmed gutter bar for a transcript line, colored by speaker
/// (cyan for user, magenta for assistant).
fn conversation_gutter_span(speaker: ConversationSpeaker) -> ratatui::text::Span<'static> {
    if speaker == ConversationSpeaker::User {
        "│ ".cyan().dim()
    } else {
        "│ ".magenta().dim()
    }
}
/// Style the text portion of one wrapped transcript line.
///
/// Precedence: fenced-code lines render cyan verbatim; bullet lines get a
/// "• " marker (first segment) or hanging-indent continuation; markdown
/// headings render bold magenta; everything else goes through
/// `render_markdown_text` for inline markdown, keeping only the first
/// rendered line. `is_new_raw` distinguishes the first wrapped segment of a
/// raw line from its continuations; `bullet_indent` carries the active
/// bullet's leading-whitespace width.
fn conversation_text_spans(
    display: &str,
    in_code: bool,
    is_new_raw: bool,
    bullet_indent: Option<usize>,
) -> Vec<ratatui::text::Span<'static>> {
    use ratatui::text::Span;
    if in_code {
        return vec![Span::styled(
            display.to_string(),
            Style::default().fg(Color::Cyan),
        )];
    }
    let trimmed = display.trim_start();
    if let Some(indent) = bullet_indent {
        if is_new_raw {
            // First segment: replace the "- "/"* " marker with "• ".
            let rest = trimmed.get(2..).unwrap_or("").trim_start();
            let mut spans: Vec<Span> = Vec::new();
            if indent > 0 {
                spans.push(Span::raw(" ".repeat(indent)));
            }
            spans.push("• ".into());
            spans.push(Span::raw(rest.to_string()));
            return spans;
        }
        // Continuation of a wrapped bullet: hanging indent under the marker.
        let mut continuation = String::new();
        continuation.push_str(&" ".repeat(indent + 2));
        continuation.push_str(trimmed);
        return vec![Span::raw(continuation)];
    }
    if is_new_raw
        && (trimmed.starts_with("### ") || trimmed.starts_with("## ") || trimmed.starts_with("# "))
    {
        return vec![Span::styled(
            display.to_string(),
            Style::default()
                .fg(Color::Magenta)
                .add_modifier(Modifier::BOLD),
        )];
    }
    let mut rendered = render_markdown_text(display);
    if rendered.lines.is_empty() {
        return vec![Span::raw(display.to_string())];
    }
    // `render_markdown_text` can yield multiple lines when the input contains
    // explicit breaks. We only expect a single line here; join the spans of the
    // first rendered line for styling.
    rendered.lines.remove(0).spans.into_iter().collect()
}
/// Map an attempt status to a styled label span; `Unknown` yields `None`
/// so callers can omit the status entirely.
fn attempt_status_span(status: AttemptStatus) -> Option<ratatui::text::Span<'static>> {
    match status {
        AttemptStatus::Completed => Some("Completed".green()),
        AttemptStatus::Failed => Some("Failed".red().bold()),
        AttemptStatus::InProgress => Some("In progress".magenta()),
        AttemptStatus::Pending => Some("Pending".cyan()),
        AttemptStatus::Cancelled => Some("Cancelled".dim()),
        AttemptStatus::Unknown => None,
    }
}
/// Apply conventional unified-diff coloring to a single patch line.
///
/// Prefix precedence matters: hunk headers (`@@`) and file headers
/// (`+++` / `---`) must be matched before the bare `+` / `-` prefixes,
/// otherwise file headers would be colored as additions/removals.
fn style_diff_line(raw: &str) -> Line<'static> {
    use ratatui::style::Color;
    use ratatui::style::Modifier;
    use ratatui::style::Style;
    use ratatui::text::Span;
    let style = if raw.starts_with("@@") {
        Style::default()
            .fg(Color::Magenta)
            .add_modifier(Modifier::BOLD)
    } else if raw.starts_with("+++") || raw.starts_with("---") {
        Style::default().add_modifier(Modifier::DIM)
    } else if raw.starts_with('+') {
        Style::default().fg(Color::Green)
    } else if raw.starts_with('-') {
        Style::default().fg(Color::Red)
    } else {
        // Context lines: default (unstyled) text.
        Style::default()
    };
    Line::from(vec![Span::styled(raw.to_string(), style)])
}
/// Build the list entry for one task: a `[STATUS] Title` line, a dim meta
/// line (environment label and relative time), a diff-summary line, and a
/// blank spacer separating adjacent tasks.
fn render_task_item(_app: &App, t: &codex_cloud_tasks_client::TaskSummary) -> ListItem<'static> {
    use ratatui::text::Span;
    let status_span = match t.status {
        TaskStatus::Ready => "READY".green(),
        TaskStatus::Pending => "PENDING".magenta(),
        TaskStatus::Applied => "APPLIED".blue(),
        TaskStatus::Error => "ERROR".red(),
    };
    // Title line: [STATUS] Title
    let title_line = Line::from(vec![
        "[".into(),
        status_span,
        "] ".into(),
        t.title.clone().into(),
    ]);
    // Meta line: optional environment label, then relative timestamp (dim),
    // separated by a bullet when both are present.
    let mut meta_spans: Vec<Span> = Vec::new();
    if let Some(label) = t.environment_label.as_ref().filter(|s| !s.is_empty()) {
        meta_spans.push(label.clone().dim());
    }
    if !meta_spans.is_empty() {
        meta_spans.push(" ".into());
        meta_spans.push("•".dim());
        meta_spans.push(" ".into());
    }
    meta_spans.push(format_relative_time_now(t.updated_at).dim());
    let meta_line = Line::from(meta_spans);
    // Summary line: +adds/−dels • N files when there is a diff, else "no diff".
    let summary = &t.summary;
    let has_diff =
        summary.files_changed > 0 || summary.lines_added > 0 || summary.lines_removed > 0;
    let summary_line = if has_diff {
        Line::from(vec![
            format!("+{}", summary.lines_added).green(),
            "/".into(),
            format!("−{}", summary.lines_removed).red(),
            " ".into(),
            "•".dim(),
            " ".into(),
            format!("{}", summary.files_changed).into(),
            " ".into(),
            "files".dim(),
        ])
    } else {
        Line::from("no diff".to_string().dim())
    };
    // Trailing blank line keeps tasks visually separated in the list.
    ListItem::new(vec![title_line, meta_line, summary_line, Line::from("")])
}
/// Render a one-line blinking-dot spinner with a cyan label at `area`.
///
/// `spinner_start` is lazily initialized on first call; the dot toggles
/// between solid and hollow every 600 ms of elapsed time.
fn draw_inline_spinner(
    frame: &mut Frame,
    area: Rect,
    spinner_start: &mut Option<Instant>,
    label: &str,
) {
    use ratatui::widgets::Paragraph;
    let start = spinner_start.get_or_insert_with(Instant::now);
    let phase_on = (start.elapsed().as_millis() / 600) % 2 == 0;
    let dot = match phase_on {
        true => "• ".into(),
        false => "◦ ".dim(),
    };
    let line = Line::from(vec![dot, label.cyan()]);
    frame.render_widget(Paragraph::new(line), area);
}
/// Draw the inline spinner centered within `area`.
///
/// Carves a single row out of the vertical middle (50% / 1 row / 49%),
/// then an 18-cell slot out of the horizontal middle, and delegates to
/// [`draw_inline_spinner`] for that slot.
fn draw_centered_spinner(
    frame: &mut Frame,
    area: Rect,
    spinner_start: &mut Option<Instant>,
    label: &str,
) {
    let vertical = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Percentage(50),
            Constraint::Length(1),
            Constraint::Percentage(49),
        ])
        .split(area);
    let horizontal = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([
            Constraint::Percentage(50),
            Constraint::Length(18),
            Constraint::Percentage(50),
        ])
        .split(vertical[1]);
    draw_inline_spinner(frame, horizontal[1], spinner_start, label);
}
// Styling helpers for diff rendering live inline where used.
pub fn draw_env_modal(frame: &mut Frame, area: Rect, app: &mut App) {
use ratatui::widgets::Wrap;
// Use shared overlay geometry and padding.
let inner = overlay_outer(area);
// Title: primary only; move long hints to a subheader inside content.
let title = Line::from(vec!["Select Environment".magenta().bold()]);
let block = overlay_block().title(title);
frame.render_widget(Clear, inner);
frame.render_widget(block.clone(), inner);
let content = overlay_content(inner);
if app.env_loading {
draw_centered_spinner(
frame,
content,
&mut app.spinner_start,
"Loading environments…",
);
return;
}
// Layout: subheader + search + results list
let rows = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(1), // subheader
Constraint::Length(1), // search
Constraint::Min(1), // list
])
.split(content);
// Subheader with usage hints (dim cyan)
let subheader = Paragraph::new(Line::from(
"Type to search, Enter select, Esc cancel".cyan().dim(),
))
.wrap(Wrap { trim: true });
frame.render_widget(subheader, rows[0]);
let query = app
.env_modal
.as_ref()
.map(|m| m.query.clone())
.unwrap_or_default();
let ql = query.to_lowercase();
let search = Paragraph::new(format!("Search: {query}")).wrap(Wrap { trim: true });
frame.render_widget(search, rows[1]);
// Filter environments by query (case-insensitive substring over label/id/hints)
let envs: Vec<&crate::app::EnvironmentRow> = app
.environments
.iter()
.filter(|e| {
if ql.is_empty() {
return true;
}
let mut hay = String::new();
if let Some(l) = &e.label {
hay.push_str(&l.to_lowercase());
hay.push(' ');
}
hay.push_str(&e.id.to_lowercase());
if let Some(h) = &e.repo_hints {
hay.push(' ');
hay.push_str(&h.to_lowercase());
}
hay.contains(&ql)
})
.collect();
let mut items: Vec<ListItem> = Vec::new();
items.push(ListItem::new(Line::from("All Environments (Global)")));
for env in envs.iter() {
let primary = env.label.clone().unwrap_or_else(|| "<unnamed>".to_string());
let mut spans: Vec<ratatui::text::Span> = vec![primary.into()];
if env.is_pinned {
spans.push(" ".into());
spans.push("PINNED".magenta().bold());
}
spans.push(" ".into());
spans.push(env.id.clone().dim());
if let Some(hint) = &env.repo_hints {
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/src/new_task.rs | codex-rs/cloud-tasks/src/new_task.rs | use codex_tui::ComposerInput;
/// State backing the "new task" screen: the prompt composer plus the
/// parameters that accompany a submission.
pub struct NewTaskPage {
    // Multi-line prompt input widget (with footer key hints).
    pub composer: ComposerInput,
    // NOTE(review): presumably set while a submission request is in flight;
    // it is never toggled in this file — confirm at call sites.
    pub submitting: bool,
    // Target environment id, if one has been selected; None = unset.
    pub env_id: Option<String>,
    // Number of attempts requested for the task.
    pub best_of_n: usize,
}
impl NewTaskPage {
    /// Build a page whose composer footer already shows the key hints for
    /// this screen (send / newline / env picker / attempts / quit).
    pub fn new(env_id: Option<String>, best_of_n: usize) -> Self {
        let hints = vec![
            ("⏎", "send"),
            ("Shift+⏎", "newline"),
            ("Ctrl+O", "env"),
            ("Ctrl+N", "attempts"),
            ("Ctrl+C", "quit"),
        ];
        let mut composer = ComposerInput::new();
        composer.set_hint_items(hints);
        Self {
            composer,
            submitting: false,
            env_id,
            best_of_n,
        }
    }
    // Room for further helpers as this page grows.
}
impl Default for NewTaskPage {
    /// Default page: no environment selected and a single attempt
    /// (`best_of_n = 1`).
    fn default() -> Self {
        Self::new(None, 1)
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/cloud-tasks/tests/env_filter.rs | codex-rs/cloud-tasks/tests/env_filter.rs | use codex_cloud_tasks_client::CloudBackend;
use codex_cloud_tasks_client::MockClient;
#[tokio::test]
async fn mock_backend_varies_by_env() {
    let backend = MockClient;
    // No environment filter: the full mock task list comes back.
    let all_tasks = CloudBackend::list_tasks(&backend, None).await.unwrap();
    assert!(all_tasks.iter().any(|t| t.title.contains("Update README")));
    // Filtering on env-A yields exactly one task with a known title.
    let env_a = CloudBackend::list_tasks(&backend, Some("env-A"))
        .await
        .unwrap();
    assert_eq!(env_a.len(), 1);
    assert_eq!(env_a[0].title, "A: First");
    // Filtering on env-B yields two tasks; the first carries the "B: " prefix.
    let env_b = CloudBackend::list_tasks(&backend, Some("env-B"))
        .await
        .unwrap();
    assert_eq!(env_b.len(), 2);
    assert!(env_b[0].title.starts_with("B: "));
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/mcp-types/src/lib.rs | codex-rs/mcp-types/src/lib.rs | // @generated
// DO NOT EDIT THIS FILE DIRECTLY.
// Run the following in the crate root to regenerate this file:
//
// ```shell
// ./generate_mcp_types.py
// ```
use serde::Deserialize;
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::convert::TryFrom;
use schemars::JsonSchema;
use ts_rs::TS;
pub const MCP_SCHEMA_VERSION: &str = "2025-06-18";
pub const JSONRPC_VERSION: &str = "2.0";
/// Paired request/response types for the Model Context Protocol (MCP).
/// Paired request/response types for the Model Context Protocol (MCP).
pub trait ModelContextProtocolRequest {
    /// JSON-RPC method name for this request (e.g. `"tools/call"`).
    const METHOD: &'static str;
    /// Payload type carried in the request's `params` field.
    type Params: DeserializeOwned + Serialize + Send + Sync + 'static;
    /// Payload type carried in the response's `result` field.
    type Result: DeserializeOwned + Serialize + Send + Sync + 'static;
}
/// One-way message in the Model Context Protocol (MCP).
pub trait ModelContextProtocolNotification {
    /// JSON-RPC method name for this notification.
    const METHOD: &'static str;
    /// Payload type carried in the notification's `params` field.
    type Params: DeserializeOwned + Serialize + Send + Sync + 'static;
}
// Serde `default` helper: missing `jsonrpc` fields deserialize to "2.0".
fn default_jsonrpc() -> String {
    JSONRPC_VERSION.to_owned()
}
/// Optional annotations for the client. The client can use annotations to inform how objects are used or displayed
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct Annotations {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub audience: Option<Vec<Role>>,
#[serde(
rename = "lastModified",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub last_modified: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub priority: Option<f64>,
}
/// Audio provided to or from an LLM.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct AudioContent {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub annotations: Option<Annotations>,
pub data: String,
#[serde(rename = "mimeType")]
pub mime_type: String,
pub r#type: String, // &'static str = "audio"
}
/// Base interface for metadata with name (identifier) and title (display name) properties.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct BaseMetadata {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub title: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct BlobResourceContents {
pub blob: String,
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub mime_type: Option<String>,
pub uri: String,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct BooleanSchema {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub default: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub title: Option<String>,
pub r#type: String, // &'static str = "boolean"
}
/// `tools/call` request marker type (uninhabited; used only for its
/// associated `Params`/`Result` types via `ModelContextProtocolRequest`).
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum CallToolRequest {}
impl ModelContextProtocolRequest for CallToolRequest {
    const METHOD: &'static str = "tools/call";
    type Params = CallToolRequestParams;
    type Result = CallToolResult;
}
/// Parameters for `tools/call`: the tool's name plus free-form arguments.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CallToolRequestParams {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub arguments: Option<serde_json::Value>,
    pub name: String,
}
/// The server's response to a tool call.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CallToolResult {
    pub content: Vec<ContentBlock>,
    #[serde(rename = "isError", default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub is_error: Option<bool>,
    #[serde(
        rename = "structuredContent",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub structured_content: Option<serde_json::Value>,
}
impl From<CallToolResult> for serde_json::Value {
    fn from(value: CallToolResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum CancelledNotification {}
impl ModelContextProtocolNotification for CancelledNotification {
const METHOD: &'static str = "notifications/cancelled";
type Params = CancelledNotificationParams;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CancelledNotificationParams {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub reason: Option<String>,
#[serde(rename = "requestId")]
pub request_id: RequestId,
}
/// Capabilities a client may support. Known capabilities are defined here, in this schema, but this is not a closed set: any client can define its own, additional capabilities.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ClientCapabilities {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub elicitation: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub experimental: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub roots: Option<ClientCapabilitiesRoots>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub sampling: Option<serde_json::Value>,
}
/// Present if the client supports listing roots.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ClientCapabilitiesRoots {
#[serde(
rename = "listChanged",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub list_changed: Option<bool>,
}
/// Any notification a client may send to a server. Deserialized `untagged`,
/// i.e. by trying each variant's shape in declaration order.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum ClientNotification {
    CancelledNotification(CancelledNotification),
    InitializedNotification(InitializedNotification),
    ProgressNotification(ProgressNotification),
    RootsListChangedNotification(RootsListChangedNotification),
}
/// Any request a client may send, dispatched on the JSON-RPC `method`
/// string with the payload taken from `params` (internally tagged).
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(tag = "method", content = "params")]
pub enum ClientRequest {
    #[serde(rename = "initialize")]
    InitializeRequest(<InitializeRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "ping")]
    PingRequest(<PingRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "resources/list")]
    ListResourcesRequest(<ListResourcesRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "resources/templates/list")]
    ListResourceTemplatesRequest(
        <ListResourceTemplatesRequest as ModelContextProtocolRequest>::Params,
    ),
    #[serde(rename = "resources/read")]
    ReadResourceRequest(<ReadResourceRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "resources/subscribe")]
    SubscribeRequest(<SubscribeRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "resources/unsubscribe")]
    UnsubscribeRequest(<UnsubscribeRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "prompts/list")]
    ListPromptsRequest(<ListPromptsRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "prompts/get")]
    GetPromptRequest(<GetPromptRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "tools/list")]
    ListToolsRequest(<ListToolsRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "tools/call")]
    CallToolRequest(<CallToolRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "logging/setLevel")]
    SetLevelRequest(<SetLevelRequest as ModelContextProtocolRequest>::Params),
    #[serde(rename = "completion/complete")]
    CompleteRequest(<CompleteRequest as ModelContextProtocolRequest>::Params),
}
/// Any result a client may return for a server-initiated request
/// (sampling, roots listing, elicitation). Untagged, like the notifications.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum ClientResult {
    Result(Result),
    CreateMessageResult(CreateMessageResult),
    ListRootsResult(ListRootsResult),
    ElicitResult(ElicitResult),
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum CompleteRequest {}
impl ModelContextProtocolRequest for CompleteRequest {
const METHOD: &'static str = "completion/complete";
type Params = CompleteRequestParams;
type Result = CompleteResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CompleteRequestParams {
pub argument: CompleteRequestParamsArgument,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub context: Option<CompleteRequestParamsContext>,
pub r#ref: CompleteRequestParamsRef,
}
/// Additional, optional context for completions
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CompleteRequestParamsContext {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub arguments: Option<serde_json::Value>,
}
/// The argument's information
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CompleteRequestParamsArgument {
pub name: String,
pub value: String,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum CompleteRequestParamsRef {
PromptReference(PromptReference),
ResourceTemplateReference(ResourceTemplateReference),
}
/// The server's response to a completion/complete request
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CompleteResult {
pub completion: CompleteResultCompletion,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CompleteResultCompletion {
#[serde(rename = "hasMore", default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub has_more: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub total: Option<i64>,
pub values: Vec<String>,
}
impl From<CompleteResult> for serde_json::Value {
fn from(value: CompleteResult) -> Self {
// Leave this as it should never fail
#[expect(clippy::unwrap_used)]
serde_json::to_value(value).unwrap()
}
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum ContentBlock {
TextContent(TextContent),
ImageContent(ImageContent),
AudioContent(AudioContent),
ResourceLink(ResourceLink),
EmbeddedResource(EmbeddedResource),
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum CreateMessageRequest {}
impl ModelContextProtocolRequest for CreateMessageRequest {
const METHOD: &'static str = "sampling/createMessage";
type Params = CreateMessageRequestParams;
type Result = CreateMessageResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CreateMessageRequestParams {
#[serde(
rename = "includeContext",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub include_context: Option<String>,
#[serde(rename = "maxTokens")]
pub max_tokens: i64,
pub messages: Vec<SamplingMessage>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub metadata: Option<serde_json::Value>,
#[serde(
rename = "modelPreferences",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub model_preferences: Option<ModelPreferences>,
#[serde(
rename = "stopSequences",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub stop_sequences: Option<Vec<String>>,
#[serde(
rename = "systemPrompt",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub system_prompt: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub temperature: Option<f64>,
}
/// The client's response to a sampling/create_message request from the server. The client should inform the user before returning the sampled message, to allow them to inspect the response (human in the loop) and decide whether to allow the server to see it.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct CreateMessageResult {
pub content: CreateMessageResultContent,
pub model: String,
pub role: Role,
#[serde(
rename = "stopReason",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub stop_reason: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum CreateMessageResultContent {
TextContent(TextContent),
ImageContent(ImageContent),
AudioContent(AudioContent),
}
impl From<CreateMessageResult> for serde_json::Value {
fn from(value: CreateMessageResult) -> Self {
// Leave this as it should never fail
#[expect(clippy::unwrap_used)]
serde_json::to_value(value).unwrap()
}
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct Cursor(String);
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ElicitRequest {}
impl ModelContextProtocolRequest for ElicitRequest {
const METHOD: &'static str = "elicitation/create";
type Params = ElicitRequestParams;
type Result = ElicitResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ElicitRequestParams {
pub message: String,
#[serde(rename = "requestedSchema")]
pub requested_schema: ElicitRequestParamsRequestedSchema,
}
/// A restricted subset of JSON Schema.
/// Only top-level properties are allowed, without nesting.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ElicitRequestParamsRequestedSchema {
pub properties: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub required: Option<Vec<String>>,
pub r#type: String, // &'static str = "object"
}
/// The client's response to an elicitation request.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ElicitResult {
pub action: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub content: Option<serde_json::Value>,
}
impl From<ElicitResult> for serde_json::Value {
fn from(value: ElicitResult) -> Self {
// Leave this as it should never fail
#[expect(clippy::unwrap_used)]
serde_json::to_value(value).unwrap()
}
}
/// The contents of a resource, embedded into a prompt or tool call result.
///
/// It is up to the client how best to render embedded resources for the benefit
/// of the LLM and/or the user.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct EmbeddedResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub annotations: Option<Annotations>,
pub resource: EmbeddedResourceResource,
pub r#type: String, // &'static str = "resource"
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum EmbeddedResourceResource {
TextResourceContents(TextResourceContents),
BlobResourceContents(BlobResourceContents),
}
pub type EmptyResult = Result;
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct EnumSchema {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub description: Option<String>,
pub r#enum: Vec<String>,
#[serde(rename = "enumNames", default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub enum_names: Option<Vec<String>>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub title: Option<String>,
pub r#type: String, // &'static str = "string"
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum GetPromptRequest {}
impl ModelContextProtocolRequest for GetPromptRequest {
const METHOD: &'static str = "prompts/get";
type Params = GetPromptRequestParams;
type Result = GetPromptResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct GetPromptRequestParams {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub arguments: Option<serde_json::Value>,
pub name: String,
}
/// The server's response to a prompts/get request from the client.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct GetPromptResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub description: Option<String>,
pub messages: Vec<PromptMessage>,
}
impl From<GetPromptResult> for serde_json::Value {
fn from(value: GetPromptResult) -> Self {
// Leave this as it should never fail
#[expect(clippy::unwrap_used)]
serde_json::to_value(value).unwrap()
}
}
/// An image provided to or from an LLM.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ImageContent {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub annotations: Option<Annotations>,
pub data: String,
#[serde(rename = "mimeType")]
pub mime_type: String,
pub r#type: String, // &'static str = "image"
}
/// Describes the name and version of an MCP implementation, with an optional title for UI representation.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct Implementation {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub title: Option<String>,
pub version: String,
// This is an extra field that the Codex MCP server sends as part of InitializeResult.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub user_agent: Option<String>,
}
/// `initialize` request marker type (uninhabited; carries only the
/// associated `Params`/`Result` types).
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum InitializeRequest {}
impl ModelContextProtocolRequest for InitializeRequest {
    const METHOD: &'static str = "initialize";
    type Params = InitializeRequestParams;
    type Result = InitializeResult;
}
/// Parameters the client sends with `initialize`: its capabilities,
/// identity, and the protocol version it speaks.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct InitializeRequestParams {
    pub capabilities: ClientCapabilities,
    #[serde(rename = "clientInfo")]
    pub client_info: Implementation,
    #[serde(rename = "protocolVersion")]
    pub protocol_version: String,
}
/// After receiving an initialize request from the client, the server sends this response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct InitializeResult {
    pub capabilities: ServerCapabilities,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub instructions: Option<String>,
    #[serde(rename = "protocolVersion")]
    pub protocol_version: String,
    #[serde(rename = "serverInfo")]
    pub server_info: Implementation,
}
impl From<InitializeResult> for serde_json::Value {
    fn from(value: InitializeResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
/// `notifications/initialized` marker type: sent by the client after the
/// initialize handshake completes; carries optional free-form params.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum InitializedNotification {}
impl ModelContextProtocolNotification for InitializedNotification {
    const METHOD: &'static str = "notifications/initialized";
    type Params = Option<serde_json::Value>;
}
/// A response to a request that indicates an error occurred.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCError {
    pub error: JSONRPCErrorError,
    pub id: RequestId,
    // Missing `jsonrpc` on the wire deserializes to "2.0" (see default_jsonrpc).
    #[serde(rename = "jsonrpc", default = "default_jsonrpc")]
    pub jsonrpc: String,
}
/// The error payload of a JSON-RPC error response: numeric code, message,
/// and optional structured data.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCErrorError {
    pub code: i64,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub data: Option<serde_json::Value>,
    pub message: String,
}
/// Refers to any valid JSON-RPC object that can be decoded off the wire, or encoded to be sent.
// Untagged: the wire shape (id/method/result/error fields) picks the variant.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
#[serde(untagged)]
pub enum JSONRPCMessage {
    Request(JSONRPCRequest),
    Notification(JSONRPCNotification),
    Response(JSONRPCResponse),
    Error(JSONRPCError),
}
/// A notification which does not expect a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCNotification {
    #[serde(rename = "jsonrpc", default = "default_jsonrpc")]
    pub jsonrpc: String,
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub params: Option<serde_json::Value>,
}
/// A request that expects a response.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCRequest {
    pub id: RequestId,
    #[serde(rename = "jsonrpc", default = "default_jsonrpc")]
    pub jsonrpc: String,
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub params: Option<serde_json::Value>,
}
/// A successful (non-error) response to a request.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct JSONRPCResponse {
    pub id: RequestId,
    #[serde(rename = "jsonrpc", default = "default_jsonrpc")]
    pub jsonrpc: String,
    pub result: Result,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ListPromptsRequest {}
impl ModelContextProtocolRequest for ListPromptsRequest {
const METHOD: &'static str = "prompts/list";
type Params = Option<ListPromptsRequestParams>;
type Result = ListPromptsResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListPromptsRequestParams {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub cursor: Option<String>,
}
/// The server's response to a prompts/list request from the client.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListPromptsResult {
#[serde(
rename = "nextCursor",
default,
skip_serializing_if = "Option::is_none"
)]
#[ts(optional)]
pub next_cursor: Option<String>,
pub prompts: Vec<Prompt>,
}
impl From<ListPromptsResult> for serde_json::Value {
fn from(value: ListPromptsResult) -> Self {
// Leave this as it should never fail
#[expect(clippy::unwrap_used)]
serde_json::to_value(value).unwrap()
}
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ListResourceTemplatesRequest {}
impl ModelContextProtocolRequest for ListResourceTemplatesRequest {
const METHOD: &'static str = "resources/templates/list";
type Params = Option<ListResourceTemplatesRequestParams>;
type Result = ListResourceTemplatesResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListResourceTemplatesRequestParams {
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub cursor: Option<String>,
}
/// The server's response to a resources/templates/list request from the client.
// NOTE(review): these MCP protocol types look machine-generated from the MCP
// schema — prefer regenerating over hand-editing; TODO confirm the generator.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListResourceTemplatesResult {
    // Serialized as `nextCursor`; omitted entirely when `None`.
    #[serde(
        rename = "nextCursor",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub next_cursor: Option<String>,
    #[serde(rename = "resourceTemplates")]
    pub resource_templates: Vec<ResourceTemplate>,
}
impl From<ListResourceTemplatesResult> for serde_json::Value {
    fn from(value: ListResourceTemplatesResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
// Uninhabited marker enum: never instantiated, only its associated
// METHOD/Params/Result items are used to type the request.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ListResourcesRequest {}
impl ModelContextProtocolRequest for ListResourcesRequest {
    const METHOD: &'static str = "resources/list";
    type Params = Option<ListResourcesRequestParams>;
    type Result = ListResourcesResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListResourcesRequestParams {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub cursor: Option<String>,
}
/// The server's response to a resources/list request from the client.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListResourcesResult {
    #[serde(
        rename = "nextCursor",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub next_cursor: Option<String>,
    pub resources: Vec<Resource>,
}
impl From<ListResourcesResult> for serde_json::Value {
    fn from(value: ListResourcesResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ListRootsRequest {}
impl ModelContextProtocolRequest for ListRootsRequest {
    const METHOD: &'static str = "roots/list";
    type Params = Option<serde_json::Value>;
    type Result = ListRootsResult;
}
/// The client's response to a roots/list request from the server.
/// This result contains an array of Root objects, each representing a root directory
/// or file that the server can operate on.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListRootsResult {
    pub roots: Vec<Root>,
}
impl From<ListRootsResult> for serde_json::Value {
    fn from(value: ListRootsResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum ListToolsRequest {}
impl ModelContextProtocolRequest for ListToolsRequest {
    const METHOD: &'static str = "tools/list";
    type Params = Option<ListToolsRequestParams>;
    type Result = ListToolsResult;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListToolsRequestParams {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub cursor: Option<String>,
}
/// The server's response to a tools/list request from the client.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ListToolsResult {
    #[serde(
        rename = "nextCursor",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub next_cursor: Option<String>,
    pub tools: Vec<Tool>,
}
impl From<ListToolsResult> for serde_json::Value {
    fn from(value: ListToolsResult) -> Self {
        // Leave this as it should never fail
        #[expect(clippy::unwrap_used)]
        serde_json::to_value(value).unwrap()
    }
}
/// The severity of a log message.
///
/// These map to syslog message severities, as specified in RFC-5424:
/// https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.1
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum LoggingLevel {
    // Variants serialize to their lowercase syslog names; declared in
    // alphabetical order, not severity order.
    #[serde(rename = "alert")]
    Alert,
    #[serde(rename = "critical")]
    Critical,
    #[serde(rename = "debug")]
    Debug,
    #[serde(rename = "emergency")]
    Emergency,
    #[serde(rename = "error")]
    Error,
    #[serde(rename = "info")]
    Info,
    #[serde(rename = "notice")]
    Notice,
    #[serde(rename = "warning")]
    Warning,
}
// Uninhabited marker enum; only METHOD/Params are used.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub enum LoggingMessageNotification {}
impl ModelContextProtocolNotification for LoggingMessageNotification {
    const METHOD: &'static str = "notifications/message";
    type Params = LoggingMessageNotificationParams;
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct LoggingMessageNotificationParams {
    // Arbitrary JSON payload attached to the log message.
    pub data: serde_json::Value,
    pub level: LoggingLevel,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub logger: Option<String>,
}
/// Hints to use for model selection.
///
/// Keys not declared here are currently left unspecified by the spec and are up
/// to the client to interpret.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ModelHint {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub name: Option<String>,
}
/// The server's preferences for model selection, requested of the client during sampling.
///
/// Because LLMs can vary along multiple dimensions, choosing the "best" model is
/// rarely straightforward. Different models excel in different areas—some are
/// faster but less capable, others are more capable but more expensive, and so
/// on. This interface allows servers to express their priorities across multiple
/// dimensions to help clients make an appropriate selection for their use case.
///
/// These preferences are always advisory. The client MAY ignore them. It is also
/// up to the client to decide how to interpret these preferences and how to
/// balance them against other considerations.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct ModelPreferences {
    // All priority fields are optional and serialize in camelCase.
    #[serde(
        rename = "costPriority",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub cost_priority: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub hints: Option<Vec<ModelHint>>,
    #[serde(
        rename = "intelligencePriority",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub intelligence_priority: Option<f64>,
    #[serde(
        rename = "speedPriority",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub speed_priority: Option<f64>,
}
// Generic notification envelope: method name plus untyped params.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct Notification {
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub params: Option<serde_json::Value>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct NumberSchema {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub maximum: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub minimum: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub title: Option<String>,
    // `type` is a Rust keyword, hence the raw identifier.
    pub r#type: String,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct PaginatedRequest {
    pub method: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub params: Option<PaginatedRequestParams>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct PaginatedRequestParams {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[ts(optional)]
    pub cursor: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema, TS)]
pub struct PaginatedResult {
    // Serialized as `nextCursor`; omitted when `None`.
    #[serde(
        rename = "nextCursor",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    #[ts(optional)]
    pub next_cursor: Option<String>,
}
impl From<PaginatedResult> for serde_json::Value {
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/mcp-types/tests/all.rs | codex-rs/mcp-types/tests/all.rs | // Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
// `mod suite` compiles them all into this one integration-test binary.
mod suite;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/mcp-types/tests/suite/progress_notification.rs | codex-rs/mcp-types/tests/suite/progress_notification.rs | use mcp_types::JSONRPCMessage;
use mcp_types::ProgressNotificationParams;
use mcp_types::ProgressToken;
use mcp_types::ServerNotification;
#[test]
fn deserialize_progress_notification() {
    // Wire-level JSON for a `notifications/progress` server notification.
    let raw = r#"{
        "jsonrpc": "2.0",
        "method": "notifications/progress",
        "params": {
            "message": "Half way there",
            "progress": 0.5,
            "progressToken": 99,
            "total": 1.0
        }
    }"#;
    // Parse the full envelope, then peel out the notification payload.
    let message: JSONRPCMessage = serde_json::from_str(raw).expect("invalid JSONRPCMessage");
    let notification = match message {
        JSONRPCMessage::Notification(notification) => notification,
        _ => panic!("expected JSONRPCMessage::Notification"),
    };
    // Route through the generated TryFrom into the typed enum and compare
    // against fully-typed params.
    let params = match ServerNotification::try_from(notification).expect("conversion must succeed")
    {
        ServerNotification::ProgressNotification(params) => params,
        _ => panic!("expected ServerNotification::ProgressNotification"),
    };
    assert_eq!(
        params,
        ProgressNotificationParams {
            message: Some("Half way there".into()),
            progress: 0.5,
            progress_token: ProgressToken::Integer(99),
            total: Some(1.0),
        }
    );
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/mcp-types/tests/suite/initialize.rs | codex-rs/mcp-types/tests/suite/initialize.rs | use mcp_types::ClientCapabilities;
use mcp_types::ClientRequest;
use mcp_types::Implementation;
use mcp_types::InitializeRequestParams;
use mcp_types::JSONRPC_VERSION;
use mcp_types::JSONRPCMessage;
use mcp_types::JSONRPCRequest;
use mcp_types::RequestId;
use serde_json::json;
#[test]
fn deserialize_initialize_request() {
    // Wire-level JSON for an `initialize` request as a client would send it.
    let raw = r#"{
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "capabilities": {},
            "clientInfo": { "name": "acme-client", "title": "Acme", "version": "1.2.3" },
            "protocolVersion": "2025-06-18"
        }
    }"#;
    let message: JSONRPCMessage =
        serde_json::from_str(raw).expect("failed to deserialize JSONRPCMessage");
    // The envelope must parse as a request with still-untyped params.
    let request = match message {
        JSONRPCMessage::Request(request) => request,
        _ => panic!("expected JSONRPCMessage::Request"),
    };
    assert_eq!(
        request,
        JSONRPCRequest {
            jsonrpc: JSONRPC_VERSION.into(),
            id: RequestId::Integer(1),
            method: "initialize".into(),
            params: Some(json!({
                "capabilities": {},
                "clientInfo": { "name": "acme-client", "title": "Acme", "version": "1.2.3" },
                "protocolVersion": "2025-06-18"
            })),
        }
    );
    // Converting into the typed enum must yield fully-typed initialize params.
    let init_params = match ClientRequest::try_from(request).expect("conversion must succeed") {
        ClientRequest::InitializeRequest(params) => params,
        _ => panic!("expected ClientRequest::InitializeRequest"),
    };
    let expected = InitializeRequestParams {
        capabilities: ClientCapabilities {
            experimental: None,
            roots: None,
            sampling: None,
            elicitation: None,
        },
        client_info: Implementation {
            name: "acme-client".into(),
            title: Some("Acme".to_string()),
            version: "1.2.3".into(),
            user_agent: None,
        },
        protocol_version: "2025-06-18".into(),
    };
    assert_eq!(init_params, expected);
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/mcp-types/tests/suite/mod.rs | codex-rs/mcp-types/tests/suite/mod.rs | // Aggregates all former standalone integration tests as modules.
// One module per former standalone integration-test file.
mod initialize;
mod progress_notification;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix.rs | codex-rs/exec-server/src/posix.rs | //! This is an MCP that implements an alternative `shell` tool with fine-grained privilege
//! escalation based on a per-exec() policy.
//!
//! We spawn Bash process inside a sandbox. The Bash we spawn is patched to allow us to intercept
//! every exec() call it makes by invoking a wrapper program and passing in the arguments it would
//! have passed to exec(). The Bash process (and its descendants) inherit a communication socket
//! from us, and we give its fd number in the CODEX_ESCALATE_SOCKET environment variable.
//!
//! When we intercept an exec() call, we send a message over the socket back to the main
//! MCP process. The MCP process can then decide whether to allow the exec() call to proceed
//! or to escalate privileges and run the requested command with elevated permissions. In the
//! latter case, we send a message back to the child requesting that it forward its open FDs to us.
//! We then execute the requested command on its behalf, patching in the forwarded FDs.
//!
//!
//! ### The privilege escalation flow
//!
//! Child MCP Bash Escalate Helper
//! |
//! o----->o
//! | |
//! | o--(exec)-->o
//! | | |
//! |o<-(EscalateReq)--o
//! || | |
//! |o--(Escalate)---->o
//! || | |
//! |o<---------(fds)--o
//! || | |
//! o<-----o | |
//! | || | |
//! x----->o | |
//! || | |
//! |x--(exit code)--->o
//! | | |
//! | o<--(exit)--x
//! | |
//! o<-----x
//!
//! ### The non-escalation flow
//!
//! MCP Bash Escalate Helper Child
//! |
//! o----->o
//! | |
//! | o--(exec)-->o
//! | | |
//! |o<-(EscalateReq)--o
//! || | |
//! |o-(Run)---------->o
//! | | |
//! | | x--(exec)-->o
//! | | |
//! | o<--------------(exit)--x
//! | |
//! o<-----x
//!
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Context as _;
use clap::Parser;
use codex_core::config::find_codex_home;
use codex_core::is_dangerous_command::command_might_be_dangerous;
use codex_core::sandboxing::SandboxPermissions;
use codex_execpolicy::Decision;
use codex_execpolicy::Policy;
use codex_execpolicy::RuleMatch;
use rmcp::ErrorData as McpError;
use tokio::sync::RwLock;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::{self};
use crate::posix::mcp_escalation_policy::ExecPolicyOutcome;
mod escalate_client;
mod escalate_protocol;
mod escalate_server;
mod escalation_policy;
mod mcp;
mod mcp_escalation_policy;
mod socket;
mod stopwatch;
pub use mcp::ExecResult;
/// Default value of --execve option relative to the current executable.
/// Note this must match the name of the binary as specified in Cargo.toml.
const CODEX_EXECVE_WRAPPER_EXE_NAME: &str = "codex-execve-wrapper";
// CLI for the MCP server binary; the `///` field docs below are surfaced as
// `--help` text by clap.
#[derive(Parser)]
#[clap(version)]
struct McpServerCli {
    /// Executable to delegate execve(2) calls to in Bash.
    #[arg(long = "execve")]
    execve_wrapper: Option<PathBuf>,
    /// Path to Bash that has been patched to support execve() wrapping.
    #[arg(long = "bash")]
    bash_path: Option<PathBuf>,
    /// Preserve program paths when applying execpolicy (e.g., keep /usr/bin/echo instead of echo).
    /// Note: this does not change the actual program being run.
    #[arg(long)]
    preserve_program_paths: bool,
}
/// Entry point for the MCP server binary.
///
/// Resolves the execve-wrapper and patched-Bash paths (CLI flags, falling back
/// to a sibling binary and `CODEX_BASH_PATH` respectively), loads the exec
/// policy from the Codex config layers, and serves the `shell` tool over
/// stdio until the client disconnects.
#[tokio::main]
pub async fn main_mcp_server() -> anyhow::Result<()> {
    // Log to stderr so stdout stays dedicated to the MCP stdio transport.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_writer(std::io::stderr)
        .with_ansi(false)
        .init();
    let cli = McpServerCli::parse();
    let execve_wrapper = match cli.execve_wrapper {
        Some(path) => path,
        None => {
            // Default to a binary named CODEX_EXECVE_WRAPPER_EXE_NAME that
            // sits next to the currently running executable. (The original
            // local was misleadingly named `cwd`; it is the exe path.)
            let current_exe = std::env::current_exe()?;
            current_exe
                .parent()
                .map(|dir| dir.join(CODEX_EXECVE_WRAPPER_EXE_NAME))
                .ok_or_else(|| {
                    anyhow::anyhow!("failed to determine execve wrapper path from current exe")
                })?
        }
    };
    let bash_path = match cli.bash_path {
        Some(path) => path,
        None => mcp::get_bash_path()?,
    };
    // Policy is shared behind a lock so it can be consulted per exec().
    let policy = Arc::new(RwLock::new(load_exec_policy().await?));
    tracing::info!("Starting MCP server");
    let service = mcp::serve(
        bash_path,
        execve_wrapper,
        policy,
        cli.preserve_program_paths,
    )
    .await
    .inspect_err(|e| {
        tracing::error!("serving error: {:?}", e);
    })?;
    // Block until the transport shuts down.
    service.waiting().await?;
    Ok(())
}
// CLI for the execve wrapper binary: `file` is the executable the patched
// Bash wanted to exec; `argv` is the full argument vector (trailing var-args
// so flags in the wrapped command are not parsed by clap).
#[derive(Parser)]
pub struct ExecveWrapperCli {
    file: String,
    #[arg(trailing_var_arg = true)]
    argv: Vec<String>,
}
/// Entry point for the execve wrapper: forwards the intercepted exec() to the
/// escalate client and exits this process with the resulting exit code.
#[tokio::main]
pub async fn main_execve_wrapper() -> anyhow::Result<()> {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_writer(std::io::stderr)
        .with_ansi(false)
        .init();
    let ExecveWrapperCli { file, argv } = ExecveWrapperCli::parse();
    let exit_code = escalate_client::run(file, argv).await?;
    // Propagate the wrapped command's exit code; this never returns.
    std::process::exit(exit_code);
}
/// Decide how to handle an exec() call for a specific command.
///
/// `file` is the absolute, canonical path to the executable to run, i.e. the first arg to exec.
/// `argv` is the argv, including the program name (`argv[0]`).
pub(crate) fn evaluate_exec_policy(
    policy: &Policy,
    file: &Path,
    argv: &[String],
    preserve_program_paths: bool,
) -> Result<ExecPolicyOutcome, McpError> {
    let Some(program_name) = format_program_name(file, preserve_program_paths) else {
        return Err(McpError::internal_error(
            format!("failed to format program name for `{}`", file.display()),
            None,
        ));
    };
    // Substitute the normalized program name for argv[0] before consulting
    // the policy; the remaining arguments pass through unchanged.
    let mut command: Vec<String> = Vec::with_capacity(argv.len().max(1));
    command.push(program_name);
    command.extend(argv.iter().skip(1).cloned());
    // The fallback heuristic prompts for commands that look dangerous and
    // allows everything else.
    let evaluation = policy.check(&command, &|cmd| {
        if command_might_be_dangerous(cmd) {
            Decision::Prompt
        } else {
            Decision::Allow
        }
    });
    // A decision backed by an explicit (non-heuristic) policy rule should run
    // outside the sandbox.
    let policy_drove_decision = evaluation.matched_rules.iter().any(|rule_match| {
        rule_match.decision() == evaluation.decision
            && !matches!(rule_match, RuleMatch::HeuristicsRuleMatch { .. })
    });
    let sandbox_permissions = if policy_drove_decision {
        SandboxPermissions::RequireEscalated
    } else {
        SandboxPermissions::UseDefault
    };
    match evaluation.decision {
        Decision::Forbidden => Ok(ExecPolicyOutcome::Forbidden),
        Decision::Prompt => Ok(ExecPolicyOutcome::Prompt {
            sandbox_permissions,
        }),
        Decision::Allow => Ok(ExecPolicyOutcome::Allow {
            sandbox_permissions,
        }),
    }
}
/// Renders `path` as the program name used for policy matching: the full path
/// when `preserve_program_paths` is set, otherwise just the final component.
/// Returns `None` when the path has no file name or is not valid UTF-8.
fn format_program_name(path: &Path, preserve_program_paths: bool) -> Option<String> {
    let name = if preserve_program_paths {
        path.as_os_str()
    } else {
        path.file_name()?
    };
    name.to_str().map(str::to_string)
}
/// Loads the exec policy from the Codex config layers rooted at `codex_home`.
async fn load_exec_policy() -> anyhow::Result<Policy> {
    let codex_home = find_codex_home().context("failed to resolve codex_home for execpolicy")?;
    // TODO(mbolin): At a minimum, `cwd` should be configurable via
    // `codex/sandbox-state/update` or some other custom MCP call.
    let cwd = None;
    // No CLI-level or loader-level overrides apply in this server.
    let cli_overrides = Vec::new();
    let overrides = codex_core::config_loader::LoaderOverrides::default();
    let config_layer_stack = codex_core::config_loader::load_config_layers_state(
        &codex_home,
        cwd,
        &cli_overrides,
        overrides,
    )
    .await?;
    codex_core::load_exec_policy(&config_layer_stack)
        .await
        .map_err(anyhow::Error::from)
}
#[cfg(test)]
mod tests {
    use super::*;
    use codex_core::sandboxing::SandboxPermissions;
    use codex_execpolicy::Decision;
    use codex_execpolicy::Policy;
    use pretty_assertions::assert_eq;
    use std::path::Path;
    // With an empty policy only the danger heuristic can match, so `rm -rf /`
    // prompts while keeping the default sandbox permissions.
    #[test]
    fn evaluate_exec_policy_uses_heuristics_for_dangerous_commands() {
        let policy = Policy::empty();
        let file = Path::new("/bin/rm");
        let argv = vec!["rm".to_string(), "-rf".to_string(), "/".to_string()];
        let outcome = evaluate_exec_policy(&policy, file, &argv, false).expect("policy evaluation");
        assert_eq!(
            outcome,
            ExecPolicyOutcome::Prompt {
                sandbox_permissions: SandboxPermissions::UseDefault
            }
        );
    }
    // When program paths are preserved, the rule keyed on the full path
    // matches, and a policy-driven Allow requires escalated permissions.
    #[test]
    fn evaluate_exec_policy_respects_preserve_program_paths() {
        let mut policy = Policy::empty();
        policy
            .add_prefix_rule(
                &[
                    "/usr/local/bin/custom-cmd".to_string(),
                    "--flag".to_string(),
                ],
                Decision::Allow,
            )
            .expect("policy rule should be added");
        let file = Path::new("/usr/local/bin/custom-cmd");
        let argv = vec![
            "/usr/local/bin/custom-cmd".to_string(),
            "--flag".to_string(),
            "value".to_string(),
        ];
        let outcome = evaluate_exec_policy(&policy, file, &argv, true).expect("policy evaluation");
        assert_eq!(
            outcome,
            ExecPolicyOutcome::Allow {
                sandbox_permissions: SandboxPermissions::RequireEscalated
            }
        );
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/lib.rs | codex-rs/exec-server/src/lib.rs | #[cfg(unix)]
mod posix;
// POSIX-only re-exports: on non-Unix targets this crate exposes nothing.
#[cfg(unix)]
pub use posix::main_execve_wrapper;
#[cfg(unix)]
pub use posix::main_mcp_server;
#[cfg(unix)]
pub use posix::ExecResult;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/mcp.rs | codex-rs/exec-server/src/posix/mcp.rs | use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context as _;
use anyhow::Result;
use codex_core::MCP_SANDBOX_STATE_CAPABILITY;
use codex_core::MCP_SANDBOX_STATE_METHOD;
use codex_core::SandboxState;
use codex_core::protocol::SandboxPolicy;
use codex_execpolicy::Policy;
use rmcp::ErrorData as McpError;
use rmcp::RoleServer;
use rmcp::ServerHandler;
use rmcp::ServiceExt;
use rmcp::handler::server::router::tool::ToolRouter;
use rmcp::handler::server::wrapper::Parameters;
use rmcp::model::CustomRequest;
use rmcp::model::CustomResult;
use rmcp::model::*;
use rmcp::schemars;
use rmcp::service::RequestContext;
use rmcp::service::RunningService;
use rmcp::tool;
use rmcp::tool_handler;
use rmcp::tool_router;
use rmcp::transport::stdio;
use serde_json::json;
use tokio::sync::RwLock;
use crate::posix::escalate_server::EscalateServer;
use crate::posix::escalate_server::{self};
use crate::posix::mcp_escalation_policy::McpEscalationPolicy;
use crate::posix::stopwatch::Stopwatch;
/// Path to our patched bash.
const CODEX_BASH_PATH_ENV_VAR: &str = "CODEX_BASH_PATH";
// Version string advertised for the experimental sandbox-state capability in
// `get_info`.
const SANDBOX_STATE_CAPABILITY_VERSION: &str = "1.0.0";
// Resolves the patched Bash binary from CODEX_BASH_PATH; errors if unset.
pub(crate) fn get_bash_path() -> Result<PathBuf> {
    std::env::var(CODEX_BASH_PATH_ENV_VAR)
        .map(PathBuf::from)
        .context(format!("{CODEX_BASH_PATH_ENV_VAR} must be set"))
}
// NOTE: the `///` field docs below become JSON-schema `description`s via
// schemars and are pinned by `exec_params_json_schema_matches_expected`;
// keep them in sync with that test.
#[derive(Debug, serde::Deserialize, schemars::JsonSchema)]
pub struct ExecParams {
    /// The bash string to execute.
    pub command: String,
    /// The working directory to execute the command in. Must be an absolute path.
    pub workdir: String,
    /// The timeout for the command in milliseconds.
    pub timeout_ms: Option<u64>,
    /// Launch Bash with -lc instead of -c: defaults to true.
    pub login: Option<bool>,
}
// Result payload returned to the MCP client by the `shell` tool.
#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)]
pub struct ExecResult {
    pub exit_code: i32,
    pub output: String,
    pub duration: Duration,
    pub timed_out: bool,
}
impl From<escalate_server::ExecResult> for ExecResult {
    fn from(result: escalate_server::ExecResult) -> Self {
        Self {
            exit_code: result.exit_code,
            output: result.output,
            duration: result.duration,
            timed_out: result.timed_out,
        }
    }
}
// MCP tool host: owns the binary paths, the shared exec policy, and the
// (optional) sandbox state consulted by `shell` invocations.
#[derive(Clone)]
pub struct ExecTool {
    tool_router: ToolRouter<ExecTool>,
    bash_path: PathBuf,
    execve_wrapper: PathBuf,
    policy: Arc<RwLock<Policy>>,
    preserve_program_paths: bool,
    // `None` until the client pushes state via the custom sandbox-state
    // request (see `on_custom_request`).
    sandbox_state: Arc<RwLock<Option<SandboxState>>>,
}
#[tool_router]
impl ExecTool {
    // Constructs the tool with no sandbox state; state arrives later via the
    // sandbox-state custom request.
    pub fn new(
        bash_path: PathBuf,
        execve_wrapper: PathBuf,
        policy: Arc<RwLock<Policy>>,
        preserve_program_paths: bool,
    ) -> Self {
        Self {
            tool_router: Self::tool_router(),
            bash_path,
            execve_wrapper,
            policy,
            preserve_program_paths,
            sandbox_state: Arc::new(RwLock::new(None)),
        }
    }
    /// Runs a shell command and returns its output. You MUST provide the workdir as an absolute path.
    #[tool]
    async fn shell(
        &self,
        context: RequestContext<RoleServer>,
        Parameters(params): Parameters<ExecParams>,
    ) -> Result<CallToolResult, McpError> {
        // Fall back to the crate-wide default timeout when the caller omits one.
        let effective_timeout = Duration::from_millis(
            params
                .timeout_ms
                .unwrap_or(codex_core::exec::DEFAULT_EXEC_COMMAND_TIMEOUT_MS),
        );
        let stopwatch = Stopwatch::new(effective_timeout);
        let cancel_token = stopwatch.cancellation_token();
        // If the client never pushed sandbox state, default to a read-only
        // policy rooted at the requested workdir.
        let sandbox_state =
            self.sandbox_state
                .read()
                .await
                .clone()
                .unwrap_or_else(|| SandboxState {
                    sandbox_policy: SandboxPolicy::ReadOnly,
                    codex_linux_sandbox_exe: None,
                    sandbox_cwd: PathBuf::from(&params.workdir),
                });
        // Each invocation gets its own escalate server wired to the shared
        // policy and this request's context/stopwatch.
        let escalate_server = EscalateServer::new(
            self.bash_path.clone(),
            self.execve_wrapper.clone(),
            McpEscalationPolicy::new(
                self.policy.clone(),
                context,
                stopwatch.clone(),
                self.preserve_program_paths,
            ),
        );
        let result = escalate_server
            .exec(params, cancel_token, &sandbox_state)
            .await
            .map_err(|e| McpError::internal_error(e.to_string(), None))?;
        Ok(CallToolResult::success(vec![Content::json(
            ExecResult::from(result),
        )?]))
    }
}
// Marker type so `McpError::method_not_found` can name the sandbox-state
// method in its error payload.
#[derive(Default)]
pub struct CodexSandboxStateUpdateMethod;
impl rmcp::model::ConstString for CodexSandboxStateUpdateMethod {
    const VALUE: &'static str = MCP_SANDBOX_STATE_METHOD;
}
#[tool_handler]
impl ServerHandler for ExecTool {
    fn get_info(&self) -> ServerInfo {
        // Advertise the experimental sandbox-state capability (with its
        // version) alongside the standard tools capability.
        let mut experimental_capabilities = ExperimentalCapabilities::new();
        let mut sandbox_state_capability = JsonObject::new();
        sandbox_state_capability.insert(
            "version".to_string(),
            serde_json::Value::String(SANDBOX_STATE_CAPABILITY_VERSION.to_string()),
        );
        experimental_capabilities.insert(
            MCP_SANDBOX_STATE_CAPABILITY.to_string(),
            sandbox_state_capability,
        );
        ServerInfo {
            protocol_version: ProtocolVersion::V_2025_06_18,
            capabilities: ServerCapabilities::builder()
                .enable_tools()
                .enable_experimental_with(experimental_capabilities)
                .build(),
            server_info: Implementation::from_build_env(),
            instructions: Some(
                "This server provides a tool to execute shell commands and return their output."
                    .to_string(),
            ),
        }
    }
    async fn initialize(
        &self,
        _request: InitializeRequestParam,
        _context: RequestContext<RoleServer>,
    ) -> Result<InitializeResult, McpError> {
        Ok(self.get_info())
    }
    // Handles the custom sandbox-state update: validates method and params,
    // then replaces the shared state consulted by `shell`.
    async fn on_custom_request(
        &self,
        request: CustomRequest,
        _context: rmcp::service::RequestContext<rmcp::RoleServer>,
    ) -> Result<CustomResult, McpError> {
        let CustomRequest { method, params, .. } = request;
        if method != MCP_SANDBOX_STATE_METHOD {
            return Err(McpError::method_not_found::<CodexSandboxStateUpdateMethod>());
        }
        let Some(params) = params else {
            return Err(McpError::invalid_params(
                "missing params for sandbox state request".to_string(),
                None,
            ));
        };
        // Echo the offending params back in the error to aid debugging.
        let Ok(sandbox_state) = serde_json::from_value::<SandboxState>(params.clone()) else {
            return Err(McpError::invalid_params(
                "failed to deserialize sandbox state".to_string(),
                Some(params),
            ));
        };
        *self.sandbox_state.write().await = Some(sandbox_state);
        Ok(CustomResult::new(json!({})))
    }
}
// Builds an `ExecTool` and serves it over the stdio transport until the
// transport closes.
pub(crate) async fn serve(
    bash_path: PathBuf,
    execve_wrapper: PathBuf,
    policy: Arc<RwLock<Policy>>,
    preserve_program_paths: bool,
) -> Result<RunningService<RoleServer, ExecTool>, rmcp::service::ServerInitializeError> {
    let tool = ExecTool::new(bash_path, execve_wrapper, policy, preserve_program_paths);
    tool.serve(stdio()).await
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    /// Verify that the way we use serde does not compromise the desired JSON
    /// schema via schemars. In particular, ensure that the `login` and
    /// `timeout_ms` fields are optional.
    #[test]
    fn exec_params_json_schema_matches_expected() {
        let schema = schemars::schema_for!(ExecParams);
        let actual = serde_json::to_value(schema).expect("schema should serialize");
        // The `description` strings below come from the `///` docs on
        // `ExecParams`; changing those docs changes this schema.
        assert_eq!(
            actual,
            json!({
                "$schema": "https://json-schema.org/draft/2020-12/schema",
                "title": "ExecParams",
                "type": "object",
                "properties": {
                    "command": {
                        "description": "The bash string to execute.",
                        "type": "string"
                    },
                    "login": {
                        "description": "Launch Bash with -lc instead of -c: defaults to true.",
                        "type": ["boolean", "null"]
                    },
                    "timeout_ms": {
                        "description": "The timeout for the command in milliseconds.",
                        "format": "uint64",
                        "minimum": 0,
                        "type": ["integer", "null"]
                    },
                    "workdir": {
                        "description":
                            "The working directory to execute the command in. Must be an absolute path.",
                        "type": "string"
                    }
                },
                "required": ["command", "workdir"]
            })
        );
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/escalation_policy.rs | codex-rs/exec-server/src/posix/escalation_policy.rs | use std::path::Path;
use crate::posix::escalate_protocol::EscalateAction;
/// Decides what action to take in response to an execve request from a client.
#[async_trait::async_trait]
pub(crate) trait EscalationPolicy: Send + Sync {
    /// Determines how a single intercepted exec() should be handled.
    ///
    /// * `file` - the absolute path to the executable to run (the first arg
    ///   to exec).
    /// * `argv` - the full argument vector, including `argv[0]`.
    /// * `workdir` - the directory the command would run in.
    async fn determine_action(
        &self,
        file: &Path,
        argv: &[String],
        workdir: &Path,
    ) -> Result<EscalateAction, rmcp::ErrorData>;
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/stopwatch.rs | codex-rs/exec-server/src/posix/stopwatch.rs | use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::Mutex;
use tokio::sync::Notify;
use tokio_util::sync::CancellationToken;
/// Tracks accumulated "running" time toward a fixed limit, with support for
/// pausing; cancellation tokens fire only when running time reaches the limit.
#[derive(Clone, Debug)]
pub(crate) struct Stopwatch {
    // Maximum running time before cancellation tokens fire.
    limit: Duration,
    // Shared mutable state: accumulated time plus pause bookkeeping.
    inner: Arc<Mutex<StopwatchState>>,
    // Wakes the watcher task whenever the running/paused state flips.
    notify: Arc<Notify>,
}
#[derive(Debug)]
struct StopwatchState {
    // Running time accumulated before the current running stretch.
    elapsed: Duration,
    // Start of the current running stretch; `None` while paused.
    running_since: Option<Instant>,
    // Number of overlapping `pause_for` calls currently in flight.
    active_pauses: u32,
}
impl Stopwatch {
    /// Creates a stopwatch that starts running immediately, with `limit` as
    /// the total running-time budget.
    pub(crate) fn new(limit: Duration) -> Self {
        Self {
            inner: Arc::new(Mutex::new(StopwatchState {
                elapsed: Duration::ZERO,
                running_since: Some(Instant::now()),
                active_pauses: 0,
            })),
            notify: Arc::new(Notify::new()),
            limit,
        }
    }
    /// Returns a token that is cancelled once accumulated running time
    /// reaches `limit`. Spawns a watcher task that sleeps for the remaining
    /// budget and is re-woken via `notify` on pause/resume transitions.
    pub(crate) fn cancellation_token(&self) -> CancellationToken {
        let limit = self.limit;
        let token = CancellationToken::new();
        let cancel = token.clone();
        let inner = Arc::clone(&self.inner);
        let notify = Arc::clone(&self.notify);
        tokio::spawn(async move {
            loop {
                // Snapshot remaining budget and running state under the lock.
                let (remaining, running) = {
                    let guard = inner.lock().await;
                    let elapsed = guard.elapsed
                        + guard
                            .running_since
                            .map(|since| since.elapsed())
                            .unwrap_or_default();
                    if elapsed >= limit {
                        break;
                    }
                    (limit - elapsed, guard.running_since.is_some())
                };
                if !running {
                    // Paused: wait for a resume before re-arming the timer.
                    notify.notified().await;
                    continue;
                }
                let sleep = tokio::time::sleep(remaining);
                tokio::pin!(sleep);
                tokio::select! {
                    _ = &mut sleep => {
                        // Budget exhausted while running.
                        break;
                    }
                    _ = notify.notified() => {
                        // State changed (pause/resume): recompute the deadline.
                        continue;
                    }
                }
            }
            cancel.cancel();
        });
        token
    }
    /// Runs `fut`, pausing the stopwatch while the future is pending. The clock
    /// resumes automatically when the future completes. Nested/overlapping
    /// calls are reference-counted so the stopwatch only resumes when every
    /// pause is lifted.
    pub(crate) async fn pause_for<F, T>(&self, fut: F) -> T
    where
        F: Future<Output = T>,
    {
        self.pause().await;
        let result = fut.await;
        self.resume().await;
        result
    }
    // Increments the pause count; on the first pause, banks the elapsed
    // running stretch and wakes the watcher so it stops its timer.
    async fn pause(&self) {
        let mut guard = self.inner.lock().await;
        guard.active_pauses += 1;
        if guard.active_pauses == 1
            && let Some(since) = guard.running_since.take()
        {
            guard.elapsed += since.elapsed();
            self.notify.notify_waiters();
        }
    }
    // Decrements the pause count; when the last pause lifts, restarts the
    // running clock and wakes the watcher to re-arm its timer.
    async fn resume(&self) {
        let mut guard = self.inner.lock().await;
        if guard.active_pauses == 0 {
            return;
        }
        guard.active_pauses -= 1;
        if guard.active_pauses == 0 && guard.running_since.is_none() {
            guard.running_since = Some(Instant::now());
            self.notify.notify_waiters();
        }
    }
}
#[cfg(test)]
mod tests {
    use super::Stopwatch;
    use tokio::time::Duration;
    use tokio::time::Instant;
    use tokio::time::sleep;
    use tokio::time::timeout;
    // The token fires once the running-time budget is spent.
    #[tokio::test]
    async fn cancellation_receiver_fires_after_limit() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();
        let start = Instant::now();
        token.cancelled().await;
        assert!(start.elapsed() >= Duration::from_millis(50));
    }
    // Time spent inside `pause_for` must not count against the budget.
    #[tokio::test]
    async fn pause_prevents_timeout_until_resumed() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();
        let pause_handle = tokio::spawn({
            let stopwatch = stopwatch.clone();
            async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(100)).await;
                    })
                    .await;
            }
        });
        assert!(
            timeout(Duration::from_millis(30), token.cancelled())
                .await
                .is_err()
        );
        pause_handle.await.expect("pause task should finish");
        token.cancelled().await;
    }
    // Overlapping pauses are reference-counted: the clock restarts only after
    // the last pause lifts.
    #[tokio::test]
    async fn overlapping_pauses_only_resume_once() {
        let stopwatch = Stopwatch::new(Duration::from_millis(50));
        let token = stopwatch.cancellation_token();
        // First pause.
        let pause1 = {
            let stopwatch = stopwatch.clone();
            tokio::spawn(async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(80)).await;
                    })
                    .await;
            })
        };
        // Overlapping pause that ends sooner.
        let pause2 = {
            let stopwatch = stopwatch.clone();
            tokio::spawn(async move {
                stopwatch
                    .pause_for(async {
                        sleep(Duration::from_millis(30)).await;
                    })
                    .await;
            })
        };
        // While both pauses are active, the cancellation should not fire.
        assert!(
            timeout(Duration::from_millis(40), token.cancelled())
                .await
                .is_err()
        );
        pause2.await.expect("short pause should complete");
        // Still paused because the long pause is active.
        assert!(
            timeout(Duration::from_millis(30), token.cancelled())
                .await
                .is_err()
        );
        pause1.await.expect("long pause should complete");
        // Now the stopwatch should resume and hit the limit shortly after.
        token.cancelled().await;
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/escalate_protocol.rs | codex-rs/exec-server/src/posix/escalate_protocol.rs | use std::collections::HashMap;
use std::os::fd::RawFd;
use std::path::PathBuf;
use serde::Deserialize;
use serde::Serialize;
/// 'exec-server escalate' reads this to find the inherited FD for the escalate socket.
pub(super) const ESCALATE_SOCKET_ENV_VAR: &str = "CODEX_ESCALATE_SOCKET";
/// The patched bash uses this to wrap exec() calls.
pub(super) const BASH_EXEC_WRAPPER_ENV_VAR: &str = "BASH_EXEC_WRAPPER";
/// The client sends this to the server to request an exec() call.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub(super) struct EscalateRequest {
    /// The absolute path to the executable to run, i.e. the first arg to exec.
    pub(super) file: PathBuf,
    /// The argv, including the program name (argv[0]).
    pub(super) argv: Vec<String>,
    /// Working directory the command should run in.
    pub(super) workdir: PathBuf,
    /// Environment the command should see.
    pub(super) env: HashMap<String, String>,
}
/// The server sends this to the client to respond to an exec() request.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub(super) struct EscalateResponse {
    /// What the client should do with the intercepted exec() call.
    pub(super) action: EscalateAction,
}
/// Decision for a single intercepted exec() call.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
pub(super) enum EscalateAction {
    /// The command should be run directly by the client.
    Run,
    /// The command should be escalated to the server for execution.
    Escalate,
    /// The command should not be executed.
    Deny {
        /// Optional human-readable explanation surfaced to the user.
        reason: Option<String>,
    },
}
/// The client sends this to the server to forward its open FDs.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub(super) struct SuperExecMessage {
    /// FD numbers as they appear in the client process; the duplicated
    /// descriptors themselves travel in the accompanying SCM_RIGHTS message.
    pub(super) fds: Vec<RawFd>,
}
/// The server responds when the exec()'d command has exited.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub(super) struct SuperExecResult {
    /// Exit code the client should exit with.
    pub(super) exit_code: i32,
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/escalate_client.rs | codex-rs/exec-server/src/posix/escalate_client.rs | use std::io;
use std::os::fd::AsRawFd;
use std::os::fd::FromRawFd as _;
use std::os::fd::OwnedFd;
use anyhow::Context as _;
use crate::posix::escalate_protocol::BASH_EXEC_WRAPPER_ENV_VAR;
use crate::posix::escalate_protocol::ESCALATE_SOCKET_ENV_VAR;
use crate::posix::escalate_protocol::EscalateAction;
use crate::posix::escalate_protocol::EscalateRequest;
use crate::posix::escalate_protocol::EscalateResponse;
use crate::posix::escalate_protocol::SuperExecMessage;
use crate::posix::escalate_protocol::SuperExecResult;
use crate::posix::socket::AsyncDatagramSocket;
use crate::posix::socket::AsyncSocket;
/// Adopts the escalate datagram socket inherited from the server, located via
/// the `CODEX_ESCALATE_SOCKET` environment variable.
fn get_escalate_client() -> anyhow::Result<AsyncDatagramSocket> {
    // TODO: we should defensively require only calling this once, since AsyncSocket will take ownership of the fd.
    let raw_fd: i32 = std::env::var(ESCALATE_SOCKET_ENV_VAR)?.parse()?;
    anyhow::ensure!(
        raw_fd >= 0,
        "{ESCALATE_SOCKET_ENV_VAR} is not a valid file descriptor: {raw_fd}"
    );
    // The parent process put this fd in our environment — presumably it is an
    // open socket we own; from_raw_fd takes ownership of it.
    let socket = unsafe { AsyncDatagramSocket::from_raw_fd(raw_fd) }?;
    Ok(socket)
}
/// Client side of `exec-server escalate`: reports the intended exec() to the
/// escalate server over an inherited socket and carries out whichever action
/// the server returns. The returned value is the exit code this process
/// should exit with (the `Run` arm exec()s and never returns on success).
pub(crate) async fn run(file: String, argv: Vec<String>) -> anyhow::Result<i32> {
    let handshake_client = get_escalate_client()?;
    // Create a private stream socket for this session and hand the server end
    // to the escalate server via the shared handshake datagram socket.
    let (server, client) = AsyncSocket::pair()?;
    const HANDSHAKE_MESSAGE: [u8; 1] = [0];
    handshake_client
        .send_with_fds(&HANDSHAKE_MESSAGE, &[server.into_inner().into()])
        .await
        .context("failed to send handshake datagram")?;
    // Forward our environment minus the escalation plumbing variables.
    let env = std::env::vars()
        .filter(|(k, _)| {
            !matches!(
                k.as_str(),
                ESCALATE_SOCKET_ENV_VAR | BASH_EXEC_WRAPPER_ENV_VAR
            )
        })
        .collect();
    client
        .send(EscalateRequest {
            file: file.clone().into(),
            argv: argv.clone(),
            workdir: std::env::current_dir()?,
            env,
        })
        .await
        .context("failed to send EscalateRequest")?;
    let message = client
        .receive::<EscalateResponse>()
        .await
        .context("failed to receive EscalateResponse")?;
    match message.action {
        EscalateAction::Escalate => {
            // The server runs the command for us; forward our stdio fds so the
            // child can use them directly.
            // TODO: maybe we should send ALL open FDs (except the escalate client)?
            // NOTE(review): wrapping the stdio fds in OwnedFd means they are
            // closed when `fds_to_send` drops at the end of this arm — confirm
            // that is acceptable given we only await the exit code afterwards.
            let fds_to_send = [
                unsafe { OwnedFd::from_raw_fd(io::stdin().as_raw_fd()) },
                unsafe { OwnedFd::from_raw_fd(io::stdout().as_raw_fd()) },
                unsafe { OwnedFd::from_raw_fd(io::stderr().as_raw_fd()) },
            ];
            // TODO: also forward signals over the super-exec socket
            client
                .send_with_fds(
                    SuperExecMessage {
                        fds: fds_to_send.iter().map(AsRawFd::as_raw_fd).collect(),
                    },
                    &fds_to_send,
                )
                .await
                .context("failed to send SuperExecMessage")?;
            // Block until the server reports the child's exit code.
            let SuperExecResult { exit_code } = client.receive::<SuperExecResult>().await?;
            Ok(exit_code)
        }
        EscalateAction::Run => {
            // We avoid std::process::Command here because we want to be as transparent as
            // possible. std::os::unix::process::CommandExt has .exec() but it does some funky
            // stuff with signal masks and dup2() on its standard FDs, which we don't want.
            use std::ffi::CString;
            let file = CString::new(file).context("NUL in file")?;
            let argv_cstrs: Vec<CString> = argv
                .iter()
                .map(|s| CString::new(s.as_str()).context("NUL in argv"))
                .collect::<Result<Vec<_>, _>>()?;
            // Build the NULL-terminated pointer array execv() expects; the
            // CStrings in `argv_cstrs` stay alive across the call.
            let mut argv: Vec<*const libc::c_char> =
                argv_cstrs.iter().map(|s| s.as_ptr()).collect();
            argv.push(std::ptr::null());
            // execv only returns on failure, in which case errno holds the cause.
            let err = unsafe {
                libc::execv(file.as_ptr(), argv.as_ptr());
                std::io::Error::last_os_error()
            };
            Err(err.into())
        }
        EscalateAction::Deny { reason } => {
            match reason {
                Some(reason) => eprintln!("Execution denied: {reason}"),
                None => eprintln!("Execution denied"),
            }
            Ok(1)
        }
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/escalate_server.rs | codex-rs/exec-server/src/posix/escalate_server.rs | use std::collections::HashMap;
use std::os::fd::AsRawFd;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context as _;
use path_absolutize::Absolutize as _;
use codex_core::SandboxState;
use codex_core::exec::process_exec_tool_call;
use codex_core::sandboxing::SandboxPermissions;
use tokio::process::Command;
use tokio_util::sync::CancellationToken;
use crate::posix::escalate_protocol::BASH_EXEC_WRAPPER_ENV_VAR;
use crate::posix::escalate_protocol::ESCALATE_SOCKET_ENV_VAR;
use crate::posix::escalate_protocol::EscalateAction;
use crate::posix::escalate_protocol::EscalateRequest;
use crate::posix::escalate_protocol::EscalateResponse;
use crate::posix::escalate_protocol::SuperExecMessage;
use crate::posix::escalate_protocol::SuperExecResult;
use crate::posix::escalation_policy::EscalationPolicy;
use crate::posix::mcp::ExecParams;
use crate::posix::socket::AsyncDatagramSocket;
use crate::posix::socket::AsyncSocket;
use codex_core::exec::ExecExpiration;
/// Runs agent commands under a patched bash whose exec() calls are routed back
/// to this server over an inherited socket, where `policy` decides their fate.
pub(crate) struct EscalateServer {
    /// Path to the bash binary used to run the command string.
    bash_path: PathBuf,
    /// Path advertised to bash via `BASH_EXEC_WRAPPER_ENV_VAR`.
    execve_wrapper: PathBuf,
    /// Policy consulted for every intercepted exec() call.
    policy: Arc<dyn EscalationPolicy>,
}
impl EscalateServer {
    /// Creates a server that runs commands under `bash_path`, advertising
    /// `execve_wrapper` to bash and consulting `policy` for every exec().
    pub fn new<P>(bash_path: PathBuf, execve_wrapper: PathBuf, policy: P) -> Self
    where
        P: EscalationPolicy + Send + Sync + 'static,
    {
        Self {
            bash_path,
            execve_wrapper,
            policy: Arc::new(policy),
        }
    }
    /// Runs `params.command` under bash with escalation support.
    ///
    /// Sets up a datagram handshake socket (whose fd number bash inherits via
    /// `ESCALATE_SOCKET_ENV_VAR`) and a background task that answers escalate
    /// requests until the command finishes.
    pub async fn exec(
        &self,
        params: ExecParams,
        cancel_rx: CancellationToken,
        sandbox_state: &SandboxState,
    ) -> anyhow::Result<ExecResult> {
        let (escalate_server, escalate_client) = AsyncDatagramSocket::pair()?;
        let client_socket = escalate_client.into_inner();
        // Clear CLOEXEC so the client end survives the exec() into bash.
        client_socket.set_cloexec(false)?;
        let escalate_task = tokio::spawn(escalate_task(escalate_server, self.policy.clone()));
        let mut env = std::env::vars().collect::<HashMap<String, String>>();
        env.insert(
            ESCALATE_SOCKET_ENV_VAR.to_string(),
            client_socket.as_raw_fd().to_string(),
        );
        env.insert(
            BASH_EXEC_WRAPPER_ENV_VAR.to_string(),
            self.execve_wrapper.to_string_lossy().to_string(),
        );
        // NOTE(review): `timeout_ms` is ignored here; expiration is driven
        // solely by `cancel_rx`. Confirm that is intentional.
        let ExecParams {
            command,
            workdir,
            timeout_ms: _,
            login,
        } = params;
        let result = process_exec_tool_call(
            codex_core::exec::ExecParams {
                command: vec![
                    self.bash_path.to_string_lossy().to_string(),
                    // Default to a login shell (-lc) unless login == Some(false).
                    if login == Some(false) {
                        "-c".to_string()
                    } else {
                        "-lc".to_string()
                    },
                    command,
                ],
                cwd: PathBuf::from(&workdir),
                expiration: ExecExpiration::Cancellation(cancel_rx),
                env,
                sandbox_permissions: SandboxPermissions::UseDefault,
                justification: None,
                arg0: None,
            },
            &sandbox_state.sandbox_policy,
            &sandbox_state.sandbox_cwd,
            &sandbox_state.codex_linux_sandbox_exe,
            None,
        )
        .await?;
        // The command is done; stop answering escalate requests.
        escalate_task.abort();
        let result = ExecResult {
            exit_code: result.exit_code,
            output: result.aggregated_output.text,
            duration: result.duration,
            timed_out: result.timed_out,
        };
        Ok(result)
    }
}
/// Accepts handshake datagrams — each carrying exactly one stream-socket fd —
/// and spawns a session handler per connection. Runs until the socket errors
/// or the task is aborted.
async fn escalate_task(
    socket: AsyncDatagramSocket,
    policy: Arc<dyn EscalationPolicy>,
) -> anyhow::Result<()> {
    loop {
        let (_payload, fds) = socket.receive_with_fds().await?;
        // A well-formed handshake carries exactly one fd.
        let fd = match <[_; 1]>::try_from(fds) {
            Ok([fd]) => fd,
            Err(fds) => {
                tracing::error!("expected 1 fd in datagram handshake, got {}", fds.len());
                continue;
            }
        };
        let stream_socket = AsyncSocket::from_fd(fd)?;
        let session_policy = Arc::clone(&policy);
        tokio::spawn(async move {
            if let Err(err) = handle_escalate_session_with_policy(stream_socket, session_policy).await {
                tracing::error!("escalate session failed: {err:?}");
            }
        });
    }
}
/// Outcome of running a command via [`EscalateServer::exec`].
#[derive(Debug)]
pub(crate) struct ExecResult {
    /// Exit code of the bash process that ran the command.
    pub(crate) exit_code: i32,
    /// Aggregated output text captured from the command.
    pub(crate) output: String,
    /// How long the command ran.
    pub(crate) duration: Duration,
    /// Whether execution was cut short by expiration.
    pub(crate) timed_out: bool,
}
/// Serves one escalate session: receives the exec() request, asks `policy`
/// what to do, then either tells the client to run the command itself (`Run`),
/// runs it here using the client's forwarded fds (`Escalate`), or denies it.
async fn handle_escalate_session_with_policy(
    socket: AsyncSocket,
    policy: Arc<dyn EscalationPolicy>,
) -> anyhow::Result<()> {
    let EscalateRequest {
        file,
        argv,
        workdir,
        env,
    } = socket.receive::<EscalateRequest>().await?;
    // Normalize both paths to absolute form before consulting the policy.
    let file = PathBuf::from(&file).absolutize()?.into_owned();
    let workdir = PathBuf::from(&workdir).absolutize()?.into_owned();
    let action = policy
        .determine_action(file.as_path(), &argv, &workdir)
        .await?;
    tracing::debug!("decided {action:?} for {file:?} {argv:?} {workdir:?}");
    match action {
        EscalateAction::Run => {
            // The client performs the exec() itself.
            socket
                .send(EscalateResponse {
                    action: EscalateAction::Run,
                })
                .await?;
        }
        EscalateAction::Escalate => {
            socket
                .send(EscalateResponse {
                    action: EscalateAction::Escalate,
                })
                .await?;
            // `msg.fds` lists fd numbers as seen in the client process; `fds`
            // are our local duplicates received via SCM_RIGHTS, in the same order.
            let (msg, fds) = socket
                .receive_with_fds::<SuperExecMessage>()
                .await
                .context("failed to receive SuperExecMessage")?;
            if fds.len() != msg.fds.len() {
                return Err(anyhow::anyhow!(
                    "mismatched number of fds in SuperExecMessage: {} in the message, {} from the control message",
                    msg.fds.len(),
                    fds.len()
                ));
            }
            // The dup2 calls below would clobber a source fd that is also used
            // as a target, so reject overlapping sets outright.
            if msg
                .fds
                .iter()
                .any(|src_fd| fds.iter().any(|dst_fd| dst_fd.as_raw_fd() == *src_fd))
            {
                return Err(anyhow::anyhow!(
                    "overlapping fds not yet supported in SuperExecMessage"
                ));
            }
            // NOTE(review): `argv[1..]` / `argv[0]` panic on an empty argv —
            // confirm the client guarantees at least the program name.
            let mut command = Command::new(file);
            command
                .args(&argv[1..])
                .arg0(argv[0].clone())
                .envs(&env)
                .current_dir(&workdir)
                .stdin(Stdio::null())
                .stdout(Stdio::null())
                .stderr(Stdio::null());
            // Remap the client's fd numbers onto our received duplicates in the
            // child, after fork but before exec.
            // NOTE(review): the dup2 return value is unchecked — a failure would
            // leave the child with the null stdio configured above.
            unsafe {
                command.pre_exec(move || {
                    for (dst_fd, src_fd) in msg.fds.iter().zip(&fds) {
                        libc::dup2(src_fd.as_raw_fd(), *dst_fd);
                    }
                    Ok(())
                });
            }
            let mut child = command.spawn()?;
            let exit_status = child.wait().await?;
            socket
                .send(SuperExecResult {
                    // code() is None when the child did not exit normally
                    // (e.g. killed by a signal); report 127 in that case.
                    exit_code: exit_status.code().unwrap_or(127),
                })
                .await?;
        }
        EscalateAction::Deny { reason } => {
            socket
                .send(EscalateResponse {
                    action: EscalateAction::Deny { reason },
                })
                .await?;
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use std::collections::HashMap;
    use std::path::Path;
    use std::path::PathBuf;
    /// Test policy that always returns the same pre-configured action.
    struct DeterministicEscalationPolicy {
        action: EscalateAction,
    }
    #[async_trait::async_trait]
    impl EscalationPolicy for DeterministicEscalationPolicy {
        async fn determine_action(
            &self,
            _file: &Path,
            _argv: &[String],
            _workdir: &Path,
        ) -> Result<EscalateAction, rmcp::ErrorData> {
            Ok(self.action.clone())
        }
    }
    /// A `Run` decision is relayed back to the client unchanged.
    #[tokio::test]
    async fn handle_escalate_session_respects_run_in_sandbox_decision() -> anyhow::Result<()> {
        let (server, client) = AsyncSocket::pair()?;
        let server_task = tokio::spawn(handle_escalate_session_with_policy(
            server,
            Arc::new(DeterministicEscalationPolicy {
                action: EscalateAction::Run,
            }),
        ));
        // ~10 KiB of env padding — presumably to exercise frames larger than
        // a single socket read; the request must still round-trip intact.
        let mut env = HashMap::new();
        for i in 0..10 {
            let value = "A".repeat(1024);
            env.insert(format!("CODEX_TEST_VAR{i}"), value);
        }
        client
            .send(EscalateRequest {
                file: PathBuf::from("/bin/echo"),
                argv: vec!["echo".to_string()],
                workdir: PathBuf::from("/tmp"),
                env,
            })
            .await?;
        let response = client.receive::<EscalateResponse>().await?;
        assert_eq!(
            EscalateResponse {
                action: EscalateAction::Run,
            },
            response
        );
        server_task.await?
    }
    /// An `Escalate` decision makes the server run the command itself; the
    /// exit code (42) proves the forwarded env reached the child.
    #[tokio::test]
    async fn handle_escalate_session_executes_escalated_command() -> anyhow::Result<()> {
        let (server, client) = AsyncSocket::pair()?;
        let server_task = tokio::spawn(handle_escalate_session_with_policy(
            server,
            Arc::new(DeterministicEscalationPolicy {
                action: EscalateAction::Escalate,
            }),
        ));
        client
            .send(EscalateRequest {
                file: PathBuf::from("/bin/sh"),
                argv: vec![
                    "sh".to_string(),
                    "-c".to_string(),
                    r#"if [ "$KEY" = VALUE ]; then exit 42; else exit 1; fi"#.to_string(),
                ],
                workdir: std::env::current_dir()?,
                env: HashMap::from([("KEY".to_string(), "VALUE".to_string())]),
            })
            .await?;
        let response = client.receive::<EscalateResponse>().await?;
        assert_eq!(
            EscalateResponse {
                action: EscalateAction::Escalate,
            },
            response
        );
        client
            .send_with_fds(SuperExecMessage { fds: Vec::new() }, &[])
            .await?;
        let result = client.receive::<SuperExecResult>().await?;
        assert_eq!(42, result.exit_code);
        server_task.await?
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/socket.rs | codex-rs/exec-server/src/posix/socket.rs | use libc::c_uint;
use serde::Deserialize;
use serde::Serialize;
use socket2::Domain;
use socket2::MaybeUninitSlice;
use socket2::MsgHdr;
use socket2::MsgHdrMut;
use socket2::Socket;
use socket2::Type;
use std::io::IoSlice;
use std::mem::MaybeUninit;
use std::os::fd::AsRawFd;
use std::os::fd::FromRawFd;
use std::os::fd::OwnedFd;
use std::os::fd::RawFd;
use tokio::io::Interest;
use tokio::io::unix::AsyncFd;
/// Upper bound on the number of fds carried in a single SCM_RIGHTS message.
const MAX_FDS_PER_MESSAGE: usize = 16;
/// Stream frames start with a little-endian u32 payload-length prefix.
const LENGTH_PREFIX_SIZE: usize = size_of::<u32>();
/// Receive buffer size for a single datagram.
const MAX_DATAGRAM_SIZE: usize = 8192;
/// Converts a slice of MaybeUninit<T> to a slice of T.
///
/// The caller guarantees that every element of `buf` is initialized.
fn assume_init<T>(buf: &[MaybeUninit<T>]) -> &[T] {
    // SAFETY: MaybeUninit<T> has the same layout as T, and the caller
    // guarantees every element is initialized (see doc comment).
    unsafe { std::slice::from_raw_parts(buf.as_ptr().cast(), buf.len()) }
}
/// Array-reference version of [`assume_init`]; the caller guarantees that
/// every element of `buf` is initialized.
fn assume_init_slice<T, const N: usize>(buf: &[MaybeUninit<T>; N]) -> &[T; N] {
    // SAFETY: same-layout cast; the caller guarantees initialization.
    unsafe { &*(buf as *const [MaybeUninit<T>; N] as *const [T; N]) }
}
/// Converts a `Vec<MaybeUninit<T>>` into a `Vec<T>` without copying.
///
/// The caller guarantees that every element of `buf` is initialized.
fn assume_init_vec<T>(mut buf: Vec<MaybeUninit<T>>) -> Vec<T> {
    // SAFETY: MaybeUninit<T> has the same layout as T. `forget` relinquishes
    // ownership of the original Vec so the same allocation can be re-adopted
    // as Vec<T> without a double free.
    unsafe {
        let ptr = buf.as_mut_ptr() as *mut T;
        let len = buf.len();
        let cap = buf.capacity();
        std::mem::forget(buf);
        Vec::from_raw_parts(ptr, len, cap)
    }
}
/// Bytes of cmsg buffer needed to carry `count` file descriptors (CMSG_SPACE).
fn control_space_for_fds(count: usize) -> usize {
    // SAFETY: CMSG_SPACE is a pure size computation with no side effects.
    unsafe { libc::CMSG_SPACE((count * size_of::<RawFd>()) as _) as usize }
}
/// Extracts the FDs from a SCM_RIGHTS control message.
///
/// Takes ownership of each received descriptor by wrapping it in `OwnedFd`.
fn extract_fds(control: &[u8]) -> Vec<OwnedFd> {
    let mut fds = Vec::new();
    // Build a msghdr pointing at `control` so the libc CMSG_* macros can walk
    // the buffer; the data/iov fields stay zeroed since only control is read.
    let mut hdr: libc::msghdr = unsafe { std::mem::zeroed() };
    hdr.msg_control = control.as_ptr() as *mut libc::c_void;
    hdr.msg_controllen = control.len() as _;
    let hdr = hdr; // drop mut
    let mut cmsg = unsafe { libc::CMSG_FIRSTHDR(&hdr) as *const libc::cmsghdr };
    while !cmsg.is_null() {
        let level = unsafe { (*cmsg).cmsg_level };
        let ty = unsafe { (*cmsg).cmsg_type };
        if level == libc::SOL_SOCKET && ty == libc::SCM_RIGHTS {
            let data_ptr = unsafe { libc::CMSG_DATA(cmsg).cast::<RawFd>() };
            // fd count = payload length (cmsg_len minus the header, via
            // CMSG_LEN(0)) divided by the size of one fd.
            let fd_count: usize = {
                let cmsg_data_len =
                    unsafe { (*cmsg).cmsg_len as usize } - unsafe { libc::CMSG_LEN(0) as usize };
                cmsg_data_len / size_of::<RawFd>()
            };
            for i in 0..fd_count {
                let fd = unsafe { data_ptr.add(i).read() };
                // SAFETY: fds delivered via SCM_RIGHTS belong to this process
                // once received, so OwnedFd may take ownership.
                fds.push(unsafe { OwnedFd::from_raw_fd(fd) });
            }
        }
        cmsg = unsafe { libc::CMSG_NXTHDR(&hdr, cmsg) };
    }
    fds
}
/// Reads one complete frame (length prefix + payload) from a SOCK_STREAM
/// socket, returning the payload bytes together with any fds that arrived in
/// the header's control message.
async fn read_frame(async_socket: &AsyncFd<Socket>) -> std::io::Result<(Vec<u8>, Vec<OwnedFd>)> {
    let (payload_len, fds) = read_frame_header(async_socket).await?;
    read_frame_payload(async_socket, payload_len)
        .await
        .map(|payload| (payload, fds))
}
/// Read the frame header (i.e. length) and any FDs from a SOCK_STREAM socket.
///
/// Ancillary data is only collected on the first successful read; follow-up
/// reads (if the header bytes arrive split) fetch bytes only.
async fn read_frame_header(
    async_socket: &AsyncFd<Socket>,
) -> std::io::Result<(usize, Vec<OwnedFd>)> {
    let mut header = [MaybeUninit::<u8>::uninit(); LENGTH_PREFIX_SIZE];
    let mut filled = 0;
    let mut control = vec![MaybeUninit::<u8>::uninit(); control_space_for_fds(MAX_FDS_PER_MESSAGE)];
    let mut captured_control = false;
    while filled < LENGTH_PREFIX_SIZE {
        let mut guard = async_socket.readable().await?;
        // The first read should come with a control message containing any FDs.
        let result = if !captured_control {
            guard.try_io(|inner| {
                let mut bufs = [MaybeUninitSlice::new(&mut header[filled..])];
                let (read, control_len) = {
                    let mut msg = MsgHdrMut::new()
                        .with_buffers(&mut bufs)
                        .with_control(&mut control);
                    let read = inner.get_ref().recvmsg(&mut msg, 0)?;
                    (read, msg.control_len())
                };
                // Keep only the bytes the kernel actually wrote.
                control.truncate(control_len);
                captured_control = true;
                Ok(read)
            })
        } else {
            guard.try_io(|inner| inner.get_ref().recv(&mut header[filled..]))
        };
        let Ok(result) = result else {
            // Would block, try again.
            continue;
        };
        let read = result?;
        if read == 0 {
            // Peer closed the connection mid-header.
            return Err(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                "socket closed while receiving frame header",
            ));
        }
        filled += read;
        assert!(filled <= LENGTH_PREFIX_SIZE);
        if filled == LENGTH_PREFIX_SIZE {
            // All header bytes are initialized, and `control` was truncated to
            // its initialized length above.
            let len_bytes = assume_init_slice(&header);
            let payload_len = u32::from_le_bytes(*len_bytes) as usize;
            let fds = extract_fds(assume_init(&control));
            return Ok((payload_len, fds));
        }
    }
    unreachable!("header loop always returns")
}
/// Read `message_len` bytes from a SOCK_STREAM socket.
///
/// Retries on would-block; fails with `UnexpectedEof` if the peer closes the
/// connection before the full payload arrives.
async fn read_frame_payload(
    async_socket: &AsyncFd<Socket>,
    message_len: usize,
) -> std::io::Result<Vec<u8>> {
    if message_len == 0 {
        return Ok(Vec::new());
    }
    let mut payload = vec![MaybeUninit::<u8>::uninit(); message_len];
    let mut filled = 0;
    while filled < message_len {
        let mut guard = async_socket.readable().await?;
        let result = guard.try_io(|inner| inner.get_ref().recv(&mut payload[filled..]));
        let Ok(result) = result else {
            // Would block, try again.
            continue;
        };
        let read = result?;
        if read == 0 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                "socket closed while receiving frame payload",
            ));
        }
        filled += read;
        assert!(filled <= message_len);
        if filled == message_len {
            // Every byte up to message_len has now been written by recv().
            return Ok(assume_init_vec(payload));
        }
    }
    unreachable!("loop exits only after returning payload")
}
/// Sends `data` as one datagram, attaching `fds` via SCM_RIGHTS.
/// Fails with `WriteZero` if the kernel accepts fewer bytes than `data`.
fn send_datagram_bytes(socket: &Socket, data: &[u8], fds: &[OwnedFd]) -> std::io::Result<()> {
    let control = make_control_message(fds)?;
    let bufs = [IoSlice::new(data)];
    let msg = match control.is_empty() {
        true => MsgHdr::new().with_buffers(&bufs),
        false => MsgHdr::new().with_buffers(&bufs).with_control(&control),
    };
    let sent = socket.sendmsg(&msg, 0)?;
    if sent == data.len() {
        return Ok(());
    }
    Err(std::io::Error::new(
        std::io::ErrorKind::WriteZero,
        format!(
            "short datagram write: wrote {sent} bytes out of {}",
            data.len()
        ),
    ))
}
/// Encodes `len` as the little-endian u32 length prefix used by stream
/// frames; fails with `InvalidInput` when `len` does not fit in a u32.
fn encode_length(len: usize) -> std::io::Result<[u8; LENGTH_PREFIX_SIZE]> {
    match u32::try_from(len) {
        Ok(value) => Ok(value.to_le_bytes()),
        Err(_) => Err(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!("message too large: {len}"),
        )),
    }
}
/// Builds an SCM_RIGHTS control message carrying `fds`; returns an empty Vec
/// when there is nothing to attach, or `InvalidInput` when `fds` exceeds
/// `MAX_FDS_PER_MESSAGE`.
fn make_control_message(fds: &[OwnedFd]) -> std::io::Result<Vec<u8>> {
    if fds.len() > MAX_FDS_PER_MESSAGE {
        Err(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!("too many fds: {}", fds.len()),
        ))
    } else if fds.is_empty() {
        Ok(Vec::new())
    } else {
        let mut control = vec![0u8; control_space_for_fds(fds.len())];
        // SAFETY: `control` is sized via CMSG_SPACE for exactly this many fds,
        // so the header and fd-array writes stay in bounds.
        // NOTE(review): this treats the start of a Vec<u8> allocation as a
        // cmsghdr — confirm the alignment assumption holds on all targets.
        unsafe {
            let cmsg = control.as_mut_ptr().cast::<libc::cmsghdr>();
            (*cmsg).cmsg_len =
                libc::CMSG_LEN(size_of::<RawFd>() as c_uint * fds.len() as c_uint) as _;
            (*cmsg).cmsg_level = libc::SOL_SOCKET;
            (*cmsg).cmsg_type = libc::SCM_RIGHTS;
            let data_ptr = libc::CMSG_DATA(cmsg).cast::<RawFd>();
            for (i, fd) in fds.iter().enumerate() {
                data_ptr.add(i).write(fd.as_raw_fd());
            }
        }
        Ok(control)
    }
}
/// Receives one datagram (up to MAX_DATAGRAM_SIZE bytes) plus any fds from its
/// SCM_RIGHTS control message. Intended to run under `AsyncFd::async_io`,
/// which retries when this returns would-block.
fn receive_datagram_bytes(socket: &Socket) -> std::io::Result<(Vec<u8>, Vec<OwnedFd>)> {
    let mut buffer = vec![MaybeUninit::<u8>::uninit(); MAX_DATAGRAM_SIZE];
    let mut control = vec![MaybeUninit::<u8>::uninit(); control_space_for_fds(MAX_FDS_PER_MESSAGE)];
    let (read, control_len) = {
        let mut bufs = [MaybeUninitSlice::new(&mut buffer)];
        let mut msg = MsgHdrMut::new()
            .with_buffers(&mut bufs)
            .with_control(&mut control);
        let read = socket.recvmsg(&mut msg, 0)?;
        (read, msg.control_len())
    };
    // Only the first `read` / `control_len` bytes were written by the kernel.
    let data = assume_init(&buffer[..read]).to_vec();
    let fds = extract_fds(assume_init(&control[..control_len]));
    Ok((data, fds))
}
/// Length-prefixed JSON message stream over a nonblocking UNIX SOCK_STREAM
/// socket, with optional fd passing via SCM_RIGHTS.
pub(crate) struct AsyncSocket {
    inner: AsyncFd<Socket>,
}
impl AsyncSocket {
    /// Puts `socket` into nonblocking mode and registers it with tokio.
    fn new(socket: Socket) -> std::io::Result<AsyncSocket> {
        socket.set_nonblocking(true)?;
        let async_socket = AsyncFd::new(socket)?;
        Ok(AsyncSocket {
            inner: async_socket,
        })
    }
    /// Wraps an already-owned descriptor as an `AsyncSocket`.
    pub fn from_fd(fd: OwnedFd) -> std::io::Result<AsyncSocket> {
        AsyncSocket::new(Socket::from(fd))
    }
    /// Creates a connected pair of UNIX stream sockets.
    pub fn pair() -> std::io::Result<(AsyncSocket, AsyncSocket)> {
        let (server, client) = Socket::pair(Domain::UNIX, Type::STREAM, None)?;
        Ok((AsyncSocket::new(server)?, AsyncSocket::new(client)?))
    }
    /// Serializes `msg` as JSON and sends it as one length-prefixed frame,
    /// attaching `fds` to the frame via SCM_RIGHTS.
    pub async fn send_with_fds<T: Serialize>(
        &self,
        msg: T,
        fds: &[OwnedFd],
    ) -> std::io::Result<()> {
        let payload = serde_json::to_vec(&msg)?;
        let mut frame = Vec::with_capacity(LENGTH_PREFIX_SIZE + payload.len());
        frame.extend_from_slice(&encode_length(payload.len())?);
        frame.extend_from_slice(&payload);
        send_stream_frame(&self.inner, &frame, fds).await
    }
    /// Receives one frame and deserializes its JSON payload, returning any
    /// fds delivered alongside it.
    pub async fn receive_with_fds<T: for<'de> Deserialize<'de>>(
        &self,
    ) -> std::io::Result<(T, Vec<OwnedFd>)> {
        let (payload, fds) = read_frame(&self.inner).await?;
        let message: T = serde_json::from_slice(&payload)?;
        Ok((message, fds))
    }
    /// Sends `msg` without attaching any fds.
    pub async fn send<T>(&self, msg: T) -> std::io::Result<()>
    where
        T: Serialize,
    {
        self.send_with_fds(&msg, &[]).await
    }
    /// Receives one message; unexpected fds are warned about and dropped
    /// (dropping the `OwnedFd`s closes them).
    pub async fn receive<T: for<'de> Deserialize<'de>>(&self) -> std::io::Result<T> {
        let (msg, fds) = self.receive_with_fds().await?;
        if !fds.is_empty() {
            tracing::warn!("unexpected fds in receive: {}", fds.len());
        }
        Ok(msg)
    }
    /// Unregisters from tokio and returns the underlying socket.
    pub fn into_inner(self) -> Socket {
        self.inner.into_inner()
    }
}
/// Sends `frame` over a nonblocking stream socket, retrying on would-block
/// until every byte is written. `fds` are attached (via SCM_RIGHTS) only to
/// the first chunk that actually goes out.
async fn send_stream_frame(
    socket: &AsyncFd<Socket>,
    frame: &[u8],
    fds: &[OwnedFd],
) -> std::io::Result<()> {
    let mut written = 0;
    let mut include_fds = !fds.is_empty();
    while written < frame.len() {
        let mut guard = socket.writable().await?;
        let result = guard.try_io(|inner| {
            send_stream_chunk(inner.get_ref(), &frame[written..], fds, include_fds)
        });
        let bytes_written = match result {
            Ok(bytes_written) => bytes_written?,
            // Readiness was stale; wait for writability again.
            Err(_would_block) => continue,
        };
        if bytes_written == 0 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::WriteZero,
                "socket closed while sending frame payload",
            ));
        }
        written += bytes_written;
        // The control message went out with the first chunk; don't resend it.
        include_fds = false;
    }
    Ok(())
}
/// Writes as much of `frame` as the socket accepts in a single sendmsg()
/// call, attaching `fds` via SCM_RIGHTS only when `include_fds` is set.
/// Returns the number of bytes written.
fn send_stream_chunk(
    socket: &Socket,
    frame: &[u8],
    fds: &[OwnedFd],
    include_fds: bool,
) -> std::io::Result<usize> {
    let control = if include_fds {
        make_control_message(fds)?
    } else {
        Vec::new()
    };
    let bufs = [IoSlice::new(frame)];
    if control.is_empty() {
        socket.sendmsg(&MsgHdr::new().with_buffers(&bufs), 0)
    } else {
        socket.sendmsg(&MsgHdr::new().with_buffers(&bufs).with_control(&control), 0)
    }
}
/// Nonblocking UNIX SOCK_DGRAM socket with fd passing via SCM_RIGHTS.
pub(crate) struct AsyncDatagramSocket {
    inner: AsyncFd<Socket>,
}
impl AsyncDatagramSocket {
    /// Puts `socket` into nonblocking mode and registers it with tokio.
    fn new(socket: Socket) -> std::io::Result<Self> {
        socket.set_nonblocking(true)?;
        Ok(Self {
            inner: AsyncFd::new(socket)?,
        })
    }
    /// Adopts a raw descriptor as an `AsyncDatagramSocket`.
    ///
    /// # Safety
    /// `fd` must be a valid, open socket owned by the caller; ownership
    /// transfers to the returned value, which closes it on drop.
    pub unsafe fn from_raw_fd(fd: RawFd) -> std::io::Result<Self> {
        Self::new(unsafe { Socket::from_raw_fd(fd) })
    }
    /// Creates a connected pair of UNIX datagram sockets.
    pub fn pair() -> std::io::Result<(Self, Self)> {
        let (server, client) = Socket::pair(Domain::UNIX, Type::DGRAM, None)?;
        Ok((Self::new(server)?, Self::new(client)?))
    }
    /// Sends `data` as a single datagram with `fds` attached via SCM_RIGHTS.
    pub async fn send_with_fds(&self, data: &[u8], fds: &[OwnedFd]) -> std::io::Result<()> {
        self.inner
            .async_io(Interest::WRITABLE, |socket| {
                send_datagram_bytes(socket, data, fds)
            })
            .await
    }
    /// Receives one datagram plus any fds delivered with it.
    pub async fn receive_with_fds(&self) -> std::io::Result<(Vec<u8>, Vec<OwnedFd>)> {
        self.inner
            .async_io(Interest::READABLE, receive_datagram_bytes)
            .await
    }
    /// Unregisters from tokio and returns the underlying socket.
    pub fn into_inner(self) -> Socket {
        self.inner.into_inner()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use serde::Deserialize;
    use serde::Serialize;
    use std::os::fd::AsFd;
    use std::os::fd::AsRawFd;
    use tempfile::NamedTempFile;
    #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
    struct TestPayload {
        id: i32,
        label: String,
    }
    /// Returns `count` owned duplicates of a single temp-file descriptor.
    fn fd_list(count: usize) -> std::io::Result<Vec<OwnedFd>> {
        let file = NamedTempFile::new()?;
        let mut fds = Vec::new();
        for _ in 0..count {
            fds.push(file.as_fd().try_clone_to_owned()?);
        }
        Ok(fds)
    }
    /// A JSON payload and an fd survive a stream-socket round trip.
    #[tokio::test]
    async fn async_socket_round_trips_payload_and_fds() -> std::io::Result<()> {
        let (server, client) = AsyncSocket::pair()?;
        let payload = TestPayload {
            id: 7,
            label: "round-trip".to_string(),
        };
        let send_fds = fd_list(1)?;
        let receive_task =
            tokio::spawn(async move { server.receive_with_fds::<TestPayload>().await });
        client.send_with_fds(payload.clone(), &send_fds).await?;
        // Dropping the sender's copies must not invalidate the received fd.
        drop(send_fds);
        let (received_payload, received_fds) = receive_task.await.unwrap()?;
        assert_eq!(payload, received_payload);
        assert_eq!(1, received_fds.len());
        // F_GETFD succeeds only on a valid descriptor.
        let fd_status = unsafe { libc::fcntl(received_fds[0].as_raw_fd(), libc::F_GETFD) };
        assert!(
            fd_status >= 0,
            "expected received file descriptor to be valid, but got {fd_status}",
        );
        Ok(())
    }
    /// A 10 KB payload (larger than a single write may accept) round-trips.
    #[tokio::test]
    async fn async_socket_handles_large_payload() -> std::io::Result<()> {
        let (server, client) = AsyncSocket::pair()?;
        let payload = vec![b'A'; 10_000];
        let receive_task = tokio::spawn(async move { server.receive::<Vec<u8>>().await });
        client.send(payload.clone()).await?;
        let received_payload = receive_task.await.unwrap()?;
        assert_eq!(payload, received_payload);
        Ok(())
    }
    /// Datagram bytes and fds round-trip over a DGRAM pair.
    #[tokio::test]
    async fn async_datagram_sockets_round_trip_messages() -> std::io::Result<()> {
        let (server, client) = AsyncDatagramSocket::pair()?;
        let data = b"datagram payload".to_vec();
        let send_fds = fd_list(1)?;
        let receive_task = tokio::spawn(async move { server.receive_with_fds().await });
        client.send_with_fds(&data, &send_fds).await?;
        drop(send_fds);
        let (received_bytes, received_fds) = receive_task.await.unwrap()?;
        assert_eq!(data, received_bytes);
        assert_eq!(1, received_fds.len());
        Ok(())
    }
    /// More than MAX_FDS_PER_MESSAGE fds is rejected with InvalidInput.
    #[test]
    fn send_datagram_bytes_rejects_excessive_fd_counts() -> std::io::Result<()> {
        let (socket, _peer) = Socket::pair(Domain::UNIX, Type::DGRAM, None)?;
        let fds = fd_list(MAX_FDS_PER_MESSAGE + 1)?;
        let err = send_datagram_bytes(&socket, b"hi", &fds).unwrap_err();
        assert_eq!(std::io::ErrorKind::InvalidInput, err.kind());
        Ok(())
    }
    #[test]
    fn send_stream_chunk_rejects_excessive_fd_counts() -> std::io::Result<()> {
        let (socket, _peer) = Socket::pair(Domain::UNIX, Type::STREAM, None)?;
        let fds = fd_list(MAX_FDS_PER_MESSAGE + 1)?;
        let err = send_stream_chunk(&socket, b"hello", &fds, true).unwrap_err();
        assert_eq!(std::io::ErrorKind::InvalidInput, err.kind());
        Ok(())
    }
    /// Lengths that overflow u32 are rejected.
    #[test]
    fn encode_length_errors_for_oversized_messages() {
        let err = encode_length(usize::MAX).unwrap_err();
        assert_eq!(std::io::ErrorKind::InvalidInput, err.kind());
    }
    /// A peer hang-up before any header bytes surfaces as UnexpectedEof.
    #[tokio::test]
    async fn receive_fails_when_peer_closes_before_header() {
        let (server, client) = AsyncSocket::pair().expect("failed to create socket pair");
        drop(client);
        let err = server
            .receive::<serde_json::Value>()
            .await
            .expect_err("expected read failure");
        assert_eq!(std::io::ErrorKind::UnexpectedEof, err.kind());
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/posix/mcp_escalation_policy.rs | codex-rs/exec-server/src/posix/mcp_escalation_policy.rs | use std::path::Path;
use codex_core::sandboxing::SandboxPermissions;
use codex_execpolicy::Policy;
use rmcp::ErrorData as McpError;
use rmcp::RoleServer;
use rmcp::model::CreateElicitationRequestParam;
use rmcp::model::CreateElicitationResult;
use rmcp::model::ElicitationAction;
use rmcp::model::ElicitationSchema;
use rmcp::service::RequestContext;
use crate::posix::escalate_protocol::EscalateAction;
use crate::posix::escalation_policy::EscalationPolicy;
use crate::posix::stopwatch::Stopwatch;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Result of evaluating an exec() call against the in-memory execpolicy.
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum ExecPolicyOutcome {
    /// Run without asking the user, with the given sandbox permissions.
    Allow {
        sandbox_permissions: SandboxPermissions,
    },
    /// Ask the user first; on approval run with the given permissions.
    Prompt {
        sandbox_permissions: SandboxPermissions,
    },
    /// Never run the command.
    Forbidden,
}
/// ExecPolicy with access to the MCP RequestContext so that it can leverage
/// elicitations.
pub(crate) struct McpEscalationPolicy {
    /// In-memory execpolicy rules that drive how to handle an exec() call.
    policy: Arc<RwLock<Policy>>,
    /// MCP request context whose peer receives elicitation prompts.
    context: RequestContext<RoleServer>,
    /// Paused while prompting so user think-time does not advance the clock.
    stopwatch: Stopwatch,
    /// Forwarded to `evaluate_exec_policy` when evaluating commands.
    preserve_program_paths: bool,
}
impl McpEscalationPolicy {
    /// Bundles the execpolicy rules, the MCP request context used for
    /// elicitations, and the stopwatch that is paused while prompting.
    pub(crate) fn new(
        policy: Arc<RwLock<Policy>>,
        context: RequestContext<RoleServer>,
        stopwatch: Stopwatch,
        preserve_program_paths: bool,
    ) -> Self {
        Self {
            policy,
            context,
            stopwatch,
            preserve_program_paths,
        }
    }
    /// Asks the user, via an MCP elicitation, whether the command may run.
    ///
    /// The stopwatch is paused for the duration of the prompt so time spent
    /// waiting on the user does not advance its cancellation deadline.
    async fn prompt(
        &self,
        file: &Path,
        argv: &[String],
        workdir: &Path,
        context: RequestContext<RoleServer>,
    ) -> Result<CreateElicitationResult, McpError> {
        // Render "<file> <shell-quoted args>" for display; argv[0] is skipped
        // because `file` already names the program.
        let args = shlex::try_join(argv.iter().skip(1).map(String::as_str)).unwrap_or_default();
        let command = if args.is_empty() {
            file.display().to_string()
        } else {
            format!("{} {}", file.display(), args)
        };
        self.stopwatch
            .pause_for(async {
                context
                    .peer
                    .create_elicitation(CreateElicitationRequestParam {
                        message: format!(
                            "Allow agent to run `{command}` in `{}`?",
                            workdir.display()
                        ),
                        requested_schema: ElicitationSchema::builder()
                            .title("Execution Permission Request")
                            .optional_string_with("reason", |schema| {
                                schema.description(
                                    "Optional reason for allowing or denying execution",
                                )
                            })
                            .build()
                            .map_err(|e| {
                                McpError::internal_error(
                                    format!("failed to build elicitation schema: {e}"),
                                    None,
                                )
                            })?,
                    })
                    .await
                    .map_err(|e| McpError::internal_error(e.to_string(), None))
            })
            .await
    }
}
#[async_trait::async_trait]
impl EscalationPolicy for McpEscalationPolicy {
async fn determine_action(
&self,
file: &Path,
argv: &[String],
workdir: &Path,
) -> Result<EscalateAction, rmcp::ErrorData> {
let policy = self.policy.read().await;
let outcome =
crate::posix::evaluate_exec_policy(&policy, file, argv, self.preserve_program_paths)?;
let action = match outcome {
ExecPolicyOutcome::Allow {
sandbox_permissions,
} => {
if sandbox_permissions.requires_escalated_permissions() {
EscalateAction::Escalate
} else {
EscalateAction::Run
}
}
ExecPolicyOutcome::Prompt {
sandbox_permissions,
} => {
let result = self
.prompt(file, argv, workdir, self.context.clone())
.await?;
// TODO: Extract reason from `result.content`.
match result.action {
ElicitationAction::Accept => {
if sandbox_permissions.requires_escalated_permissions() {
EscalateAction::Escalate
} else {
EscalateAction::Run
}
}
ElicitationAction::Decline => EscalateAction::Deny {
reason: Some("User declined execution".to_string()),
},
ElicitationAction::Cancel => EscalateAction::Deny {
reason: Some("User cancelled execution".to_string()),
},
}
}
ExecPolicyOutcome::Forbidden => EscalateAction::Deny {
reason: Some("Execution forbidden by policy".to_string()),
},
};
Ok(action)
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/bin/main_mcp_server.rs | codex-rs/exec-server/src/bin/main_mcp_server.rs | #[cfg(not(unix))]
fn main() {
    // Non-UNIX stub: the real server only exists on UNIX, so fail loudly.
    eprintln!("codex-exec-mcp-server is only implemented for UNIX");
    std::process::exit(1);
}
#[cfg(unix)]
pub use codex_exec_server::main_mcp_server as main;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/src/bin/main_execve_wrapper.rs | codex-rs/exec-server/src/bin/main_execve_wrapper.rs | #[cfg(not(unix))]
fn main() {
    // Non-UNIX stub: the real wrapper only exists on UNIX, so fail loudly.
    eprintln!("codex-execve-wrapper is only implemented for UNIX");
    std::process::exit(1);
}
#[cfg(unix)]
pub use codex_exec_server::main_execve_wrapper as main;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/tests/all.rs | codex-rs/exec-server/tests/all.rs | // Single integration test binary that aggregates all test modules.
// The submodules live in `tests/suite/`.
mod suite;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/tests/suite/list_tools.rs | codex-rs/exec-server/tests/suite/list_tools.rs | #![allow(clippy::unwrap_used, clippy::expect_used)]
use std::borrow::Cow;
use std::fs;
use std::sync::Arc;
use anyhow::Result;
use exec_server_test_support::create_transport;
use pretty_assertions::assert_eq;
use rmcp::ServiceExt;
use rmcp::model::Tool;
use rmcp::model::object;
use serde_json::json;
use tempfile::TempDir;
/// Verify the list_tools call to the MCP server returns the expected response.
#[tokio::test(flavor = "current_thread")]
async fn list_tools() -> Result<()> {
    // Stand up a throwaway CODEX_HOME containing a single execpolicy rule so
    // the server starts with a valid configuration.
    let codex_home = TempDir::new()?;
    let policy_dir = codex_home.path().join("rules");
    fs::create_dir_all(&policy_dir)?;
    fs::write(
        policy_dir.join("default.rules"),
        r#"prefix_rule(pattern=["ls"], decision="prompt")"#,
    )?;
    let dotslash_cache_temp_dir = TempDir::new()?;
    let dotslash_cache = dotslash_cache_temp_dir.path();
    let transport = create_transport(codex_home.path(), dotslash_cache).await?;
    // `()` acts as a no-op client handler: this test only exercises the
    // request side of the connection.
    let service = ().serve(transport).await?;
    let tools = service.list_tools(Default::default()).await?.tools;
    // The server is expected to expose exactly one tool (`shell`) whose input
    // schema mirrors the server's ExecParams type.
    assert_eq!(
        vec![Tool {
            name: Cow::Borrowed("shell"),
            title: None,
            description: Some(Cow::Borrowed(
                "Runs a shell command and returns its output. You MUST provide the workdir as an absolute path."
            )),
            input_schema: Arc::new(object(json!({
                "$schema": "https://json-schema.org/draft/2020-12/schema",
                "properties": {
                    "command": {
                        "description": "The bash string to execute.",
                        "type": "string",
                    },
                    "login": {
                        "description": "Launch Bash with -lc instead of -c: defaults to true.",
                        "nullable": true,
                        "type": "boolean",
                    },
                    "timeout_ms": {
                        "description": "The timeout for the command in milliseconds.",
                        "format": "uint64",
                        "minimum": 0,
                        "nullable": true,
                        "type": "integer",
                    },
                    "workdir": {
                        "description": "The working directory to execute the command in. Must be an absolute path.",
                        "type": "string",
                    },
                },
                "required": [
                    "command",
                    "workdir",
                ],
                "title": "ExecParams",
                "type": "object",
            }))),
            output_schema: None,
            annotations: None,
            icons: None,
            meta: None
        }],
        tools
    );
    Ok(())
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/tests/suite/accept_elicitation.rs | codex-rs/exec-server/tests/suite/accept_elicitation.rs | #![allow(clippy::unwrap_used, clippy::expect_used)]
use std::borrow::Cow;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use anyhow::Context;
use anyhow::Result;
use anyhow::ensure;
use codex_exec_server::ExecResult;
use exec_server_test_support::InteractiveClient;
use exec_server_test_support::create_transport;
use exec_server_test_support::notify_readable_sandbox;
use exec_server_test_support::write_default_execpolicy;
use maplit::hashset;
use pretty_assertions::assert_eq;
use rmcp::ServiceExt;
use rmcp::model::CallToolRequestParam;
use rmcp::model::CallToolResult;
use rmcp::model::CreateElicitationRequestParam;
use rmcp::model::EmptyResult;
use rmcp::model::ServerResult;
use rmcp::model::object;
use serde_json::json;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::fs::symlink;
use tempfile::TempDir;
use tokio::process::Command;
/// Verify that when using a read-only sandbox and an execpolicy that prompts,
/// the proper elicitation is sent. Upon auto-approving the elicitation, the
/// command should be run privileged outside the sandbox.
#[tokio::test(flavor = "current_thread")]
async fn accept_elicitation_for_prompt_rule() -> Result<()> {
    // Configure a stdio transport that will launch the MCP server using
    // $CODEX_HOME with an execpolicy that prompts for `git init` commands.
    let codex_home = TempDir::new()?;
    write_default_execpolicy(
        r#"
# Create a rule with `decision = "prompt"` to exercise the elicitation flow.
prefix_rule(
    pattern = ["git", "init"],
    decision = "prompt",
    match = [
        "git init ."
    ],
)
"#,
        codex_home.as_ref(),
    )
    .await?;
    let dotslash_cache_temp_dir = TempDir::new()?;
    let dotslash_cache = dotslash_cache_temp_dir.path();
    let transport = create_transport(codex_home.as_ref(), dotslash_cache).await?;
    // Create an MCP client that approves expected elicitation messages.
    let project_root = TempDir::new()?;
    let project_root_path = project_root.path().canonicalize().unwrap();
    let git_path = resolve_git_path().await?;
    let expected_elicitation_message = format!(
        "Allow agent to run `{} init .` in `{}`?",
        git_path,
        project_root_path.display()
    );
    let elicitation_requests: Arc<Mutex<Vec<CreateElicitationRequestParam>>> = Default::default();
    let client = InteractiveClient {
        elicitations_to_accept: hashset! { expected_elicitation_message.clone() },
        elicitation_requests: elicitation_requests.clone(),
    };
    // Start the MCP server.
    let service: rmcp::service::RunningService<rmcp::RoleClient, InteractiveClient> =
        client.serve(transport).await?;
    // Notify the MCP server about the current sandbox state before making any
    // `shell` tool calls.
    let linux_sandbox_exe_folder = TempDir::new()?;
    let codex_linux_sandbox_exe = if cfg!(target_os = "linux") {
        // On Linux the sandbox helper is the `codex` CLI itself, exposed under
        // the `codex-linux-sandbox` name via a symlink.
        let codex_linux_sandbox_exe = linux_sandbox_exe_folder.path().join("codex-linux-sandbox");
        let codex_cli = ensure_codex_cli()?;
        symlink(&codex_cli, &codex_linux_sandbox_exe)?;
        Some(codex_linux_sandbox_exe)
    } else {
        None
    };
    let response =
        notify_readable_sandbox(&project_root_path, codex_linux_sandbox_exe, &service).await?;
    let ServerResult::EmptyResult(EmptyResult {}) = response else {
        panic!("expected EmptyResult from sandbox state notification but found: {response:?}");
    };
    // Call the shell tool and verify that an elicitation was created and
    // auto-approved.
    let CallToolResult {
        content, is_error, ..
    } = service
        .call_tool(CallToolRequestParam {
            name: Cow::Borrowed("shell"),
            arguments: Some(object(json!(
                {
                    "login": false,
                    "command": "git init .",
                    "workdir": project_root_path.to_string_lossy(),
                }
            ))),
        })
        .await?;
    let tool_call_content = content
        .first()
        .expect("expected non-empty content")
        .as_text()
        .expect("expected text content");
    let ExecResult {
        exit_code, output, ..
    } = serde_json::from_str::<ExecResult>(&tool_call_content.text)?;
    let git_init_succeeded = format!(
        "Initialized empty Git repository in {}/.git/\n",
        project_root_path.display()
    );
    // Normally, this would be an exact match, but it might include extra output
    // if `git config set advice.defaultBranchName false` has not been set.
    assert!(
        output.contains(&git_init_succeeded),
        "expected output `{output}` to contain `{git_init_succeeded}`"
    );
    assert_eq!(exit_code, 0, "command should succeed");
    assert_eq!(is_error, Some(false), "command should succeed");
    assert!(
        project_root_path.join(".git").is_dir(),
        "git repo should exist"
    );
    // The command ran, so exactly one elicitation (ours) must have been seen.
    let elicitation_messages = elicitation_requests
        .lock()
        .unwrap()
        .iter()
        .map(|r| r.message.clone())
        .collect::<Vec<_>>();
    assert_eq!(vec![expected_elicitation_message], elicitation_messages);
    Ok(())
}
/// Locate the pre-built `codex` binary and sanity-check that it is a real,
/// executable file before it is handed to the sandbox as a helper.
fn ensure_codex_cli() -> Result<PathBuf> {
    let codex_cli = codex_utils_cargo_bin::cargo_bin("codex")?;
    let metadata = codex_cli.metadata().with_context(|| {
        format!(
            "failed to read metadata for codex binary at {}",
            codex_cli.display()
        )
    })?;
    ensure!(
        metadata.is_file(),
        "expected codex binary at {} to be a file; run `cargo build -p codex-cli --bin codex` before this test",
        codex_cli.display()
    );
    // Any execute bit (owner/group/other) is accepted.
    let mode = metadata.permissions().mode();
    ensure!(
        mode & 0o111 != 0,
        "codex binary at {} is not executable (mode {mode:o}); run `cargo build -p codex-cli --bin codex` before this test",
        codex_cli.display()
    );
    Ok(codex_cli)
}
/// Resolve the absolute path to `git` the same way a login shell would.
///
/// Uses `bash -lc "command -v git"` so the lookup honors the user's login
/// PATH, then validates that the result is non-empty UTF-8.
async fn resolve_git_path() -> Result<String> {
    let lookup = Command::new("bash")
        .arg("-lc")
        .arg("command -v git")
        .output()
        .await
        .context("failed to resolve git via login shell")?;
    ensure!(
        lookup.status.success(),
        "failed to resolve git via login shell: {}",
        String::from_utf8_lossy(&lookup.stderr)
    );
    let raw = String::from_utf8(lookup.stdout).context("git path was not valid utf8")?;
    let git_path = raw.trim().to_string();
    ensure!(!git_path.is_empty(), "git path should not be empty");
    Ok(git_path)
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/tests/suite/mod.rs | codex-rs/exec-server/tests/suite/mod.rs | #[cfg(any(all(target_os = "macos", target_arch = "aarch64"), target_os = "linux"))]
mod accept_elicitation;
#[cfg(any(all(target_os = "macos", target_arch = "aarch64"), target_os = "linux"))]
mod list_tools;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/exec-server/tests/common/lib.rs | codex-rs/exec-server/tests/common/lib.rs | use codex_core::MCP_SANDBOX_STATE_METHOD;
use codex_core::SandboxState;
use codex_core::protocol::SandboxPolicy;
use rmcp::ClientHandler;
use rmcp::ErrorData as McpError;
use rmcp::RoleClient;
use rmcp::Service;
use rmcp::model::ClientCapabilities;
use rmcp::model::ClientInfo;
use rmcp::model::ClientRequest;
use rmcp::model::CreateElicitationRequestParam;
use rmcp::model::CreateElicitationResult;
use rmcp::model::CustomRequest;
use rmcp::model::ElicitationAction;
use rmcp::model::ServerResult;
use rmcp::service::RunningService;
use rmcp::transport::ConfigureCommandExt;
use rmcp::transport::TokioChildProcess;
use serde_json::json;
use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::Mutex;
use tokio::process::Command;
/// Spawn the exec MCP server as a child process speaking JSON-RPC over
/// stdin/stdout, pointing it at `codex_home` and a pre-fetched DotSlash bash.
pub async fn create_transport<P>(
    codex_home: P,
    dotslash_cache: P,
) -> anyhow::Result<TokioChildProcess>
where
    P: AsRef<Path>,
{
    let mcp_executable = codex_utils_cargo_bin::cargo_bin("codex-exec-mcp-server")?;
    let execve_wrapper = codex_utils_cargo_bin::cargo_bin("codex-execve-wrapper")?;
    // `bash` requires a special lookup when running under Buck because it is a
    // _resource_ rather than a binary target.
    let bash = if let Some(root) = codex_utils_cargo_bin::buck_project_root()? {
        root.join("codex-rs/exec-server/tests/suite/bash")
    } else {
        Path::new(env!("CARGO_MANIFEST_DIR"))
            .join("..")
            .join("suite")
            .join("bash")
    };
    // Need to ensure the artifact associated with the bash DotSlash file is
    // available before it is run in a read-only sandbox.
    let status = Command::new("dotslash")
        .arg("--")
        .arg("fetch")
        .arg(bash.clone())
        .env("DOTSLASH_CACHE", dotslash_cache.as_ref())
        .status()
        .await?;
    assert!(status.success(), "dotslash fetch failed: {status:?}");
    let transport = TokioChildProcess::new(Command::new(&mcp_executable).configure(|cmd| {
        cmd.arg("--bash").arg(bash);
        cmd.arg("--execve").arg(&execve_wrapper);
        cmd.env("CODEX_HOME", codex_home.as_ref());
        cmd.env("DOTSLASH_CACHE", dotslash_cache.as_ref());
        // Important: pipe stdio so rmcp can speak JSON-RPC over stdin/stdout
        cmd.stdin(Stdio::piped());
        cmd.stdout(Stdio::piped());
        // Optional but very helpful while debugging:
        cmd.stderr(Stdio::inherit());
    }))?;
    Ok(transport)
}
/// Write `policy` as `$CODEX_HOME/rules/default.rules`, creating the rules
/// directory first if needed.
pub async fn write_default_execpolicy<P>(policy: &str, codex_home: P) -> anyhow::Result<()>
where
    P: AsRef<Path>,
{
    let rules_dir = codex_home.as_ref().join("rules");
    tokio::fs::create_dir_all(&rules_dir).await?;
    let default_rules = rules_dir.join("default.rules");
    tokio::fs::write(default_rules, policy).await?;
    Ok(())
}
/// Notify the server that the sandbox is read-only, rooted at `sandbox_cwd`.
pub async fn notify_readable_sandbox<P, S>(
    sandbox_cwd: P,
    codex_linux_sandbox_exe: Option<PathBuf>,
    service: &RunningService<RoleClient, S>,
) -> anyhow::Result<ServerResult>
where
    P: AsRef<Path>,
    S: Service<RoleClient> + ClientHandler,
{
    let sandbox_state = SandboxState {
        sandbox_policy: SandboxPolicy::ReadOnly,
        codex_linux_sandbox_exe,
        sandbox_cwd: sandbox_cwd.as_ref().to_path_buf(),
    };
    send_sandbox_state_update(sandbox_state, service).await
}
/// Notify the server of a workspace-write sandbox where `writable_folder`
/// (as the sandbox cwd) is the only writable location.
pub async fn notify_writable_sandbox_only_one_folder<P, S>(
    writable_folder: P,
    codex_linux_sandbox_exe: Option<PathBuf>,
    service: &RunningService<RoleClient, S>,
) -> anyhow::Result<ServerResult>
where
    P: AsRef<Path>,
    S: Service<RoleClient> + ClientHandler,
{
    let sandbox_state = SandboxState {
        sandbox_policy: SandboxPolicy::WorkspaceWrite {
            // Note that sandbox_cwd will already be included as a writable root
            // when the sandbox policy is expanded.
            writable_roots: vec![],
            network_access: false,
            // Disable writes to temp dir because this is a test, so
            // writable_folder is likely also under /tmp and we want to be
            // strict about what is writable.
            exclude_tmpdir_env_var: true,
            exclude_slash_tmp: true,
        },
        codex_linux_sandbox_exe,
        sandbox_cwd: writable_folder.as_ref().to_path_buf(),
    };
    send_sandbox_state_update(sandbox_state, service).await
}
/// Push `sandbox_state` to the server via the custom sandbox-state MCP method
/// and return the raw server response for the caller to inspect.
async fn send_sandbox_state_update<S>(
    sandbox_state: SandboxState,
    service: &RunningService<RoleClient, S>,
) -> anyhow::Result<ServerResult>
where
    S: Service<RoleClient> + ClientHandler,
{
    let response = service
        .send_request(ClientRequest::CustomRequest(CustomRequest::new(
            MCP_SANDBOX_STATE_METHOD,
            Some(serde_json::to_value(sandbox_state)?),
        )))
        .await?;
    Ok(response)
}
/// Test MCP client that records every elicitation request it receives and
/// auto-accepts only those whose message appears in `elicitations_to_accept`.
pub struct InteractiveClient {
    // Exact elicitation messages that should be auto-approved.
    pub elicitations_to_accept: HashSet<String>,
    // Every elicitation request seen, in arrival order (for assertions).
    pub elicitation_requests: Arc<Mutex<Vec<CreateElicitationRequestParam>>>,
}

impl ClientHandler for InteractiveClient {
    fn get_info(&self) -> ClientInfo {
        // Advertise elicitation support so the server will send requests.
        let capabilities = ClientCapabilities::builder().enable_elicitation().build();
        ClientInfo {
            capabilities,
            ..Default::default()
        }
    }

    fn create_elicitation(
        &self,
        request: CreateElicitationRequestParam,
        _context: rmcp::service::RequestContext<RoleClient>,
    ) -> impl std::future::Future<Output = Result<CreateElicitationResult, McpError>> + Send + '_
    {
        // Record synchronously so assertions can observe the request even
        // before the returned future is polled.
        self.elicitation_requests
            .lock()
            .unwrap()
            .push(request.clone());
        let accept = self.elicitations_to_accept.contains(&request.message);
        async move {
            if accept {
                Ok(CreateElicitationResult {
                    action: ElicitationAction::Accept,
                    content: Some(json!({ "approve": true })),
                })
            } else {
                Ok(CreateElicitationResult {
                    action: ElicitationAction::Decline,
                    content: None,
                })
            }
        }
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/feedback/src/lib.rs | codex-rs/feedback/src/lib.rs | use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::collections::btree_map::Entry;
use std::fs;
use std::io::Write;
use std::io::{self};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use anyhow::Result;
use anyhow::anyhow;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SessionSource;
use tracing::Event;
use tracing::Level;
use tracing::field::Visit;
use tracing_subscriber::Layer;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::fmt::writer::MakeWriter;
use tracing_subscriber::registry::LookupSpan;
const DEFAULT_MAX_BYTES: usize = 4 * 1024 * 1024; // 4 MiB
const SENTRY_DSN: &str =
"https://ae32ed50620d7a7792c1ce5df38b3e3e@o33249.ingest.us.sentry.io/4510195390611458";
const UPLOAD_TIMEOUT_SECS: u64 = 10;
const FEEDBACK_TAGS_TARGET: &str = "feedback_tags";
const MAX_FEEDBACK_TAGS: usize = 64;
/// In-memory collector of process logs and metadata tags so the user can
/// attach them to a feedback report later. Cheap to clone (shared `Arc`).
#[derive(Clone)]
pub struct CodexFeedback {
    inner: Arc<FeedbackInner>,
}

impl Default for CodexFeedback {
    fn default() -> Self {
        Self::new()
    }
}

impl CodexFeedback {
    /// Creates a collector with the default `DEFAULT_MAX_BYTES` log capacity.
    pub fn new() -> Self {
        Self::with_capacity(DEFAULT_MAX_BYTES)
    }

    pub(crate) fn with_capacity(max_bytes: usize) -> Self {
        Self {
            inner: Arc::new(FeedbackInner::new(max_bytes)),
        }
    }

    /// Writer factory for `tracing_subscriber::fmt`'s `with_writer`; every
    /// write lands in the shared ring buffer.
    pub fn make_writer(&self) -> FeedbackMakeWriter {
        FeedbackMakeWriter {
            inner: self.inner.clone(),
        }
    }

    /// Returns a [`tracing_subscriber`] layer that captures full-fidelity logs into this feedback
    /// ring buffer.
    ///
    /// This is intended for initialization code so call sites don't have to duplicate the exact
    /// `fmt::layer()` configuration and filter logic.
    pub fn logger_layer<S>(&self) -> impl Layer<S> + Send + Sync + 'static
    where
        S: tracing::Subscriber + for<'a> LookupSpan<'a>,
    {
        tracing_subscriber::fmt::layer()
            .with_writer(self.make_writer())
            .with_ansi(false)
            .with_target(false)
            // Capture everything, regardless of the caller's `RUST_LOG`, so feedback includes the
            // full trace when the user uploads a report.
            .with_filter(Targets::new().with_default(Level::TRACE))
    }

    /// Returns a [`tracing_subscriber`] layer that collects structured metadata for feedback.
    ///
    /// Events with `target: "feedback_tags"` are treated as key/value tags to attach to feedback
    /// uploads later.
    pub fn metadata_layer<S>(&self) -> impl Layer<S> + Send + Sync + 'static
    where
        S: tracing::Subscriber + for<'a> LookupSpan<'a>,
    {
        FeedbackMetadataLayer {
            inner: self.inner.clone(),
        }
        .with_filter(Targets::new().with_target(FEEDBACK_TAGS_TARGET, Level::TRACE))
    }

    /// Snapshots the current log bytes and tags. When `session_id` is `None`,
    /// a unique placeholder thread id is generated.
    pub fn snapshot(&self, session_id: Option<ConversationId>) -> CodexLogSnapshot {
        let bytes = {
            let guard = self.inner.ring.lock().expect("mutex poisoned");
            guard.snapshot_bytes()
        };
        let tags = {
            let guard = self.inner.tags.lock().expect("mutex poisoned");
            guard.clone()
        };
        CodexLogSnapshot {
            bytes,
            tags,
            // Build the fallback id lazily: the eager `unwrap_or` form minted a
            // fresh ConversationId and allocated even when a session id exists.
            thread_id: session_id
                .map(|id| id.to_string())
                .unwrap_or_else(|| format!("no-active-thread-{}", ConversationId::new())),
        }
    }
}
/// Shared state behind `CodexFeedback`: the log ring buffer plus the
/// structured tags collected from `feedback_tags` events.
struct FeedbackInner {
    ring: Mutex<RingBuffer>,
    tags: Mutex<BTreeMap<String, String>>,
}

impl FeedbackInner {
    // `max_bytes` caps how much log data the ring buffer retains.
    fn new(max_bytes: usize) -> Self {
        Self {
            ring: Mutex::new(RingBuffer::new(max_bytes)),
            tags: Mutex::new(BTreeMap::new()),
        }
    }
}
/// `MakeWriter` factory handed to `tracing_subscriber`; each writer it
/// produces appends to the shared feedback ring buffer.
#[derive(Clone)]
pub struct FeedbackMakeWriter {
    inner: Arc<FeedbackInner>,
}

impl<'a> MakeWriter<'a> for FeedbackMakeWriter {
    type Writer = FeedbackWriter;

    fn make_writer(&'a self) -> Self::Writer {
        FeedbackWriter {
            inner: self.inner.clone(),
        }
    }
}

/// `io::Write` adapter that pushes bytes into the shared ring buffer.
pub struct FeedbackWriter {
    inner: Arc<FeedbackInner>,
}

impl Write for FeedbackWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // A poisoned mutex is surfaced as a generic I/O error rather than
        // panicking inside the logging pipeline.
        let mut guard = self.inner.ring.lock().map_err(|_| io::ErrorKind::Other)?;
        guard.push_bytes(buf);
        // Always report the full buffer as written; the ring buffer may have
        // evicted older bytes to make room.
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        // Data is already in memory; nothing to flush.
        Ok(())
    }
}
/// Fixed-capacity byte ring: retains at most `max` of the most recently
/// written bytes, evicting from the front when full.
struct RingBuffer {
    /// Maximum number of bytes retained.
    max: usize,
    buf: VecDeque<u8>,
}

impl RingBuffer {
    fn new(capacity: usize) -> Self {
        Self {
            max: capacity,
            buf: VecDeque::with_capacity(capacity),
        }
    }

    /// Current number of buffered bytes (always <= `max`).
    fn len(&self) -> usize {
        self.buf.len()
    }

    /// Append `data`, evicting the oldest bytes so the buffer never exceeds
    /// `max` bytes.
    fn push_bytes(&mut self, data: &[u8]) {
        if data.is_empty() {
            return;
        }
        // If the incoming chunk is larger than capacity, keep only the trailing bytes.
        if data.len() >= self.max {
            self.buf.clear();
            let start = data.len() - self.max;
            self.buf.extend(data[start..].iter().copied());
            return;
        }
        // Evict from the front if we would exceed capacity. `drain` removes
        // the surplus in one pass instead of popping byte-by-byte.
        let needed = self.len() + data.len();
        if needed > self.max {
            self.buf.drain(..needed - self.max);
        }
        self.buf.extend(data.iter().copied());
    }

    /// Copy of the buffered bytes in write order (oldest first).
    fn snapshot_bytes(&self) -> Vec<u8> {
        self.buf.iter().copied().collect()
    }
}
/// A point-in-time copy of the feedback logs and tags, ready to save or
/// upload.
pub struct CodexLogSnapshot {
    bytes: Vec<u8>,
    tags: BTreeMap<String, String>,
    pub thread_id: String,
}

impl CodexLogSnapshot {
    pub(crate) fn as_bytes(&self) -> &[u8] {
        &self.bytes
    }

    /// Write the captured logs to `$TMPDIR/codex-feedback-<thread_id>.log`
    /// and return the path.
    pub fn save_to_temp_file(&self) -> io::Result<PathBuf> {
        let dir = std::env::temp_dir();
        let filename = format!("codex-feedback-{}.log", self.thread_id);
        let path = dir.join(filename);
        fs::write(&path, self.as_bytes())?;
        Ok(path)
    }

    /// Upload feedback to Sentry with optional attachments.
    pub fn upload_feedback(
        &self,
        classification: &str,
        reason: Option<&str>,
        include_logs: bool,
        rollout_path: Option<&std::path::Path>,
        session_source: Option<SessionSource>,
    ) -> Result<()> {
        use std::collections::BTreeMap;
        use std::fs;
        use std::str::FromStr;
        use std::sync::Arc;

        use sentry::Client;
        use sentry::ClientOptions;
        use sentry::protocol::Attachment;
        use sentry::protocol::Envelope;
        use sentry::protocol::EnvelopeItem;
        use sentry::protocol::Event;
        use sentry::protocol::Level;
        use sentry::transports::DefaultTransportFactory;
        use sentry::types::Dsn;

        // Build Sentry client
        let client = Client::from_config(ClientOptions {
            dsn: Some(Dsn::from_str(SENTRY_DSN).map_err(|e| anyhow!("invalid DSN: {e}"))?),
            transport: Some(Arc::new(DefaultTransportFactory {})),
            ..Default::default()
        });

        // Reserved tags always come from this method's arguments; collected
        // `feedback_tags` entries are added afterwards and never override them.
        let cli_version = env!("CARGO_PKG_VERSION");
        let mut tags = BTreeMap::from([
            (String::from("thread_id"), self.thread_id.to_string()),
            (String::from("classification"), classification.to_string()),
            (String::from("cli_version"), cli_version.to_string()),
        ]);
        if let Some(source) = session_source.as_ref() {
            tags.insert(String::from("session_source"), source.to_string());
        }
        if let Some(r) = reason {
            tags.insert(String::from("reason"), r.to_string());
        }
        let reserved = [
            "thread_id",
            "classification",
            "cli_version",
            "session_source",
            "reason",
        ];
        for (key, value) in &self.tags {
            if reserved.contains(&key.as_str()) {
                continue;
            }
            if let Entry::Vacant(entry) = tags.entry(key.clone()) {
                entry.insert(value.clone());
            }
        }
        let level = match classification {
            "bug" | "bad_result" => Level::Error,
            _ => Level::Info,
        };
        let mut envelope = Envelope::new();
        let title = format!(
            "[{}]: Codex session {}",
            display_classification(classification),
            self.thread_id
        );
        let mut event = Event {
            level,
            message: Some(title.clone()),
            tags,
            ..Default::default()
        };
        // When a free-form reason was provided, surface it as an exception so
        // it is prominent in the Sentry UI.
        if let Some(r) = reason {
            use sentry::protocol::Exception;
            use sentry::protocol::Values;
            event.exception = Values::from(vec![Exception {
                ty: title.clone(),
                value: Some(r.to_string()),
                ..Default::default()
            }]);
        }
        envelope.add_item(EnvelopeItem::Event(event));
        if include_logs {
            envelope.add_item(EnvelopeItem::Attachment(Attachment {
                buffer: self.bytes.clone(),
                filename: String::from("codex-logs.log"),
                content_type: Some("text/plain".to_string()),
                ty: None,
            }));
        }
        // Attach the rollout file when present; a read failure silently skips
        // the attachment rather than failing the whole upload.
        if let Some((path, data)) = rollout_path.and_then(|p| fs::read(p).ok().map(|d| (p, d))) {
            let fname = path
                .file_name()
                .map(|s| s.to_string_lossy().to_string())
                .unwrap_or_else(|| "rollout.jsonl".to_string());
            let content_type = "text/plain".to_string();
            envelope.add_item(EnvelopeItem::Attachment(Attachment {
                buffer: data,
                filename: fname,
                content_type: Some(content_type),
                ty: None,
            }));
        }
        client.send_envelope(envelope);
        // Block (up to the timeout) until the transport has drained.
        client.flush(Some(Duration::from_secs(UPLOAD_TIMEOUT_SECS)));
        Ok(())
    }
}
/// Human-readable label for a feedback classification slug; anything
/// unrecognized maps to "Other".
fn display_classification(classification: &str) -> String {
    let label = match classification {
        "bug" => "Bug",
        "bad_result" => "Bad result",
        "good_result" => "Good result",
        _ => "Other",
    };
    label.to_string()
}
/// Layer that turns `target: "feedback_tags"` events into key/value tags on
/// the shared feedback state.
#[derive(Clone)]
struct FeedbackMetadataLayer {
    inner: Arc<FeedbackInner>,
}

impl<S> Layer<S> for FeedbackMetadataLayer
where
    S: tracing::Subscriber + for<'a> LookupSpan<'a>,
{
    fn on_event(&self, event: &Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>) {
        // This layer is filtered by `Targets`, but keep the guard anyway in case it is used without
        // the filter.
        if event.metadata().target() != FEEDBACK_TAGS_TARGET {
            return;
        }
        let mut visitor = FeedbackTagsVisitor::default();
        event.record(&mut visitor);
        if visitor.tags.is_empty() {
            return;
        }
        let mut guard = self.inner.tags.lock().expect("mutex poisoned");
        for (key, value) in visitor.tags {
            // Cap the tag map at MAX_FEEDBACK_TAGS entries; values for
            // already-present keys may still be updated once the cap is hit.
            if guard.len() >= MAX_FEEDBACK_TAGS && !guard.contains_key(&key) {
                continue;
            }
            guard.insert(key, value);
        }
    }
}
/// Field visitor that stringifies every recorded field into a tag map.
#[derive(Default)]
struct FeedbackTagsVisitor {
    tags: BTreeMap<String, String>,
}

impl FeedbackTagsVisitor {
    /// Store a field under its name with an already-rendered value.
    fn record_rendered(&mut self, field: &tracing::field::Field, rendered: String) {
        self.tags.insert(field.name().to_string(), rendered);
    }
}

impl Visit for FeedbackTagsVisitor {
    fn record_i64(&mut self, field: &tracing::field::Field, value: i64) {
        self.record_rendered(field, value.to_string());
    }

    fn record_u64(&mut self, field: &tracing::field::Field, value: u64) {
        self.record_rendered(field, value.to_string());
    }

    fn record_bool(&mut self, field: &tracing::field::Field, value: bool) {
        self.record_rendered(field, value.to_string());
    }

    fn record_f64(&mut self, field: &tracing::field::Field, value: f64) {
        self.record_rendered(field, value.to_string());
    }

    fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
        self.record_rendered(field, value.to_string());
    }

    fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
        // Fallback for field types without a dedicated `record_*` hook.
        self.record_rendered(field, format!("{value:?}"));
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tracing_subscriber::layer::SubscriberExt;
    use tracing_subscriber::util::SubscriberInitExt;

    #[test]
    fn ring_buffer_drops_front_when_full() {
        let fb = CodexFeedback::with_capacity(8);
        {
            let mut w = fb.make_writer().make_writer();
            w.write_all(b"abcdefgh").unwrap();
            w.write_all(b"ij").unwrap();
        }
        let snap = fb.snapshot(None);
        // Capacity 8: after writing 10 bytes, we should keep the last 8.
        pretty_assertions::assert_eq!(std::str::from_utf8(snap.as_bytes()).unwrap(), "cdefghij");
    }

    #[test]
    fn metadata_layer_records_tags_from_feedback_target() {
        let fb = CodexFeedback::new();
        // Install the layer only for this test's scope via `set_default`.
        let _guard = tracing_subscriber::registry()
            .with(fb.metadata_layer())
            .set_default();
        tracing::info!(target: FEEDBACK_TAGS_TARGET, model = "gpt-5", cached = true, "tags");
        let snap = fb.snapshot(None);
        pretty_assertions::assert_eq!(snap.tags.get("model").map(String::as_str), Some("gpt-5"));
        pretty_assertions::assert_eq!(snap.tags.get("cached").map(String::as_str), Some("true"));
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/backend-client/src/lib.rs | codex-rs/backend-client/src/lib.rs | mod client;
pub mod types;
pub use client::Client;
pub use types::CodeTaskDetailsResponse;
pub use types::CodeTaskDetailsResponseExt;
pub use types::PaginatedListTaskListItem;
pub use types::TaskListItem;
pub use types::TurnAttemptsSiblingTurnsResponse;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/backend-client/src/client.rs | codex-rs/backend-client/src/client.rs | use crate::types::CodeTaskDetailsResponse;
use crate::types::CreditStatusDetails;
use crate::types::PaginatedListTaskListItem;
use crate::types::RateLimitStatusPayload;
use crate::types::RateLimitWindowSnapshot;
use crate::types::TurnAttemptsSiblingTurnsResponse;
use anyhow::Result;
use codex_core::auth::CodexAuth;
use codex_core::default_client::get_codex_user_agent;
use codex_protocol::account::PlanType as AccountPlanType;
use codex_protocol::protocol::CreditsSnapshot;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::RateLimitWindow;
use reqwest::header::AUTHORIZATION;
use reqwest::header::CONTENT_TYPE;
use reqwest::header::HeaderMap;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
use reqwest::header::USER_AGENT;
use serde::de::DeserializeOwned;
/// Which URL path family the backend expects.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PathStyle {
    /// /api/codex/…
    CodexApi,
    /// /wham/…
    ChatGptApi,
}

impl PathStyle {
    /// Infer the path style from a base URL: `/backend-api` hosts use the
    /// ChatGPT (`/wham/…`) routes, everything else the `/api/codex/…` routes.
    pub fn from_base_url(base_url: &str) -> Self {
        match base_url.contains("/backend-api") {
            true => PathStyle::ChatGptApi,
            false => PathStyle::CodexApi,
        }
    }
}
/// HTTP client for the Codex/ChatGPT backend task APIs.
#[derive(Clone, Debug)]
pub struct Client {
    // Normalized base URL with no trailing slash.
    base_url: String,
    http: reqwest::Client,
    // Optional credentials/identity applied to every request.
    bearer_token: Option<String>,
    user_agent: Option<HeaderValue>,
    chatgpt_account_id: Option<String>,
    // Selects between `/api/codex/…` and `/wham/…` route families.
    path_style: PathStyle,
}
impl Client {
/// Build a client for `base_url` with no credentials configured.
///
/// # Errors
/// Returns an error if the underlying `reqwest` client cannot be built.
pub fn new(base_url: impl Into<String>) -> Result<Self> {
    let mut base_url = base_url.into();
    // Normalize common ChatGPT hostnames to include /backend-api so we hit the WHAM paths.
    // Also trim trailing slashes for consistent URL building.
    while base_url.ends_with('/') {
        base_url.pop();
    }
    if (base_url.starts_with("https://chatgpt.com")
        || base_url.starts_with("https://chat.openai.com"))
        && !base_url.contains("/backend-api")
    {
        base_url = format!("{base_url}/backend-api");
    }
    let http = reqwest::Client::builder().build()?;
    // Derived once from the normalized URL; callers can still override it via
    // `with_path_style`.
    let path_style = PathStyle::from_base_url(&base_url);
    Ok(Self {
        base_url,
        http,
        bearer_token: None,
        user_agent: None,
        chatgpt_account_id: None,
        path_style,
    })
}
/// Build a client pre-configured from `auth`: bearer token, Codex user agent,
/// and (when available) the ChatGPT account id.
///
/// # Errors
/// Returns an error if the token cannot be fetched or the client fails to build.
pub async fn from_auth(base_url: impl Into<String>, auth: &CodexAuth) -> Result<Self> {
    let token = auth.get_token().await.map_err(anyhow::Error::from)?;
    let mut client = Self::new(base_url)?
        .with_user_agent(get_codex_user_agent())
        .with_bearer_token(token);
    if let Some(account_id) = auth.get_account_id() {
        client = client.with_chatgpt_account_id(account_id);
    }
    Ok(client)
}
/// Attach a bearer token sent as `Authorization: Bearer <token>`.
pub fn with_bearer_token(mut self, token: impl Into<String>) -> Self {
    self.bearer_token = Some(token.into());
    self
}

/// Set the `User-Agent` header.
// NOTE(review): a UA string that is not a valid header value is silently
// dropped here (the default "codex-cli" is used instead) — confirm this
// best-effort behavior is intended.
pub fn with_user_agent(mut self, ua: impl Into<String>) -> Self {
    if let Ok(hv) = HeaderValue::from_str(&ua.into()) {
        self.user_agent = Some(hv);
    }
    self
}

/// Attach a `ChatGPT-Account-Id` header value.
pub fn with_chatgpt_account_id(mut self, account_id: impl Into<String>) -> Self {
    self.chatgpt_account_id = Some(account_id.into());
    self
}

/// Override the path style inferred from the base URL.
pub fn with_path_style(mut self, style: PathStyle) -> Self {
    self.path_style = style;
    self
}
/// Assemble the headers sent with every request: user agent (falling back to
/// "codex-cli"), optional bearer token, and optional ChatGPT account id.
fn headers(&self) -> HeaderMap {
    let mut h = HeaderMap::new();
    if let Some(ua) = &self.user_agent {
        h.insert(USER_AGENT, ua.clone());
    } else {
        h.insert(USER_AGENT, HeaderValue::from_static("codex-cli"));
    }
    if let Some(token) = &self.bearer_token {
        let value = format!("Bearer {token}");
        // Tokens that are not valid header values are silently omitted.
        if let Ok(hv) = HeaderValue::from_str(&value) {
            h.insert(AUTHORIZATION, hv);
        }
    }
    if let Some(acc) = &self.chatgpt_account_id
        && let Ok(name) = HeaderName::from_bytes(b"ChatGPT-Account-Id")
        && let Ok(hv) = HeaderValue::from_str(acc)
    {
        h.insert(name, hv);
    }
    h
}
/// Send `req` and return `(body, content_type)`.
///
/// # Errors
/// Fails with a descriptive message (method, URL, status, content type, body)
/// on any non-2xx response.
async fn exec_request(
    &self,
    req: reqwest::RequestBuilder,
    method: &str,
    url: &str,
) -> Result<(String, String)> {
    let res = req.send().await?;
    let status = res.status();
    let ct = res
        .headers()
        .get(CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("")
        .to_string();
    // Body read errors are deliberately swallowed so the status-based error
    // below can still include whatever was readable.
    let body = res.text().await.unwrap_or_default();
    if !status.is_success() {
        anyhow::bail!("{method} {url} failed: {status}; content-type={ct}; body={body}");
    }
    Ok((body, ct))
}
/// Deserialize `body` as `T`, wrapping any parse failure in an error that
/// includes the URL, content type, and raw body for diagnostics.
fn decode_json<T: DeserializeOwned>(&self, url: &str, ct: &str, body: &str) -> Result<T> {
    match serde_json::from_str::<T>(body) {
        Ok(v) => Ok(v),
        Err(e) => {
            anyhow::bail!("Decode error for {url}: {e}; content-type={ct}; body={body}");
        }
    }
}
/// Fetch the current usage/rate-limit status and convert it into a
/// `RateLimitSnapshot`.
pub async fn get_rate_limits(&self) -> Result<RateLimitSnapshot> {
    // Route depends on the backend flavor this client targets.
    let url = match self.path_style {
        PathStyle::CodexApi => format!("{}/api/codex/usage", self.base_url),
        PathStyle::ChatGptApi => format!("{}/wham/usage", self.base_url),
    };
    let req = self.http.get(&url).headers(self.headers());
    let (body, ct) = self.exec_request(req, "GET", &url).await?;
    let payload: RateLimitStatusPayload = self.decode_json(&url, &ct, &body)?;
    Ok(Self::rate_limit_snapshot_from_payload(payload))
}
/// List tasks, optionally constrained by `limit`, `task_filter`, and
/// `environment_id`. Only provided parameters are added to the query string.
pub async fn list_tasks(
    &self,
    limit: Option<i32>,
    task_filter: Option<&str>,
    environment_id: Option<&str>,
) -> Result<PaginatedListTaskListItem> {
    let url = match self.path_style {
        PathStyle::CodexApi => format!("{}/api/codex/tasks/list", self.base_url),
        PathStyle::ChatGptApi => format!("{}/wham/tasks/list", self.base_url),
    };
    let req = self.http.get(&url).headers(self.headers());
    let req = if let Some(lim) = limit {
        req.query(&[("limit", lim)])
    } else {
        req
    };
    let req = if let Some(tf) = task_filter {
        req.query(&[("task_filter", tf)])
    } else {
        req
    };
    let req = if let Some(id) = environment_id {
        req.query(&[("environment_id", id)])
    } else {
        req
    };
    let (body, ct) = self.exec_request(req, "GET", &url).await?;
    self.decode_json::<PaginatedListTaskListItem>(&url, &ct, &body)
}
/// Fetch details for a single task, discarding the raw body.
pub async fn get_task_details(&self, task_id: &str) -> Result<CodeTaskDetailsResponse> {
    let (parsed, _body, _ct) = self.get_task_details_with_body(task_id).await?;
    Ok(parsed)
}

/// Fetch details for a single task, also returning the raw response body and
/// content type (useful for debugging/response capture).
pub async fn get_task_details_with_body(
    &self,
    task_id: &str,
) -> Result<(CodeTaskDetailsResponse, String, String)> {
    let url = match self.path_style {
        PathStyle::CodexApi => format!("{}/api/codex/tasks/{}", self.base_url, task_id),
        PathStyle::ChatGptApi => format!("{}/wham/tasks/{}", self.base_url, task_id),
    };
    let req = self.http.get(&url).headers(self.headers());
    let (body, ct) = self.exec_request(req, "GET", &url).await?;
    let parsed: CodeTaskDetailsResponse = self.decode_json(&url, &ct, &body)?;
    Ok((parsed, body, ct))
}
/// List the sibling turns (alternate attempts) for `turn_id` within `task_id`.
pub async fn list_sibling_turns(
    &self,
    task_id: &str,
    turn_id: &str,
) -> Result<TurnAttemptsSiblingTurnsResponse> {
    // Only the path prefix differs between the two API surfaces.
    let prefix = match self.path_style {
        PathStyle::CodexApi => "api/codex",
        PathStyle::ChatGptApi => "wham",
    };
    let url = format!(
        "{}/{prefix}/tasks/{task_id}/turns/{turn_id}/sibling_turns",
        self.base_url
    );
    let req = self.http.get(&url).headers(self.headers());
    let (body, ct) = self.exec_request(req, "GET", &url).await?;
    self.decode_json::<TurnAttemptsSiblingTurnsResponse>(&url, &ct, &body)
}
/// Create a new task (user turn) by POSTing to the appropriate backend path
/// based on `path_style`. Returns the created task id.
pub async fn create_task(&self, request_body: serde_json::Value) -> Result<String> {
    let url = match self.path_style {
        PathStyle::CodexApi => format!("{}/api/codex/tasks", self.base_url),
        PathStyle::ChatGptApi => format!("{}/wham/tasks", self.base_url),
    };
    let req = self
        .http
        .post(&url)
        .headers(self.headers())
        .header(CONTENT_TYPE, HeaderValue::from_static("application/json"))
        .json(&request_body);
    let (body, ct) = self.exec_request(req, "POST", &url).await?;
    let v: serde_json::Value = match serde_json::from_str(&body) {
        Ok(v) => v,
        Err(e) => anyhow::bail!("Decode error for {url}: {e}; content-type={ct}; body={body}"),
    };
    // Extract id from JSON: prefer `task.id`; fallback to top-level `id` when present.
    let task_id = v
        .get("task")
        .and_then(|t| t.get("id"))
        .and_then(|s| s.as_str())
        .or_else(|| v.get("id").and_then(|s| s.as_str()));
    match task_id {
        Some(id) => Ok(id.to_string()),
        None => anyhow::bail!(
            "POST {url} succeeded but no task id found; content-type={ct}; body={body}"
        ),
    }
}
// rate limit helpers
/// Convert the raw usage payload into the protocol-level [`RateLimitSnapshot`].
fn rate_limit_snapshot_from_payload(payload: RateLimitStatusPayload) -> RateLimitSnapshot {
    // `rate_limit` is doubly-optional in the generated model; flatten both layers.
    let (primary, secondary) = match payload.rate_limit.and_then(|inner| inner) {
        Some(boxed) => {
            let details = *boxed;
            (
                Self::map_rate_limit_window(details.primary_window),
                Self::map_rate_limit_window(details.secondary_window),
            )
        }
        None => (None, None),
    };
    RateLimitSnapshot {
        primary,
        secondary,
        credits: Self::map_credits(payload.credits),
        plan_type: Some(Self::map_plan_type(payload.plan_type)),
    }
}
/// Flatten the doubly-optional window snapshot and convert it to the
/// protocol's [`RateLimitWindow`]; absent at either layer yields `None`.
fn map_rate_limit_window(
    window: Option<Option<Box<RateLimitWindowSnapshot>>>,
) -> Option<RateLimitWindow> {
    let snapshot = *window.flatten()?;
    Some(RateLimitWindow {
        used_percent: f64::from(snapshot.used_percent),
        window_minutes: Self::window_minutes_from_seconds(snapshot.limit_window_seconds),
        resets_at: Some(i64::from(snapshot.reset_at)),
    })
}
/// Flatten the doubly-optional credit details and convert them to the
/// protocol's [`CreditsSnapshot`]; absent at either layer yields `None`.
fn map_credits(credits: Option<Option<Box<CreditStatusDetails>>>) -> Option<CreditsSnapshot> {
    let details = *credits.flatten()?;
    Some(CreditsSnapshot {
        has_credits: details.has_credits,
        unlimited: details.unlimited,
        balance: details.balance.flatten(),
    })
}
fn map_plan_type(plan_type: crate::types::PlanType) -> AccountPlanType {
match plan_type {
crate::types::PlanType::Free => AccountPlanType::Free,
crate::types::PlanType::Plus => AccountPlanType::Plus,
crate::types::PlanType::Pro => AccountPlanType::Pro,
crate::types::PlanType::Team => AccountPlanType::Team,
crate::types::PlanType::Business => AccountPlanType::Business,
crate::types::PlanType::Enterprise => AccountPlanType::Enterprise,
crate::types::PlanType::Edu | crate::types::PlanType::Education => AccountPlanType::Edu,
crate::types::PlanType::Guest
| crate::types::PlanType::Go
| crate::types::PlanType::FreeWorkspace
| crate::types::PlanType::Quorum
| crate::types::PlanType::K12 => AccountPlanType::Unknown,
}
}
fn window_minutes_from_seconds(seconds: i32) -> Option<i64> {
if seconds <= 0 {
return None;
}
let seconds_i64 = i64::from(seconds);
Some((seconds_i64 + 59) / 60)
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/backend-client/src/types.rs | codex-rs/backend-client/src/types.rs | pub use codex_backend_openapi_models::models::CreditStatusDetails;
pub use codex_backend_openapi_models::models::PaginatedListTaskListItem;
pub use codex_backend_openapi_models::models::PlanType;
pub use codex_backend_openapi_models::models::RateLimitStatusDetails;
pub use codex_backend_openapi_models::models::RateLimitStatusPayload;
pub use codex_backend_openapi_models::models::RateLimitWindowSnapshot;
pub use codex_backend_openapi_models::models::TaskListItem;
use serde::Deserialize;
use serde::de::Deserializer;
use serde_json::Value;
use std::collections::HashMap;
/// Hand-rolled models for the Cloud Tasks task-details response.
/// The generated OpenAPI models are pretty bad. This is a half-step
/// towards hand-rolling them.
#[derive(Clone, Debug, Deserialize)]
pub struct CodeTaskDetailsResponse {
    // The user turn that initiated the task, when present.
    #[serde(default)]
    pub current_user_turn: Option<Turn>,
    // The assistant's current turn, when present.
    #[serde(default)]
    pub current_assistant_turn: Option<Turn>,
    // A dedicated diff-producing turn, when the backend provides one.
    #[serde(default)]
    pub current_diff_task_turn: Option<Turn>,
}
/// A single turn in a Cloud Tasks conversation. All fields are optional or
/// defaulted because the backend omits them freely; `deserialize_vec` maps
/// JSON `null` to an empty vector.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct Turn {
    #[serde(default)]
    pub id: Option<String>,
    #[serde(default)]
    pub attempt_placement: Option<i64>,
    #[serde(default, rename = "turn_status")]
    pub turn_status: Option<String>,
    // Ids of alternate attempts of the same turn.
    #[serde(default, deserialize_with = "deserialize_vec")]
    pub sibling_turn_ids: Vec<String>,
    #[serde(default, deserialize_with = "deserialize_vec")]
    pub input_items: Vec<TurnItem>,
    #[serde(default, deserialize_with = "deserialize_vec")]
    pub output_items: Vec<TurnItem>,
    #[serde(default)]
    pub worklog: Option<Worklog>,
    #[serde(default)]
    pub error: Option<TurnError>,
}
/// One input or output item of a turn. `kind` discriminates the payload
/// (e.g. "message", "output_diff", "pr"); unrelated fields stay defaulted.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct TurnItem {
    #[serde(rename = "type", default)]
    pub kind: String,
    #[serde(default)]
    pub role: Option<String>,
    #[serde(default, deserialize_with = "deserialize_vec")]
    pub content: Vec<ContentFragment>,
    // Inline diff carried by "output_diff" items.
    #[serde(default)]
    pub diff: Option<String>,
    // Nested diff payload carried by "pr" items.
    #[serde(default)]
    pub output_diff: Option<DiffPayload>,
}
/// A piece of message content. The backend sends either a structured object
/// or a bare string, so this deserializes untagged: structured is tried
/// first, then plain text.
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum ContentFragment {
    Structured(StructuredContent),
    Text(String),
}
/// Structured message content with an explicit `content_type` tag.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct StructuredContent {
    #[serde(rename = "content_type", default)]
    pub content_type: Option<String>,
    #[serde(default)]
    pub text: Option<String>,
}
/// Wrapper around an optional unified diff, used by "pr" output items.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct DiffPayload {
    #[serde(default)]
    pub diff: Option<String>,
}
/// Running log of messages attached to a turn.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct Worklog {
    #[serde(default, deserialize_with = "deserialize_vec")]
    pub messages: Vec<WorklogMessage>,
}
/// One worklog entry: who wrote it and what it contains.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct WorklogMessage {
    #[serde(default)]
    pub author: Option<Author>,
    #[serde(default)]
    pub content: Option<WorklogContent>,
}
/// Author of a worklog message; only the role (e.g. "assistant") is used.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct Author {
    #[serde(default)]
    pub role: Option<String>,
}
/// Content of a worklog message, split into fragments.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct WorklogContent {
    #[serde(default)]
    pub parts: Vec<ContentFragment>,
}
/// Error reported for a failed turn; both halves are optional.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct TurnError {
    #[serde(default)]
    pub code: Option<String>,
    #[serde(default)]
    pub message: Option<String>,
}
impl ContentFragment {
    /// Return the fragment's displayable text, if any.
    ///
    /// Structured fragments count only when their `content_type` is "text"
    /// (case-insensitive) and the text is non-empty; bare strings count only
    /// when they are not whitespace-only.
    fn text(&self) -> Option<&str> {
        match self {
            ContentFragment::Structured(inner) => {
                let is_text = matches!(
                    inner.content_type.as_deref(),
                    Some(ct) if ct.eq_ignore_ascii_case("text")
                );
                if !is_text {
                    return None;
                }
                inner.text.as_deref().filter(|s| !s.is_empty())
            }
            ContentFragment::Text(raw) => (!raw.trim().is_empty()).then_some(raw.as_str()),
        }
    }
}
impl TurnItem {
fn text_values(&self) -> Vec<String> {
self.content
.iter()
.filter_map(|fragment| fragment.text().map(str::to_string))
.collect()
}
fn diff_text(&self) -> Option<String> {
if self.kind == "output_diff" {
if let Some(diff) = &self.diff
&& !diff.is_empty()
{
return Some(diff.clone());
}
} else if self.kind == "pr"
&& let Some(payload) = &self.output_diff
&& let Some(diff) = &payload.diff
&& !diff.is_empty()
{
return Some(diff.clone());
}
None
}
}
impl Turn {
    /// First non-empty diff found among the turn's output items.
    fn unified_diff(&self) -> Option<String> {
        self.output_items.iter().find_map(TurnItem::diff_text)
    }
    /// All assistant-visible message texts: text from "message" output items,
    /// plus any assistant-authored worklog entries.
    fn message_texts(&self) -> Vec<String> {
        let mut out: Vec<String> = self
            .output_items
            .iter()
            .filter(|item| item.kind == "message")
            .flat_map(TurnItem::text_values)
            .collect();
        if let Some(log) = &self.worklog {
            for message in &log.messages {
                if message.is_assistant() {
                    out.extend(message.text_values());
                }
            }
        }
        out
    }
    /// The user's prompt text, joined from "message" input items.
    /// Items without an explicit role are treated as user messages.
    fn user_prompt(&self) -> Option<String> {
        let parts: Vec<String> = self
            .input_items
            .iter()
            .filter(|item| item.kind == "message")
            .filter(|item| {
                item.role
                    .as_deref()
                    .map(|r| r.eq_ignore_ascii_case("user"))
                    .unwrap_or(true)
            })
            .flat_map(TurnItem::text_values)
            .collect();
        if parts.is_empty() {
            None
        } else {
            Some(parts.join(
                "
",
            ))
        }
    }
    /// Human-readable summary of the turn's error, if any.
    fn error_summary(&self) -> Option<String> {
        self.error.as_ref().and_then(TurnError::summary)
    }
}
impl WorklogMessage {
    /// True when the message was authored by the assistant (case-insensitive).
    fn is_assistant(&self) -> bool {
        matches!(
            self.author.as_ref().and_then(|a| a.role.as_deref()),
            Some(role) if role.eq_ignore_ascii_case("assistant")
        )
    }
    /// Collect the message's non-empty text parts as owned strings.
    fn text_values(&self) -> Vec<String> {
        let Some(content) = &self.content else {
            return Vec::new();
        };
        content
            .parts
            .iter()
            .filter_map(|fragment| fragment.text().map(str::to_string))
            .collect()
    }
}
impl TurnError {
    /// Combine `code` and `message` into a single summary line.
    ///
    /// Missing or empty halves are dropped; when both are present the
    /// result is "code: message", and when neither is, `None`.
    fn summary(&self) -> Option<String> {
        let code = self.code.as_deref().filter(|s| !s.is_empty());
        let message = self.message.as_deref().filter(|s| !s.is_empty());
        match (code, message) {
            (None, None) => None,
            (Some(c), None) => Some(c.to_string()),
            (None, Some(m)) => Some(m.to_string()),
            (Some(c), Some(m)) => Some(format!("{c}: {m}")),
        }
    }
}
/// Convenience accessors over [`CodeTaskDetailsResponse`] for the pieces the
/// CLI actually renders.
pub trait CodeTaskDetailsResponseExt {
    /// Attempt to extract a unified diff string from the assistant or diff turn.
    fn unified_diff(&self) -> Option<String>;
    /// Extract assistant text output messages (no diff) from current turns.
    fn assistant_text_messages(&self) -> Vec<String>;
    /// Extract the user's prompt text from the current user turn, when present.
    fn user_text_prompt(&self) -> Option<String>;
    /// Extract an assistant error message (if the turn failed and provided one).
    fn assistant_error_message(&self) -> Option<String>;
}
impl CodeTaskDetailsResponseExt for CodeTaskDetailsResponse {
    fn unified_diff(&self) -> Option<String> {
        // Prefer the dedicated diff turn; fall back to the assistant turn.
        self.current_diff_task_turn
            .as_ref()
            .and_then(Turn::unified_diff)
            .or_else(|| {
                self.current_assistant_turn
                    .as_ref()
                    .and_then(Turn::unified_diff)
            })
    }
    fn assistant_text_messages(&self) -> Vec<String> {
        // Diff-turn messages first, then assistant-turn messages.
        [
            self.current_diff_task_turn.as_ref(),
            self.current_assistant_turn.as_ref(),
        ]
        .into_iter()
        .flatten()
        .flat_map(Turn::message_texts)
        .collect()
    }
    fn user_text_prompt(&self) -> Option<String> {
        self.current_user_turn.as_ref().and_then(Turn::user_prompt)
    }
    fn assistant_error_message(&self) -> Option<String> {
        self.current_assistant_turn
            .as_ref()
            .and_then(Turn::error_summary)
    }
}
/// Deserialize an optional sequence, mapping JSON `null` (or an absent
/// value) to an empty vector instead of failing.
fn deserialize_vec<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
    D: Deserializer<'de>,
    T: Deserialize<'de>,
{
    let maybe = Option::<Vec<T>>::deserialize(deserializer)?;
    Ok(maybe.unwrap_or_default())
}
/// Response for the sibling-turns endpoint. Each sibling turn is kept as a
/// loosely-typed JSON map because its shape is not modeled yet.
#[derive(Clone, Debug, Deserialize)]
pub struct TurnAttemptsSiblingTurnsResponse {
    #[serde(default)]
    pub sibling_turns: Vec<HashMap<String, Value>>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    /// Load and deserialize one of the checked-in task-details fixtures.
    fn fixture(name: &str) -> CodeTaskDetailsResponse {
        let json = match name {
            "diff" => include_str!("../tests/fixtures/task_details_with_diff.json"),
            "error" => include_str!("../tests/fixtures/task_details_with_error.json"),
            other => panic!("unknown fixture {other}"),
        };
        serde_json::from_str(json).expect("fixture should deserialize")
    }
    #[test]
    fn unified_diff_prefers_current_diff_task_turn() {
        let details = fixture("diff");
        let diff = details.unified_diff().expect("diff present");
        assert!(diff.contains("diff --git"));
    }
    #[test]
    fn unified_diff_falls_back_to_pr_output_diff() {
        // The "error" fixture has no diff turn; the diff comes from a "pr" item.
        let details = fixture("error");
        let diff = details.unified_diff().expect("diff from pr output");
        assert!(diff.contains("lib.rs"));
    }
    #[test]
    fn assistant_text_messages_extracts_text_content() {
        let details = fixture("diff");
        let messages = details.assistant_text_messages();
        assert_eq!(messages, vec!["Assistant response".to_string()]);
    }
    #[test]
    fn user_text_prompt_joins_parts_with_spacing() {
        let details = fixture("diff");
        let prompt = details.user_text_prompt().expect("prompt present");
        assert_eq!(
            prompt,
            "First line
Second line"
        );
    }
    #[test]
    fn assistant_error_message_combines_code_and_message() {
        let details = fixture("error");
        let msg = details
            .assistant_error_message()
            .expect("error should be present");
        assert_eq!(msg, "APPLY_FAILED: Patch could not be applied");
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/app_event_sender.rs | codex-rs/tui2/src/app_event_sender.rs | use tokio::sync::mpsc::UnboundedSender;
use crate::app_event::AppEvent;
use crate::session_log;
/// Cloneable handle for pushing [`AppEvent`]s onto the app's unbounded
/// event channel.
#[derive(Clone, Debug)]
pub(crate) struct AppEventSender {
    pub app_event_tx: UnboundedSender<AppEvent>,
}
impl AppEventSender {
    /// Wrap an existing channel sender.
    pub(crate) fn new(app_event_tx: UnboundedSender<AppEvent>) -> Self {
        Self { app_event_tx }
    }
    /// Send an event to the app event channel. If it fails, we swallow the
    /// error and log it.
    pub(crate) fn send(&self, event: AppEvent) {
        // Record inbound events for high-fidelity session replay, but avoid
        // double-logging Ops; those are logged at the point of submission.
        match &event {
            AppEvent::CodexOp(_) => {}
            other => session_log::log_inbound_app_event(other),
        }
        if let Err(e) = self.app_event_tx.send(event) {
            tracing::error!("failed to send event: {e}");
        }
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/text_formatting.rs | codex-rs/tui2/src/text_formatting.rs | use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthChar;
use unicode_width::UnicodeWidthStr;
/// Return `input` with its first character uppercased (which may expand to
/// several characters, e.g. 'ß' -> "SS"); the remainder is left untouched.
pub(crate) fn capitalize_first(input: &str) -> String {
    let mut chars = input.chars();
    match chars.next() {
        None => String::new(),
        Some(first) => first.to_uppercase().chain(chars).collect(),
    }
}
/// Truncate a tool result to fit within the given height and width. If the
/// text is valid JSON, it is reformatted compactly before truncating.
/// Best-effort: assumes roughly one grapheme per rendered terminal cell.
pub(crate) fn format_and_truncate_tool_result(
    text: &str,
    max_lines: usize,
    line_width: usize,
) -> String {
    // Budget one grapheme per cell, minus one per line as a fudge factor for
    // graphemes that render wider than one cell (and for future resizes).
    let max_graphemes = (max_lines * line_width).saturating_sub(max_lines);
    let formatted = format_json_compact(text);
    let display = formatted.as_deref().unwrap_or(text);
    truncate_text(display, max_graphemes)
}
/// Format JSON text in a compact single-line format with spaces for better Ratatui wrapping.
/// Ex: `{"a":"b",c:["d","e"]}` -> `{"a": "b", "c": ["d", "e"]}`
/// Returns the formatted JSON string if the input is valid JSON, otherwise returns None.
/// This is a little complicated, but it's necessary because Ratatui's wrapping is *very* limited
/// and can only do line breaks at whitespace. If we use the default serde_json format, we get lines
/// without spaces that Ratatui can't wrap nicely. If we use the serde_json pretty format as-is,
/// it's much too sparse and uses too many terminal rows.
/// Relevant issue: https://github.com/ratatui/ratatui/issues/293
pub(crate) fn format_json_compact(text: &str) -> Option<String> {
    let json = serde_json::from_str::<serde_json::Value>(text).ok()?;
    let json_pretty = serde_json::to_string_pretty(&json).unwrap_or_else(|_| json.to_string());
    // Convert multi-line pretty JSON to compact single-line format by removing newlines and excess whitespace
    let mut result = String::new();
    let mut chars = json_pretty.chars().peekable();
    // True while inside a JSON string literal, where whitespace must be kept verbatim.
    let mut in_string = false;
    // True when the previous in-string char was an unpaired backslash, so an
    // escaped quote (`\"`) does not toggle `in_string`.
    let mut escape_next = false;
    // Iterate over the characters in the JSON string, adding spaces after : and , but only when not in a string
    while let Some(ch) = chars.next() {
        match ch {
            '"' if !escape_next => {
                in_string = !in_string;
                result.push(ch);
            }
            '\\' if in_string => {
                // Toggling (rather than setting) handles escaped backslashes:
                // after `\\` the next `"` really does close the string.
                escape_next = !escape_next;
                result.push(ch);
            }
            '\n' | '\r' if !in_string => {
                // Skip newlines when not in a string
            }
            ' ' | '\t' if !in_string => {
                // Add a space after : and , but only when not in a string
                // (and never before a closing bracket, to stay compact).
                if let Some(&next_ch) = chars.peek()
                    && let Some(last_ch) = result.chars().last()
                    && (last_ch == ':' || last_ch == ',')
                    && !matches!(next_ch, '}' | ']')
                {
                    result.push(' ');
                }
            }
            _ => {
                // Any other character consumes a pending escape.
                if escape_next && in_string {
                    escape_next = false;
                }
                result.push(ch);
            }
        }
    }
    Some(result)
}
/// Truncate `text` to `max_graphemes` graphemes. Using graphemes to avoid accidentally truncating in the middle of a multi-codepoint character.
///
/// When truncation happens and the budget is at least 3, the last three
/// grapheme slots are spent on a literal "..." so the result still fits
/// within `max_graphemes`.
pub(crate) fn truncate_text(text: &str, max_graphemes: usize) -> String {
    let mut graphemes = text.grapheme_indices(true);
    // Check if there's a grapheme at position max_graphemes (meaning there are more than max_graphemes total)
    if let Some((byte_index, _)) = graphemes.nth(max_graphemes) {
        // There are more than max_graphemes, so we need to truncate
        if max_graphemes >= 3 {
            // Truncate to max_graphemes - 3 and add "..." to stay within limit
            let mut truncate_graphemes = text.grapheme_indices(true);
            if let Some((truncate_byte_index, _)) = truncate_graphemes.nth(max_graphemes - 3) {
                let truncated = &text[..truncate_byte_index];
                format!("{truncated}...")
            } else {
                // Unreachable in practice: a longer prefix existed above.
                text.to_string()
            }
        } else {
            // max_graphemes < 3, so just return first max_graphemes without "..."
            let truncated = &text[..byte_index];
            truncated.to_string()
        }
    } else {
        // There are max_graphemes or fewer graphemes, return original text
        text.to_string()
    }
}
/// Truncate a path-like string to the given display width, keeping leading and trailing segments
/// where possible and inserting a single Unicode ellipsis between them. If an individual segment
/// cannot fit, it is front-truncated with an ellipsis.
pub(crate) fn center_truncate_path(path: &str, max_width: usize) -> String {
    if max_width == 0 {
        return String::new();
    }
    // Fast path: the whole path already fits.
    if UnicodeWidthStr::width(path) <= max_width {
        return path.to_string();
    }
    let sep = std::path::MAIN_SEPARATOR;
    let has_leading_sep = path.starts_with(sep);
    let has_trailing_sep = path.ends_with(sep);
    // Split into components, dropping the empty pieces a leading/trailing
    // separator produces so they don't count as real segments.
    let mut raw_segments: Vec<&str> = path.split(sep).collect();
    if has_leading_sep && !raw_segments.is_empty() && raw_segments[0].is_empty() {
        raw_segments.remove(0);
    }
    if has_trailing_sep
        && !raw_segments.is_empty()
        && raw_segments.last().is_some_and(|last| last.is_empty())
    {
        raw_segments.pop();
    }
    if raw_segments.is_empty() {
        // Path was only separators; show the root if it fits, else an ellipsis.
        if has_leading_sep {
            let root = sep.to_string();
            if UnicodeWidthStr::width(root.as_str()) <= max_width {
                return root;
            }
        }
        return "…".to_string();
    }
    // Working representation of one path component while fitting.
    struct Segment<'a> {
        original: &'a str, // untruncated component text
        text: String,      // current (possibly front-truncated) text
        truncatable: bool, // false only for the literal "…" separator
        is_suffix: bool,   // true for kept-at-end components
    }
    // Join segments with separators, optionally restoring the leading one.
    let assemble = |leading: bool, segments: &[Segment<'_>]| -> String {
        let mut result = String::new();
        if leading {
            result.push(sep);
        }
        for segment in segments {
            if !result.is_empty() && !result.ends_with(sep) {
                result.push(sep);
            }
            result.push_str(segment.text.as_str());
        }
        result
    };
    // Keep as much of the *end* of `original` as fits in `allowed_width`,
    // prefixing an ellipsis when anything was dropped.
    let front_truncate = |original: &str, allowed_width: usize| -> String {
        if allowed_width == 0 {
            return String::new();
        }
        if UnicodeWidthStr::width(original) <= allowed_width {
            return original.to_string();
        }
        if allowed_width == 1 {
            return "…".to_string();
        }
        let mut kept: Vec<char> = Vec::new();
        let mut used_width = 1; // reserve space for leading ellipsis
        for ch in original.chars().rev() {
            let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0);
            if used_width + ch_width > allowed_width {
                break;
            }
            used_width += ch_width;
            kept.push(ch);
        }
        kept.reverse();
        let mut truncated = String::from("…");
        for ch in kept {
            truncated.push(ch);
        }
        truncated
    };
    // Enumerate every (leading, trailing) keep-count combination.
    let mut combos: Vec<(usize, usize)> = Vec::new();
    let segment_count = raw_segments.len();
    for left in 1..=segment_count {
        let min_right = if left == segment_count { 0 } else { 1 };
        for right in min_right..=(segment_count - left) {
            combos.push((left, right));
        }
    }
    // Prefer keeping at least two trailing segments (or one, for 2-part paths).
    let desired_suffix = if segment_count > 1 {
        std::cmp::min(2, segment_count - 1)
    } else {
        0
    };
    let mut prioritized: Vec<(usize, usize)> = Vec::new();
    let mut fallback: Vec<(usize, usize)> = Vec::new();
    for combo in combos {
        if combo.1 >= desired_suffix {
            prioritized.push(combo);
        } else {
            fallback.push(combo);
        }
    }
    // Try larger keep-counts first so we drop as little of the path as possible.
    let sort_combos = |items: &mut Vec<(usize, usize)>| {
        items.sort_by(|(left_a, right_a), (left_b, right_b)| {
            left_b
                .cmp(left_a)
                .then_with(|| right_b.cmp(right_a))
                .then_with(|| (left_b + right_b).cmp(&(left_a + right_a)))
        });
    };
    sort_combos(&mut prioritized);
    sort_combos(&mut fallback);
    // Repeatedly front-truncate one segment at a time until the assembled
    // candidate fits, or report that this combination cannot be made to fit.
    let fit_segments =
        |segments: &mut Vec<Segment<'_>>, allow_front_truncate: bool| -> Option<String> {
            loop {
                let candidate = assemble(has_leading_sep, segments);
                let width = UnicodeWidthStr::width(candidate.as_str());
                if width <= max_width {
                    return Some(candidate);
                }
                if !allow_front_truncate {
                    return None;
                }
                // Truncate suffix segments first (back to front), then prefix ones.
                let mut indices: Vec<usize> = Vec::new();
                for (idx, seg) in segments.iter().enumerate().rev() {
                    if seg.truncatable && seg.is_suffix {
                        indices.push(idx);
                    }
                }
                for (idx, seg) in segments.iter().enumerate().rev() {
                    if seg.truncatable && !seg.is_suffix {
                        indices.push(idx);
                    }
                }
                if indices.is_empty() {
                    return None;
                }
                let mut changed = false;
                for idx in indices {
                    let original_width = UnicodeWidthStr::width(segments[idx].original);
                    // Leave individually-fitting segments alone when there are
                    // enough components to drop whole segments instead.
                    if original_width <= max_width && segment_count > 2 {
                        continue;
                    }
                    let seg_width = UnicodeWidthStr::width(segments[idx].text.as_str());
                    let other_width = width.saturating_sub(seg_width);
                    let allowed_width = max_width.saturating_sub(other_width).max(1);
                    let new_text = front_truncate(segments[idx].original, allowed_width);
                    if new_text != segments[idx].text {
                        segments[idx].text = new_text;
                        changed = true;
                        break;
                    }
                }
                if !changed {
                    return None;
                }
            }
        };
    for (left_count, right_count) in prioritized.into_iter().chain(fallback.into_iter()) {
        // Build the candidate segment list: prefix, optional "…", then suffix.
        let mut segments: Vec<Segment<'_>> = raw_segments[..left_count]
            .iter()
            .map(|seg| Segment {
                original: seg,
                text: (*seg).to_string(),
                truncatable: true,
                is_suffix: false,
            })
            .collect();
        let need_ellipsis = left_count + right_count < segment_count;
        if need_ellipsis {
            segments.push(Segment {
                original: "…",
                text: "…".to_string(),
                truncatable: false,
                is_suffix: false,
            });
        }
        if right_count > 0 {
            segments.extend(
                raw_segments[segment_count - right_count..]
                    .iter()
                    .map(|seg| Segment {
                        original: seg,
                        text: (*seg).to_string(),
                        truncatable: true,
                        is_suffix: true,
                    }),
            );
        }
        let allow_front_truncate = need_ellipsis || segment_count <= 2;
        if let Some(candidate) = fit_segments(&mut segments, allow_front_truncate) {
            return candidate;
        }
    }
    // Nothing fit even with per-segment truncation; truncate the raw path.
    front_truncate(path, max_width)
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    // --- truncate_text ---
    #[test]
    fn test_truncate_text() {
        let text = "Hello, world!";
        let truncated = truncate_text(text, 8);
        assert_eq!(truncated, "Hello...");
    }
    #[test]
    fn test_truncate_empty_string() {
        let text = "";
        let truncated = truncate_text(text, 5);
        assert_eq!(truncated, "");
    }
    #[test]
    fn test_truncate_max_graphemes_zero() {
        let text = "Hello";
        let truncated = truncate_text(text, 0);
        assert_eq!(truncated, "");
    }
    #[test]
    fn test_truncate_max_graphemes_one() {
        let text = "Hello";
        let truncated = truncate_text(text, 1);
        assert_eq!(truncated, "H");
    }
    #[test]
    fn test_truncate_max_graphemes_two() {
        let text = "Hello";
        let truncated = truncate_text(text, 2);
        assert_eq!(truncated, "He");
    }
    #[test]
    fn test_truncate_max_graphemes_three_boundary() {
        // At a budget of exactly 3, the entire budget goes to the ellipsis.
        let text = "Hello";
        let truncated = truncate_text(text, 3);
        assert_eq!(truncated, "...");
    }
    #[test]
    fn test_truncate_text_shorter_than_limit() {
        let text = "Hi";
        let truncated = truncate_text(text, 10);
        assert_eq!(truncated, "Hi");
    }
    #[test]
    fn test_truncate_text_exact_length() {
        let text = "Hello";
        let truncated = truncate_text(text, 5);
        assert_eq!(truncated, "Hello");
    }
    #[test]
    fn test_truncate_emoji() {
        let text = "👋🌍🚀✨💫";
        let truncated = truncate_text(text, 3);
        assert_eq!(truncated, "...");
        let truncated_longer = truncate_text(text, 4);
        assert_eq!(truncated_longer, "👋...");
    }
    #[test]
    fn test_truncate_unicode_combining_characters() {
        let text = "é́ñ̃"; // Characters with combining marks
        let truncated = truncate_text(text, 2);
        assert_eq!(truncated, "é́ñ̃");
    }
    #[test]
    fn test_truncate_very_long_text() {
        let text = "a".repeat(1000);
        let truncated = truncate_text(&text, 10);
        assert_eq!(truncated, "aaaaaaa...");
        assert_eq!(truncated.len(), 10); // 7 'a's + 3 dots
    }
    // --- format_json_compact ---
    #[test]
    fn test_format_json_compact_simple_object() {
        let json = r#"{ "name": "John", "age": 30 }"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(result, r#"{"name": "John", "age": 30}"#);
    }
    #[test]
    fn test_format_json_compact_nested_object() {
        let json = r#"{ "user": { "name": "John", "details": { "age": 30, "city": "NYC" } } }"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(
            result,
            r#"{"user": {"name": "John", "details": {"age": 30, "city": "NYC"}}}"#
        );
    }
    // --- center_truncate_path ---
    #[test]
    fn test_center_truncate_doesnt_truncate_short_path() {
        let sep = std::path::MAIN_SEPARATOR;
        let path = format!("{sep}Users{sep}codex{sep}Public");
        let truncated = center_truncate_path(&path, 40);
        assert_eq!(truncated, path);
    }
    #[test]
    fn test_center_truncate_truncates_long_path() {
        let sep = std::path::MAIN_SEPARATOR;
        let path = format!("~{sep}hello{sep}the{sep}fox{sep}is{sep}very{sep}fast");
        let truncated = center_truncate_path(&path, 24);
        assert_eq!(
            truncated,
            format!("~{sep}hello{sep}the{sep}…{sep}very{sep}fast")
        );
    }
    #[test]
    fn test_center_truncate_truncates_long_windows_path() {
        let sep = std::path::MAIN_SEPARATOR;
        let path = format!(
            "C:{sep}Users{sep}codex{sep}Projects{sep}super{sep}long{sep}windows{sep}path{sep}file.txt"
        );
        let truncated = center_truncate_path(&path, 36);
        let expected = format!("C:{sep}Users{sep}codex{sep}…{sep}path{sep}file.txt");
        assert_eq!(truncated, expected);
    }
    #[test]
    fn test_center_truncate_handles_long_segment() {
        // A single over-wide segment gets front-truncated with an ellipsis.
        let sep = std::path::MAIN_SEPARATOR;
        let path = format!("~{sep}supercalifragilisticexpialidocious");
        let truncated = center_truncate_path(&path, 18);
        assert_eq!(truncated, format!("~{sep}…cexpialidocious"));
    }
    #[test]
    fn test_format_json_compact_array() {
        let json = r#"[ 1, 2, { "key": "value" }, "string" ]"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(result, r#"[1, 2, {"key": "value"}, "string"]"#);
    }
    #[test]
    fn test_format_json_compact_already_compact() {
        let json = r#"{"compact":true}"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(result, r#"{"compact": true}"#);
    }
    #[test]
    fn test_format_json_compact_with_whitespace() {
        let json = r#"
        {
            "name": "John",
            "hobbies": [
                "reading",
                "coding"
            ]
        }
        "#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(
            result,
            r#"{"name": "John", "hobbies": ["reading", "coding"]}"#
        );
    }
    #[test]
    fn test_format_json_compact_invalid_json() {
        let invalid_json = r#"{"invalid": json syntax}"#;
        let result = format_json_compact(invalid_json);
        assert!(result.is_none());
    }
    #[test]
    fn test_format_json_compact_empty_object() {
        let json = r#"{}"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(result, "{}");
    }
    #[test]
    fn test_format_json_compact_empty_array() {
        let json = r#"[]"#;
        let result = format_json_compact(json).unwrap();
        assert_eq!(result, "[]");
    }
    #[test]
    fn test_format_json_compact_primitive_values() {
        assert_eq!(format_json_compact("42").unwrap(), "42");
        assert_eq!(format_json_compact("true").unwrap(), "true");
        assert_eq!(format_json_compact("false").unwrap(), "false");
        assert_eq!(format_json_compact("null").unwrap(), "null");
        assert_eq!(format_json_compact(r#""string""#).unwrap(), r#""string""#);
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/app.rs | codex-rs/tui2/src/app.rs | use crate::app_backtrack::BacktrackState;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::ApprovalRequest;
use crate::chatwidget::ChatWidget;
use crate::clipboard_copy;
use crate::custom_terminal::Frame;
use crate::diff_render::DiffSummary;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::history_cell::UserHistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
use crate::resume_picker::ResumeSelection;
use crate::transcript_copy_ui::TranscriptCopyUi;
use crate::transcript_multi_click::TranscriptMultiClick;
use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS;
use crate::transcript_selection::TranscriptSelection;
use crate::transcript_selection::TranscriptSelectionPoint;
use crate::transcript_view_cache::TranscriptViewCache;
use crate::tui;
use crate::tui::TuiEvent;
use crate::tui::scrolling::MouseScrollState;
use crate::tui::scrolling::ScrollConfig;
use crate::tui::scrolling::ScrollConfigOverrides;
use crate::tui::scrolling::ScrollDirection;
use crate::tui::scrolling::ScrollUpdate;
use crate::tui::scrolling::TranscriptScroll;
use crate::update_action::UpdateAction;
use codex_ansi_escape::ansi_escape_line;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
use codex_core::protocol::Op;
use codex_core::protocol::SessionSource;
use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_core::terminal::terminal_info;
use codex_protocol::ConversationId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::MouseButton;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use std::collections::BTreeMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use tokio::select;
use tokio::sync::mpsc::unbounded_channel;
#[cfg(not(debug_assertions))]
use crate::history_cell::UpdateAvailableHistoryCell;
/// Everything the caller needs after the TUI has exited.
#[derive(Debug, Clone)]
pub struct AppExitInfo {
    /// Cumulative token usage for the session.
    pub token_usage: TokenUsage,
    /// Conversation the session was attached to, if one was established.
    pub conversation_id: Option<ConversationId>,
    /// Update the user confirmed during the session; applied after exit.
    pub update_action: Option<UpdateAction>,
    /// ANSI-styled transcript lines to print after the TUI exits.
    ///
    /// These lines are rendered against the same width as the final TUI
    /// viewport and include styling (colors, bold, etc.) so that scrollback
    /// preserves the visual structure of the on-screen transcript.
    pub session_lines: Vec<String>,
}
impl From<AppExitInfo> for codex_tui::AppExitInfo {
fn from(info: AppExitInfo) -> Self {
codex_tui::AppExitInfo {
token_usage: info.token_usage,
conversation_id: info.conversation_id,
update_action: info.update_action.map(Into::into),
}
}
}
/// Build the post-exit summary, or `None` when no tokens were consumed.
fn session_summary(
    token_usage: TokenUsage,
    conversation_id: Option<ConversationId>,
) -> Option<SessionSummary> {
    // Nothing happened this session — no summary to show.
    if token_usage.is_zero() {
        return None;
    }
    let resume_command = conversation_id.map(|id| format!("codex resume {id}"));
    Some(SessionSummary {
        usage_line: FinalOutput::from(token_usage).to_string(),
        resume_command,
    })
}
/// Return the skill-load errors recorded for `cwd`, or an empty list when the
/// response has no entry for that directory.
fn errors_for_cwd(cwd: &Path, response: &ListSkillsResponseEvent) -> Vec<SkillErrorInfo> {
    // Each entry carries the cwd it was loaded for; pick the matching one.
    for entry in &response.skills {
        if entry.cwd.as_path() == cwd {
            return entry.errors.clone();
        }
    }
    Vec::new()
}
/// Surface skill-loading failures in the history: one summary warning
/// followed by a `path: message` line per failing SKILL.md.
fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorInfo]) {
    let error_count = errors.len();
    if error_count == 0 {
        return;
    }
    let summary =
        format!("Skipped loading {error_count} skill(s) due to invalid SKILL.md files.");
    app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
        crate::history_cell::new_warning_event(summary),
    )));
    for error in errors {
        let detail = format!("{}: {}", error.path.display(), error.message.as_str());
        app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
            crate::history_cell::new_warning_event(detail),
        )));
    }
}
/// Human-readable wrap-up of a finished session.
#[derive(Debug, Clone, PartialEq, Eq)]
struct SessionSummary {
    /// Rendered token-usage line (produced via `FinalOutput`).
    usage_line: String,
    /// `codex resume <id>` command, when a conversation id is known.
    resume_command: Option<String>,
}
fn should_show_model_migration_prompt(
current_model: &str,
target_model: &str,
seen_migrations: &BTreeMap<String, String>,
available_models: &[ModelPreset],
) -> bool {
if target_model == current_model {
return false;
}
if let Some(seen_target) = seen_migrations.get(current_model)
&& seen_target == target_model
{
return false;
}
if available_models
.iter()
.any(|preset| preset.model == current_model && preset.upgrade.is_some())
{
return true;
}
if available_models
.iter()
.any(|preset| preset.upgrade.as_ref().map(|u| u.id.as_str()) == Some(target_model))
{
return true;
}
false
}
fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool {
match migration_config_key {
HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG => config
.notices
.hide_gpt_5_1_codex_max_migration_prompt
.unwrap_or(false),
HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG => {
config.notices.hide_gpt5_1_migration_prompt.unwrap_or(false)
}
_ => false,
}
}
/// If the configured model advertises an upgrade, show the migration prompt
/// (unless suppressed) and apply the user's choice.
///
/// Returns `Some(AppExitInfo)` only when the user chose to exit from the
/// prompt; `None` means "continue startup", whether or not a migration was
/// offered or accepted. On acceptance, `config` is mutated in place and the
/// corresponding persistence/update events are emitted.
async fn handle_model_migration_prompt_if_needed(
    tui: &mut tui::Tui,
    config: &mut Config,
    model: &str,
    app_event_tx: &AppEventSender,
    models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
    let available_models = models_manager.list_models(config).await;
    // Upgrade metadata attached to the currently selected model, if any.
    let upgrade = available_models
        .iter()
        .find(|preset| preset.model == model)
        .and_then(|preset| preset.upgrade.as_ref());
    if let Some(ModelUpgrade {
        id: target_model,
        reasoning_effort_mapping,
        migration_config_key,
        ..
    }) = upgrade
    {
        // Respect the per-migration "don't show this again" config flag.
        if migration_prompt_hidden(config, migration_config_key.as_str()) {
            return None;
        }
        let target_model = target_model.to_string();
        if !should_show_model_migration_prompt(
            model,
            &target_model,
            &config.notices.model_migrations,
            &available_models,
        ) {
            return None;
        }
        let current_preset = available_models.iter().find(|preset| preset.model == model);
        let target_preset = available_models
            .iter()
            .find(|preset| preset.model == target_model);
        let target_display_name = target_preset
            .map(|preset| preset.display_name.clone())
            .unwrap_or_else(|| target_model.clone());
        // Prefer the target's display name in the heading unless it collides
        // with the current model's name, in which case use the raw slug.
        let heading_label = if target_display_name == model {
            target_model.clone()
        } else {
            target_display_name.clone()
        };
        let target_description = target_preset.and_then(|preset| {
            if preset.description.is_empty() {
                None
            } else {
                Some(preset.description.clone())
            }
        });
        // Only offer an opt-out when the current model is a known preset.
        let can_opt_out = current_preset.is_some();
        let prompt_copy = migration_copy_for_models(
            model,
            &target_model,
            heading_label,
            target_description,
            can_opt_out,
        );
        match run_model_migration_prompt(tui, prompt_copy).await {
            ModelMigrationOutcome::Accepted => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
                config.model = Some(target_model.clone());
                // Translate the user's reasoning effort to the target model's
                // scale when a mapping exists; otherwise keep it unchanged.
                let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
                    && let Some(reasoning_effort) = config.model_reasoning_effort
                {
                    reasoning_effort_mapping
                        .get(&reasoning_effort)
                        .cloned()
                        .or(config.model_reasoning_effort)
                } else {
                    config.model_reasoning_effort
                };
                config.model_reasoning_effort = mapped_effort;
                app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
                app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
                app_event_tx.send(AppEvent::PersistModelSelection {
                    model: target_model.clone(),
                    effort: mapped_effort,
                });
            }
            ModelMigrationOutcome::Rejected => {
                // Remember the rejection so the same prompt is not re-shown.
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
            }
            ModelMigrationOutcome::Exit => {
                return Some(AppExitInfo {
                    token_usage: TokenUsage::default(),
                    conversation_id: None,
                    update_action: None,
                    session_lines: Vec::new(),
                });
            }
        }
    }
    None
}
/// Top-level TUI application state: owns the chat widget, the transcript,
/// and the managers/channels everything else hangs off.
pub(crate) struct App {
    pub(crate) server: Arc<ConversationManager>,
    pub(crate) app_event_tx: AppEventSender,
    pub(crate) chat_widget: ChatWidget,
    pub(crate) auth_manager: Arc<AuthManager>,
    /// Config is stored here so we can recreate ChatWidgets as needed.
    pub(crate) config: Config,
    /// Slug of the model currently in use.
    pub(crate) current_model: String,
    pub(crate) active_profile: Option<String>,
    pub(crate) file_search: FileSearchManager,
    /// Cells making up the scrollback transcript, in insertion order.
    pub(crate) transcript_cells: Vec<Arc<dyn HistoryCell>>,
    // Cached wrapped/rasterized transcript lines (see `render_transcript_cells`).
    transcript_view_cache: TranscriptViewCache,
    #[allow(dead_code)]
    transcript_scroll: TranscriptScroll,
    transcript_selection: TranscriptSelection,
    transcript_multi_click: TranscriptMultiClick,
    // First visible transcript line and total line count; refreshed on render.
    transcript_view_top: usize,
    transcript_total_lines: usize,
    transcript_copy_ui: TranscriptCopyUi,
    // Pager overlay state (Transcript or Static like Diff)
    pub(crate) overlay: Option<Overlay>,
    pub(crate) deferred_history_lines: Vec<Line<'static>>,
    has_emitted_history_lines: bool,
    pub(crate) enhanced_keys_supported: bool,
    /// Controls the animation thread that sends CommitTick events.
    pub(crate) commit_anim_running: Arc<AtomicBool>,
    scroll_config: ScrollConfig,
    scroll_state: MouseScrollState,
    // Esc-backtracking state grouped
    pub(crate) backtrack: crate::app_backtrack::BacktrackState,
    pub(crate) feedback: codex_feedback::CodexFeedback,
    /// Set when the user confirms an update; propagated on exit.
    pub(crate) pending_update_action: Option<UpdateAction>,
    /// Ignore the next ShutdownComplete event when we're intentionally
    /// stopping a conversation (e.g., before starting a new one).
    suppress_shutdown_complete: bool,
    // One-shot suppression of the next world-writable scan after user confirmation.
    skip_world_writable_scan_once: bool,
}
impl App {
/// Tear down the active conversation, if any.
///
/// Sets `suppress_shutdown_complete` first so the resulting
/// `ShutdownComplete` event is ignored (see the field's doc on `App`).
async fn shutdown_current_conversation(&mut self) {
    let Some(conversation_id) = self.chat_widget.conversation_id() else {
        return;
    };
    self.suppress_shutdown_complete = true;
    self.chat_widget.submit_op(Op::Shutdown);
    self.server.remove_conversation(&conversation_id).await;
}
/// Drive a full TUI session: offer a model migration, construct the chat
/// widget (fresh or resumed from a rollout), pump app/terminal events until a
/// handler requests exit, then snapshot the transcript for scrollback.
///
/// Returns the `AppExitInfo` the caller prints/propagates after teardown.
#[allow(clippy::too_many_arguments)]
pub async fn run(
    tui: &mut tui::Tui,
    auth_manager: Arc<AuthManager>,
    mut config: Config,
    active_profile: Option<String>,
    initial_prompt: Option<String>,
    initial_images: Vec<PathBuf>,
    resume_selection: ResumeSelection,
    feedback: codex_feedback::CodexFeedback,
    is_first_run: bool,
) -> Result<AppExitInfo> {
    use tokio_stream::StreamExt;
    let (app_event_tx, mut app_event_rx) = unbounded_channel();
    let app_event_tx = AppEventSender::new(app_event_tx);
    let conversation_manager = Arc::new(ConversationManager::new(
        auth_manager.clone(),
        SessionSource::Cli,
    ));
    let mut model = conversation_manager
        .get_models_manager()
        .get_model(&config.model, &config)
        .await;
    // Offer a model migration before anything else; `Some` means the user
    // chose to exit from that prompt.
    let exit_info = handle_model_migration_prompt_if_needed(
        tui,
        &mut config,
        model.as_str(),
        &app_event_tx,
        conversation_manager.get_models_manager(),
    )
    .await;
    if let Some(exit_info) = exit_info {
        return Ok(exit_info);
    }
    // The migration prompt may have rewritten `config.model`.
    if let Some(updated_model) = config.model.clone() {
        model = updated_model;
    }
    let enhanced_keys_supported = tui.enhanced_keys_supported();
    let mut chat_widget = match resume_selection {
        ResumeSelection::StartFresh | ResumeSelection::Exit => {
            let init = crate::chatwidget::ChatWidgetInit {
                config: config.clone(),
                frame_requester: tui.frame_requester(),
                app_event_tx: app_event_tx.clone(),
                initial_prompt: initial_prompt.clone(),
                initial_images: initial_images.clone(),
                enhanced_keys_supported,
                auth_manager: auth_manager.clone(),
                models_manager: conversation_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model: model.clone(),
            };
            ChatWidget::new(init, conversation_manager.clone())
        }
        ResumeSelection::Resume(path) => {
            // Rebuild the widget from a recorded rollout on disk.
            let resumed = conversation_manager
                .resume_conversation_from_rollout(
                    config.clone(),
                    path.clone(),
                    auth_manager.clone(),
                )
                .await
                .wrap_err_with(|| {
                    format!("Failed to resume session from {}", path.display())
                })?;
            let init = crate::chatwidget::ChatWidgetInit {
                config: config.clone(),
                frame_requester: tui.frame_requester(),
                app_event_tx: app_event_tx.clone(),
                initial_prompt: initial_prompt.clone(),
                initial_images: initial_images.clone(),
                enhanced_keys_supported,
                auth_manager: auth_manager.clone(),
                models_manager: conversation_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model: model.clone(),
            };
            ChatWidget::new_from_existing(
                init,
                resumed.conversation,
                resumed.session_configured,
            )
        }
    };
    chat_widget.maybe_prompt_windows_sandbox_enable();
    let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
    // Update checks only run in release builds.
    #[cfg(not(debug_assertions))]
    let upgrade_version = crate::updates::get_upgrade_version(&config);
    let scroll_config = ScrollConfig::from_terminal(
        &terminal_info(),
        ScrollConfigOverrides {
            events_per_tick: config.tui_scroll_events_per_tick,
            wheel_lines_per_tick: config.tui_scroll_wheel_lines,
            trackpad_lines_per_tick: config.tui_scroll_trackpad_lines,
            trackpad_accel_events: config.tui_scroll_trackpad_accel_events,
            trackpad_accel_max: config.tui_scroll_trackpad_accel_max,
            mode: Some(config.tui_scroll_mode),
            wheel_tick_detect_max_ms: config.tui_scroll_wheel_tick_detect_max_ms,
            wheel_like_max_duration_ms: config.tui_scroll_wheel_like_max_duration_ms,
            invert_direction: config.tui_scroll_invert,
        },
    );
    let copy_selection_shortcut = crate::transcript_copy_ui::detect_copy_selection_shortcut();
    let mut app = Self {
        server: conversation_manager.clone(),
        app_event_tx,
        chat_widget,
        auth_manager: auth_manager.clone(),
        config,
        current_model: model.clone(),
        active_profile,
        file_search,
        enhanced_keys_supported,
        transcript_cells: Vec::new(),
        transcript_view_cache: TranscriptViewCache::new(),
        transcript_scroll: TranscriptScroll::default(),
        transcript_selection: TranscriptSelection::default(),
        transcript_multi_click: TranscriptMultiClick::default(),
        transcript_view_top: 0,
        transcript_total_lines: 0,
        transcript_copy_ui: TranscriptCopyUi::new_with_shortcut(copy_selection_shortcut),
        overlay: None,
        deferred_history_lines: Vec::new(),
        has_emitted_history_lines: false,
        commit_anim_running: Arc::new(AtomicBool::new(false)),
        scroll_config,
        scroll_state: MouseScrollState::default(),
        backtrack: BacktrackState::default(),
        feedback: feedback.clone(),
        pending_update_action: None,
        suppress_shutdown_complete: false,
        skip_world_writable_scan_once: false,
    };
    // On startup, if Agent mode (workspace-write) or ReadOnly is active, warn about world-writable dirs on Windows.
    #[cfg(target_os = "windows")]
    {
        let should_check = codex_core::get_platform_sandbox().is_some()
            && matches!(
                app.config.sandbox_policy.get(),
                codex_core::protocol::SandboxPolicy::WorkspaceWrite { .. }
                    | codex_core::protocol::SandboxPolicy::ReadOnly
            )
            && !app
                .config
                .notices
                .hide_world_writable_warning
                .unwrap_or(false);
        if should_check {
            let cwd = app.config.cwd.clone();
            let env_map: std::collections::HashMap<String, String> = std::env::vars().collect();
            let tx = app.app_event_tx.clone();
            let logs_base_dir = app.config.codex_home.clone();
            let sandbox_policy = app.config.sandbox_policy.get().clone();
            Self::spawn_world_writable_scan(cwd, env_map, logs_base_dir, sandbox_policy, tx);
        }
    }
    // Release builds surface an "update available" banner in history.
    #[cfg(not(debug_assertions))]
    if let Some(latest_version) = upgrade_version {
        app.handle_event(
            tui,
            AppEvent::InsertHistoryCell(Box::new(UpdateAvailableHistoryCell::new(
                latest_version,
                crate::update_action::get_update_action(),
            ))),
        )
        .await?;
    }
    let tui_events = tui.event_stream();
    tokio::pin!(tui_events);
    tui.frame_requester().schedule_frame();
    // Main loop: interleave app events and terminal events until a handler
    // returns `false` (exit requested).
    while select! {
        Some(event) = app_event_rx.recv() => {
            app.handle_event(tui, event).await?
        }
        Some(event) = tui_events.next() => {
            app.handle_tui_event(tui, event).await?
        }
    } {}
    // Re-render the transcript as ANSI lines so the terminal scrollback keeps
    // the styled conversation after the TUI is torn down.
    let width = tui.terminal.last_known_screen_size.width;
    let session_lines = if width == 0 {
        Vec::new()
    } else {
        let transcript =
            crate::transcript_render::build_transcript_lines(&app.transcript_cells, width);
        let (lines, line_meta) = (transcript.lines, transcript.meta);
        let is_user_cell: Vec<bool> = app
            .transcript_cells
            .iter()
            .map(|cell| cell.as_any().is::<UserHistoryCell>())
            .collect();
        crate::transcript_render::render_lines_to_ansi(&lines, &line_meta, &is_user_cell, width)
    };
    tui.terminal.clear()?;
    Ok(AppExitInfo {
        token_usage: app.token_usage(),
        conversation_id: app.chat_widget.conversation_id(),
        update_action: app.pending_update_action,
        session_lines,
    })
}
/// Dispatch a single terminal event (key, mouse, paste, or draw).
///
/// Returns `Ok(true)` to keep the event loop running. When an overlay is
/// active it gets first claim on the event.
pub(crate) async fn handle_tui_event(
    &mut self,
    tui: &mut tui::Tui,
    event: TuiEvent,
) -> Result<bool> {
    // Scroll coalescing is advanced once per frame, even with an overlay up.
    if matches!(&event, TuiEvent::Draw) {
        self.handle_scroll_tick(tui);
    }
    if self.overlay.is_some() {
        let _ = self.handle_backtrack_overlay_event(tui, event).await?;
    } else {
        match event {
            TuiEvent::Key(key_event) => {
                self.handle_key_event(tui, key_event).await;
            }
            TuiEvent::Mouse(mouse_event) => {
                self.handle_mouse_event(tui, mouse_event);
            }
            TuiEvent::Paste(pasted) => {
                // Many terminals convert newlines to \r when pasting (e.g., iTerm2),
                // but tui-textarea expects \n. Normalize CR to LF.
                // [tui-textarea]: https://github.com/rhysd/tui-textarea/blob/4d18622eeac13b309e0ff6a55a46ac6706da68cf/src/textarea.rs#L782-L783
                // [iTerm2]: https://github.com/gnachman/iTerm2/blob/5d0c0d9f68523cbd0494dad5422998964a2ecd8d/sources/iTermPasteHelper.m#L206-L216
                let pasted = pasted.replace("\r", "\n");
                self.chat_widget.handle_paste(pasted);
            }
            TuiEvent::Draw => {
                self.chat_widget.maybe_post_pending_notification(tui);
                if self
                    .chat_widget
                    .handle_paste_burst_tick(tui.frame_requester())
                {
                    return Ok(true);
                }
                let cells = self.transcript_cells.clone();
                // Layout: transcript on top, chat widget below it, and any
                // remaining rows explicitly cleared.
                tui.draw(tui.terminal.size()?.height, |frame| {
                    let chat_height = self.chat_widget.desired_height(frame.area().width);
                    let chat_top = self.render_transcript_cells(frame, &cells, chat_height);
                    let chat_area = Rect {
                        x: frame.area().x,
                        y: chat_top,
                        width: frame.area().width,
                        height: chat_height.min(
                            frame
                                .area()
                                .height
                                .saturating_sub(chat_top.saturating_sub(frame.area().y)),
                        ),
                    };
                    self.chat_widget.render(chat_area, frame.buffer);
                    let chat_bottom = chat_area.y.saturating_add(chat_area.height);
                    if chat_bottom < frame.area().bottom() {
                        Clear.render_ref(
                            Rect {
                                x: frame.area().x,
                                y: chat_bottom,
                                width: frame.area().width,
                                height: frame.area().bottom().saturating_sub(chat_bottom),
                            },
                            frame.buffer,
                        );
                    }
                    if let Some((x, y)) = self.chat_widget.cursor_pos(chat_area) {
                        frame.set_cursor_position((x, y));
                    }
                })?;
                // Push the current transcript scroll/selection status into the
                // chat widget so it can reflect them in its own UI.
                let transcript_scrolled =
                    !matches!(self.transcript_scroll, TranscriptScroll::ToBottom);
                let selection_active = matches!(
                    (self.transcript_selection.anchor, self.transcript_selection.head),
                    (Some(a), Some(b)) if a != b
                );
                // 1-based "line X of Y" indicator; None when there is nothing
                // to scroll.
                let scroll_position = if self.transcript_total_lines == 0 {
                    None
                } else {
                    Some((
                        self.transcript_view_top.saturating_add(1),
                        self.transcript_total_lines,
                    ))
                };
                self.chat_widget.set_transcript_ui_state(
                    transcript_scrolled,
                    selection_active,
                    scroll_position,
                    self.copy_selection_key(),
                );
            }
        }
    }
    Ok(true)
}
/// Render the transcript above the chat widget and return the y coordinate
/// where the chat widget should start.
///
/// Also refreshes `transcript_scroll`, `transcript_view_top`, and
/// `transcript_total_lines` so later handlers see the current viewport.
pub(crate) fn render_transcript_cells(
    &mut self,
    frame: &mut Frame,
    cells: &[Arc<dyn HistoryCell>],
    chat_height: u16,
) -> u16 {
    let area = frame.area();
    // Degenerate frame: reset scroll state and give everything to the chat.
    if area.width == 0 || area.height == 0 {
        self.transcript_scroll = TranscriptScroll::default();
        self.transcript_view_top = 0;
        self.transcript_total_lines = 0;
        return area.bottom().saturating_sub(chat_height);
    }
    let chat_height = chat_height.min(area.height);
    let max_transcript_height = area.height.saturating_sub(chat_height);
    // Chat consumes the whole frame: no transcript rows to draw.
    if max_transcript_height == 0 {
        self.transcript_scroll = TranscriptScroll::default();
        self.transcript_view_top = 0;
        self.transcript_total_lines = 0;
        return area.y;
    }
    let transcript_area = Rect {
        x: area.x,
        y: area.y,
        width: area.width,
        height: max_transcript_height,
    };
    // (Re)wrap cells to this width; cached by the view cache.
    self.transcript_view_cache
        .ensure_wrapped(cells, transcript_area.width);
    let total_lines = self.transcript_view_cache.lines().len();
    if total_lines == 0 {
        Clear.render_ref(transcript_area, frame.buffer);
        self.transcript_scroll = TranscriptScroll::default();
        self.transcript_view_top = 0;
        self.transcript_total_lines = 0;
        return area.y;
    }
    self.transcript_total_lines = total_lines;
    let max_visible = std::cmp::min(max_transcript_height as usize, total_lines);
    let max_start = total_lines.saturating_sub(max_visible);
    // Resolve the abstract scroll state into a concrete top line index.
    let (scroll_state, top_offset) = {
        let line_meta = self.transcript_view_cache.line_meta();
        self.transcript_scroll.resolve_top(line_meta, max_start)
    };
    self.transcript_scroll = scroll_state;
    self.transcript_view_top = top_offset;
    let transcript_visible_height = max_visible as u16;
    // Short transcripts sit at the top with the chat directly below (plus a
    // one-line gap); long transcripts pin the chat to the bottom edge.
    let chat_top = if total_lines <= max_transcript_height as usize {
        let gap = if transcript_visible_height == 0 { 0 } else { 1 };
        area.y
            .saturating_add(transcript_visible_height)
            .saturating_add(gap)
    } else {
        area.bottom().saturating_sub(chat_height)
    };
    // Clear the full region above the chat before painting rows into it.
    let clear_height = chat_top.saturating_sub(area.y);
    if clear_height > 0 {
        Clear.render_ref(
            Rect {
                x: area.x,
                y: area.y,
                width: area.width,
                height: clear_height,
            },
            frame.buffer,
        );
    }
    let transcript_area = Rect {
        x: area.x,
        y: area.y,
        width: area.width,
        height: transcript_visible_height,
    };
    // Cache a few viewports worth of rasterized rows so redraws during streaming can cheaply
    // copy already-rendered `Cell`s instead of re-running grapheme segmentation.
    self.transcript_view_cache
        .set_raster_capacity(max_visible.saturating_mul(4).max(256));
    for (row_index, line_index) in (top_offset..total_lines).enumerate() {
        if row_index >= max_visible {
            break;
        }
        let y = transcript_area.y + row_index as u16;
        let row_area = Rect {
            x: transcript_area.x,
            y,
            width: transcript_area.width,
            height: 1,
        };
        self.transcript_view_cache
            .render_row_index_into(line_index, row_area, frame.buffer);
    }
    // Selection highlight first, then the copy pill when a non-empty
    // selection exists; otherwise drop any stale affordance.
    self.apply_transcript_selection(transcript_area, frame.buffer);
    if let (Some(anchor), Some(head)) = (
        self.transcript_selection.anchor,
        self.transcript_selection.head,
    ) && anchor != head
    {
        self.transcript_copy_ui.render_copy_pill(
            transcript_area,
            frame.buffer,
            (anchor.line_index, anchor.column),
            (head.line_index, head.column),
            self.transcript_view_top,
            self.transcript_total_lines,
        );
    } else {
        self.transcript_copy_ui.clear_affordance();
    }
    chat_top
}
/// Handle mouse interaction in the main transcript view.
///
/// - Mouse wheel movement scrolls the conversation history using stream-based
/// normalization (events-per-line factor, discrete vs. continuous streams),
/// independent of the terminal's own scrollback.
/// - Mouse drags adjust a text selection defined in terms of
/// flattened transcript lines and columns, so the selection is anchored
/// to the underlying content rather than absolute screen rows.
/// - When the user drags to extend a selection while the view is following the bottom
/// and a task is actively running (e.g., streaming a response), the scroll mode is
/// first converted into an anchored position so that ongoing updates no longer move
/// the viewport under the selection. A simple click without a drag does not change
/// scroll behavior.
/// - Mouse events outside the transcript area (e.g. over the composer/footer) must not
/// start or mutate transcript selection state. A left-click outside the transcript
/// clears any existing transcript selection so the user can dismiss the highlight.
fn handle_mouse_event(
&mut self,
tui: &mut tui::Tui,
mouse_event: crossterm::event::MouseEvent,
) {
use crossterm::event::MouseEventKind;
if self.overlay.is_some() {
return;
}
let size = tui.terminal.last_known_screen_size;
let width = size.width;
let height = size.height;
if width == 0 || height == 0 {
return;
}
let chat_height = self.chat_widget.desired_height(width);
if chat_height >= height {
return;
}
// Only handle events over the transcript area above the composer.
let transcript_height = height.saturating_sub(chat_height);
if transcript_height == 0 {
return;
}
let transcript_area = Rect {
x: 0,
y: 0,
width,
height: transcript_height,
};
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/exec_command.rs | codex-rs/tui2/src/exec_command.rs | use std::path::Path;
use std::path::PathBuf;
use codex_core::parse_command::extract_shell_command;
use dirs::home_dir;
use shlex::try_join;
/// Render `command` as a single shell-quoted string; falls back to a plain
/// space-join when the tokens cannot be represented via shell quoting.
pub(crate) fn escape_command(command: &[String]) -> String {
    match try_join(command.iter().map(String::as_str)) {
        Ok(joined) => joined,
        Err(_) => command.join(" "),
    }
}
/// When `command` is a recognized shell invocation (e.g. `bash -lc <script>`,
/// per `extract_shell_command`), return just the inner script; otherwise
/// shell-escape the full argv.
pub(crate) fn strip_bash_lc_and_escape(command: &[String]) -> String {
    match extract_shell_command(command) {
        Some((_, script)) => script.to_string(),
        None => escape_command(command),
    }
}
/// If `path` is absolute and inside `$HOME`, return the part *after* the home
/// directory; otherwise return `None` (also when the home directory cannot be
/// determined). Note that if `path` is exactly the home directory, the result
/// is an empty path.
pub(crate) fn relativize_to_home<P>(path: P) -> Option<PathBuf>
where
    P: AsRef<Path>,
{
    let path = path.as_ref();
    // Relative paths cannot be meaningfully compared against $HOME.
    if !path.is_absolute() {
        return None;
    }
    home_dir().and_then(|home| path.strip_prefix(home).ok().map(Path::to_path_buf))
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_escape_command() {
        // Arguments with spaces or shell metacharacters must come back quoted.
        let args = vec!["foo".into(), "bar baz".into(), "weird&stuff".into()];
        let cmdline = escape_command(&args);
        assert_eq!(cmdline, "foo 'bar baz' 'weird&stuff'");
    }
    #[test]
    fn test_strip_bash_lc_and_escape() {
        // `<shell> -lc <script>` invocations collapse to just the script,
        // regardless of shell name or whether an absolute path is used.
        // Test bash
        let args = vec!["bash".into(), "-lc".into(), "echo hello".into()];
        let cmdline = strip_bash_lc_and_escape(&args);
        assert_eq!(cmdline, "echo hello");
        // Test zsh
        let args = vec!["zsh".into(), "-lc".into(), "echo hello".into()];
        let cmdline = strip_bash_lc_and_escape(&args);
        assert_eq!(cmdline, "echo hello");
        // Test absolute path to zsh
        let args = vec!["/usr/bin/zsh".into(), "-lc".into(), "echo hello".into()];
        let cmdline = strip_bash_lc_and_escape(&args);
        assert_eq!(cmdline, "echo hello");
        // Test absolute path to bash
        let args = vec!["/bin/bash".into(), "-lc".into(), "echo hello".into()];
        let cmdline = strip_bash_lc_and_escape(&args);
        assert_eq!(cmdline, "echo hello");
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/chatwidget.rs | codex-rs/tui2/src/chatwidget.rs | use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use codex_app_server_protocol::AuthMode;
use codex_backend_client::Client as BackendClient;
use codex_core::config::Config;
use codex_core::config::ConstraintResult;
use codex_core::config::types::Notifications;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::local_git_branches;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningDeltaEvent;
use codex_core::protocol::AgentReasoningEvent;
use codex_core::protocol::AgentReasoningRawContentDeltaEvent;
use codex_core::protocol::AgentReasoningRawContentEvent;
use codex_core::protocol::ApplyPatchApprovalRequestEvent;
use codex_core::protocol::BackgroundEventEvent;
use codex_core::protocol::CreditsSnapshot;
use codex_core::protocol::DeprecationNoticeEvent;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecApprovalRequestEvent;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::ExecCommandSource;
use codex_core::protocol::ExitedReviewModeEvent;
use codex_core::protocol::ListCustomPromptsResponseEvent;
use codex_core::protocol::ListSkillsResponseEvent;
use codex_core::protocol::McpListToolsResponseEvent;
use codex_core::protocol::McpStartupCompleteEvent;
use codex_core::protocol::McpStartupStatus;
use codex_core::protocol::McpStartupUpdateEvent;
use codex_core::protocol::McpToolCallBeginEvent;
use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::Op;
use codex_core::protocol::PatchApplyBeginEvent;
use codex_core::protocol::RateLimitSnapshot;
use codex_core::protocol::ReviewRequest;
use codex_core::protocol::ReviewTarget;
use codex_core::protocol::SkillsListEntry;
use codex_core::protocol::StreamErrorEvent;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::protocol::TerminalInteractionEvent;
use codex_core::protocol::TokenUsage;
use codex_core::protocol::TokenUsageInfo;
use codex_core::protocol::TurnAbortReason;
use codex_core::protocol::TurnDiffEvent;
use codex_core::protocol::UndoCompletedEvent;
use codex_core::protocol::UndoStartedEvent;
use codex_core::protocol::UserMessageEvent;
use codex_core::protocol::ViewImageToolCallEvent;
use codex_core::protocol::WarningEvent;
use codex_core::protocol::WebSearchBeginEvent;
use codex_core::protocol::WebSearchEndEvent;
use codex_core::skills::model::SkillMetadata;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ElicitationRequestEvent;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::user_input::UserInput;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use rand::Rng;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use tokio::sync::mpsc::UnboundedSender;
use tokio::task::JoinHandle;
use tracing::debug;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::ApprovalRequest;
use crate::bottom_pane::BottomPane;
use crate::bottom_pane::BottomPaneParams;
use crate::bottom_pane::CancellationEvent;
use crate::bottom_pane::InputResult;
use crate::bottom_pane::SelectionAction;
use crate::bottom_pane::SelectionItem;
use crate::bottom_pane::SelectionViewParams;
use crate::bottom_pane::custom_prompt_view::CustomPromptView;
use crate::bottom_pane::popup_consts::standard_popup_hint_line;
use crate::clipboard_paste::paste_image_to_temp_png;
use crate::diff_render::display_path_for;
use crate::exec_cell::CommandOutput;
use crate::exec_cell::ExecCell;
use crate::exec_cell::new_active_exec_command;
use crate::get_git_diff::get_git_diff;
use crate::history_cell;
use crate::history_cell::AgentMessageCell;
use crate::history_cell::HistoryCell;
use crate::history_cell::McpToolCallCell;
use crate::history_cell::PlainHistoryCell;
use crate::markdown::append_markdown;
use crate::render::Insets;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::FlexRenderable;
use crate::render::renderable::Renderable;
use crate::render::renderable::RenderableExt;
use crate::render::renderable::RenderableItem;
use crate::slash_command::SlashCommand;
use crate::status::RateLimitSnapshotDisplay;
use crate::text_formatting::truncate_text;
use crate::tui::FrameRequester;
mod interrupts;
use self::interrupts::InterruptManager;
mod agent;
use self::agent::spawn_agent;
use self::agent::spawn_agent_from_existing;
mod session_header;
use self::session_header::SessionHeader;
use crate::streaming::controller::StreamController;
use std::path::Path;
use chrono::Local;
use codex_common::approval_presets::ApprovalPreset;
use codex_common::approval_presets::builtin_approval_presets;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_file_search::FileMatch;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::plan_tool::UpdatePlanArgs;
use strum::IntoEnumIterator;
/// Title of the inline help for `!`-prefixed (locally run) shell commands.
const USER_SHELL_COMMAND_HELP_TITLE: &str = "Prefix a command with ! to run it locally";
/// Example line accompanying the help title above.
const USER_SHELL_COMMAND_HELP_HINT: &str = "Example: !ls";
// Track information about an in-flight exec command.
struct RunningCommand {
    // Raw argv of the command being executed.
    command: Vec<String>,
    // Pre-parsed form of the command — presumably used for display; confirm at use site.
    parsed_cmd: Vec<ParsedCommand>,
    // Origin of the exec request (see `ExecCommandSource`).
    source: ExecCommandSource,
}
/// Remembers the display string of a unified-exec command being waited on,
/// so repeated waits for the same command can be recognized.
struct UnifiedExecWaitState {
    // Display form of the command currently being waited on.
    command_display: String,
}

impl UnifiedExecWaitState {
    fn new(command_display: String) -> Self {
        UnifiedExecWaitState { command_display }
    }

    /// True when `command_display` matches the command already being waited on.
    fn is_duplicate(&self, command_display: &str) -> bool {
        command_display == self.command_display
    }
}
/// Usage percentages at which one-time rate-limit warnings fire
/// (see `RateLimitWarningState::take_warnings`).
const RATE_LIMIT_WARNING_THRESHOLDS: [f64; 3] = [75.0, 90.0, 95.0];
/// Model slug used by the rate-limit nudge — use site is outside this chunk;
/// confirm semantics there.
const NUDGE_MODEL_SLUG: &str = "gpt-5.1-codex-mini";
/// Usage percent past which the model-switch prompt may be offered
/// (see `RateLimitSwitchPromptState`).
const RATE_LIMIT_SWITCH_PROMPT_THRESHOLD: f64 = 90.0;
/// Tracks which rate-limit warning thresholds have already fired, so each
/// threshold produces at most one warning.
#[derive(Default)]
struct RateLimitWarningState {
    // Cursor into RATE_LIMIT_WARNING_THRESHOLDS for the secondary window.
    secondary_index: usize,
    // Cursor into RATE_LIMIT_WARNING_THRESHOLDS for the primary window.
    primary_index: usize,
}
impl RateLimitWarningState {
    /// Return warning messages for thresholds newly crossed by the given
    /// usage percentages, advancing the per-window cursors so each threshold
    /// fires at most once.
    ///
    /// A window sitting at exactly 100% suppresses all warnings here —
    /// presumably the hard cap is reported elsewhere; confirm at call sites.
    fn take_warnings(
        &mut self,
        secondary_used_percent: Option<f64>,
        secondary_window_minutes: Option<i64>,
        primary_used_percent: Option<f64>,
        primary_window_minutes: Option<i64>,
    ) -> Vec<String> {
        let reached_secondary_cap =
            matches!(secondary_used_percent, Some(percent) if percent == 100.0);
        let reached_primary_cap = matches!(primary_used_percent, Some(percent) if percent == 100.0);
        if reached_secondary_cap || reached_primary_cap {
            return Vec::new();
        }
        let mut warnings = Vec::new();
        if let Some(secondary_used_percent) = secondary_used_percent {
            // Advance past every threshold now exceeded; warn only for the
            // highest one so a large jump emits a single message.
            let mut highest_secondary: Option<f64> = None;
            while self.secondary_index < RATE_LIMIT_WARNING_THRESHOLDS.len()
                && secondary_used_percent >= RATE_LIMIT_WARNING_THRESHOLDS[self.secondary_index]
            {
                highest_secondary = Some(RATE_LIMIT_WARNING_THRESHOLDS[self.secondary_index]);
                self.secondary_index += 1;
            }
            if let Some(threshold) = highest_secondary {
                // Label falls back to "weekly" when no window length is known.
                let limit_label = secondary_window_minutes
                    .map(get_limits_duration)
                    .unwrap_or_else(|| "weekly".to_string());
                let remaining_percent = 100.0 - threshold;
                warnings.push(format!(
                    "Heads up, you have less than {remaining_percent:.0}% of your {limit_label} limit left. Run /status for a breakdown."
                ));
            }
        }
        if let Some(primary_used_percent) = primary_used_percent {
            // Same threshold-cursor logic for the primary window.
            let mut highest_primary: Option<f64> = None;
            while self.primary_index < RATE_LIMIT_WARNING_THRESHOLDS.len()
                && primary_used_percent >= RATE_LIMIT_WARNING_THRESHOLDS[self.primary_index]
            {
                highest_primary = Some(RATE_LIMIT_WARNING_THRESHOLDS[self.primary_index]);
                self.primary_index += 1;
            }
            if let Some(threshold) = highest_primary {
                // Label falls back to "5h" when no window length is known.
                let limit_label = primary_window_minutes
                    .map(get_limits_duration)
                    .unwrap_or_else(|| "5h".to_string());
                let remaining_percent = 100.0 - threshold;
                warnings.push(format!(
                    "Heads up, you have less than {remaining_percent:.0}% of your {limit_label} limit left. Run /status for a breakdown."
                ));
            }
        }
        warnings
    }
}
pub(crate) fn get_limits_duration(windows_minutes: i64) -> String {
const MINUTES_PER_HOUR: i64 = 60;
const MINUTES_PER_DAY: i64 = 24 * MINUTES_PER_HOUR;
const MINUTES_PER_WEEK: i64 = 7 * MINUTES_PER_DAY;
const MINUTES_PER_MONTH: i64 = 30 * MINUTES_PER_DAY;
const ROUNDING_BIAS_MINUTES: i64 = 3;
let windows_minutes = windows_minutes.max(0);
if windows_minutes <= MINUTES_PER_DAY.saturating_add(ROUNDING_BIAS_MINUTES) {
let adjusted = windows_minutes.saturating_add(ROUNDING_BIAS_MINUTES);
let hours = std::cmp::max(1, adjusted / MINUTES_PER_HOUR);
format!("{hours}h")
} else if windows_minutes <= MINUTES_PER_WEEK.saturating_add(ROUNDING_BIAS_MINUTES) {
"weekly".to_string()
} else if windows_minutes <= MINUTES_PER_MONTH.saturating_add(ROUNDING_BIAS_MINUTES) {
"monthly".to_string()
} else {
"annual".to_string()
}
}
/// Common initialization parameters shared by all `ChatWidget` constructors.
pub(crate) struct ChatWidgetInit {
    pub(crate) config: Config,
    pub(crate) frame_requester: FrameRequester,
    pub(crate) app_event_tx: AppEventSender,
    // Optional prompt to submit automatically once the session is configured.
    pub(crate) initial_prompt: Option<String>,
    // Image attachments to send alongside the initial prompt.
    pub(crate) initial_images: Vec<PathBuf>,
    pub(crate) enhanced_keys_supported: bool,
    pub(crate) auth_manager: Arc<AuthManager>,
    pub(crate) models_manager: Arc<ModelsManager>,
    // Sink for /feedback submissions.
    pub(crate) feedback: codex_feedback::CodexFeedback,
    pub(crate) is_first_run: bool,
    // Model slug for the session.
    pub(crate) model: String,
}
/// Lifecycle of the "switch model" nudge so it is offered at most once
/// per session.
#[derive(Default)]
enum RateLimitSwitchPromptState {
    /// No prompt has been requested yet.
    #[default]
    Idle,
    /// High usage observed; the prompt should be shown at the next opportunity.
    Pending,
    /// The prompt was already shown; never show it again this session.
    Shown,
}
/// Top-level widget for a single Codex chat session: routes protocol events
/// into history cells, the bottom pane, and the status indicator.
pub(crate) struct ChatWidget {
    app_event_tx: AppEventSender,
    // Channel for submitting `Op`s to codex-core.
    codex_op_tx: UnboundedSender<Op>,
    bottom_pane: BottomPane,
    // Cell currently being built before it is committed to history.
    active_cell: Option<Box<dyn HistoryCell>>,
    config: Config,
    // Current model slug; compared against `NUDGE_MODEL_SLUG` when deciding
    // whether to offer a model switch.
    model: String,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
    session_header: SessionHeader,
    // Message to auto-submit once the session is configured.
    initial_user_message: Option<UserMessage>,
    token_info: Option<TokenUsageInfo>,
    // Most recent rate-limit display data, if any has been received.
    rate_limit_snapshot: Option<RateLimitSnapshotDisplay>,
    plan_type: Option<PlanType>,
    // Tracks which usage-warning thresholds have already fired.
    rate_limit_warnings: RateLimitWarningState,
    rate_limit_switch_prompt: RateLimitSwitchPromptState,
    rate_limit_poller: Option<JoinHandle<()>>,
    // Stream lifecycle controller
    stream_controller: Option<StreamController>,
    running_commands: HashMap<String, RunningCommand>,
    suppressed_exec_calls: HashSet<String>,
    last_unified_wait: Option<UnifiedExecWaitState>,
    task_complete_pending: bool,
    // Per-server MCP startup progress; `Some` only while startup is underway.
    mcp_startup_status: Option<HashMap<String, McpStartupStatus>>,
    // Queue of interruptive UI events deferred during an active write cycle
    interrupts: InterruptManager,
    // Accumulates the current reasoning block text to extract a header
    reasoning_buffer: String,
    // Accumulates full reasoning content for transcript-only recording
    full_reasoning_buffer: String,
    // Current status header shown in the status indicator.
    current_status_header: String,
    // Previous status header to restore after a transient stream retry.
    retry_status_header: Option<String>,
    conversation_id: Option<ConversationId>,
    frame_requester: FrameRequester,
    // Whether to include the initial welcome banner on session configured
    show_welcome_banner: bool,
    // When resuming an existing session (selected via resume picker), avoid an
    // immediate redraw on SessionConfigured to prevent a gratuitous UI flicker.
    suppress_session_configured_redraw: bool,
    // User messages queued while a turn is in progress
    queued_user_messages: VecDeque<UserMessage>,
    // Pending notification to show when unfocused on next Draw
    pending_notification: Option<Notification>,
    // Simple review mode flag; used to adjust layout and banners.
    is_review_mode: bool,
    // Snapshot of token usage to restore after review mode exits.
    pre_review_token_info: Option<Option<TokenUsageInfo>>,
    // Whether to add a final message separator after the last message
    needs_final_message_separator: bool,
    last_rendered_width: std::cell::Cell<Option<usize>>,
    // Feedback sink for /feedback
    feedback: codex_feedback::CodexFeedback,
    // Current session rollout path (if known)
    current_rollout_path: Option<PathBuf>,
}
/// A user-authored message: free text plus any attached image paths.
struct UserMessage {
    text: String,
    image_paths: Vec<PathBuf>,
}
impl From<String> for UserMessage {
    /// Wrap plain text as a message with no image attachments.
    fn from(text: String) -> Self {
        UserMessage {
            text,
            image_paths: Vec::new(),
        }
    }
}
impl From<&str> for UserMessage {
    /// Convenience for string slices; delegates to the owned-`String` impl.
    fn from(text: &str) -> Self {
        UserMessage::from(text.to_string())
    }
}
/// Build the opening message of a session, or `None` when there is nothing
/// to send (no text and no images).
fn create_initial_user_message(text: String, image_paths: Vec<PathBuf>) -> Option<UserMessage> {
    let has_content = !text.is_empty() || !image_paths.is_empty();
    has_content.then(|| UserMessage { text, image_paths })
}
impl ChatWidget {
fn flush_answer_stream_with_separator(&mut self) {
if let Some(mut controller) = self.stream_controller.take()
&& let Some(cell) = controller.finalize()
{
self.add_boxed_history(cell);
}
}
    /// Update the status indicator header and details.
    ///
    /// The header is also cached in `current_status_header` so it can be
    /// restored later (e.g. after a transient stream retry).
    ///
    /// Passing `None` clears any existing details.
    fn set_status(&mut self, header: String, details: Option<String>) {
        self.current_status_header = header.clone();
        self.bottom_pane.update_status(header, details);
    }
    /// Convenience wrapper around [`Self::set_status`];
    /// updates the status indicator header and clears any existing details.
    fn set_status_header(&mut self, header: String) {
        self.set_status(header, None);
    }
    /// If a stream retry temporarily replaced the status header, put the
    /// saved header back and clear the saved value.
    fn restore_retry_status_header_if_present(&mut self) {
        if let Some(header) = self.retry_status_header.take() {
            self.set_status_header(header);
        }
    }
    // --- Small event handlers ---
    /// Apply a `SessionConfigured` event: record session identity and rollout
    /// path, render the session-info banner, replay prior messages when
    /// resuming, and kick off prompt/skill discovery.
    fn on_session_configured(&mut self, event: codex_core::protocol::SessionConfiguredEvent) {
        self.bottom_pane
            .set_history_metadata(event.history_log_id, event.history_entry_count);
        // Clear any stale skills until the fresh list arrives.
        self.set_skills(None);
        self.conversation_id = Some(event.session_id);
        self.current_rollout_path = Some(event.rollout_path.clone());
        // Clone out the fields consumed below; `event` itself is moved into
        // the session-info history cell.
        let initial_messages = event.initial_messages.clone();
        let model_for_header = event.model.clone();
        self.session_header.set_model(&model_for_header);
        self.add_to_history(history_cell::new_session_info(
            &self.config,
            &model_for_header,
            event,
            self.show_welcome_banner,
        ));
        if let Some(messages) = initial_messages {
            self.replay_initial_messages(messages);
        }
        // Ask codex-core to enumerate custom prompts for this session.
        self.submit_op(Op::ListCustomPrompts);
        self.submit_op(Op::ListSkills {
            cwds: Vec::new(),
            force_reload: false,
        });
        // A prompt supplied at startup is submitted once the session is ready.
        if let Some(user_message) = self.initial_user_message.take() {
            self.submit_user_message(user_message);
        }
        if !self.suppress_session_configured_redraw {
            self.request_redraw();
        }
    }
    /// Install (or clear, with `None`) the skill list held by the bottom pane.
    fn set_skills(&mut self, skills: Option<Vec<SkillMetadata>>) {
        self.bottom_pane.set_skills(skills);
    }
    /// Derive the skill set for the configured working directory from a
    /// `ListSkills` response and install it in the bottom pane.
    fn set_skills_from_response(&mut self, response: &ListSkillsResponseEvent) {
        let skills = skills_for_cwd(&self.config.cwd, &response.skills);
        self.set_skills(Some(skills));
    }
    /// Open the free-form feedback note overlay.
    ///
    /// `include_logs` controls whether the current session rollout path is
    /// passed along so logs can accompany the report.
    pub(crate) fn open_feedback_note(
        &mut self,
        category: crate::app_event::FeedbackCategory,
        include_logs: bool,
    ) {
        // Build a fresh snapshot at the time of opening the note overlay.
        let snapshot = self.feedback.snapshot(self.conversation_id);
        let rollout = if include_logs {
            self.current_rollout_path.clone()
        } else {
            None
        };
        let view = crate::bottom_pane::FeedbackNoteView::new(
            category,
            snapshot,
            rollout,
            self.app_event_tx.clone(),
            include_logs,
        );
        self.bottom_pane.show_view(Box::new(view));
        self.request_redraw();
    }
    /// Show the feedback upload consent selection view for `category`,
    /// offering to attach the current session rollout.
    pub(crate) fn open_feedback_consent(&mut self, category: crate::app_event::FeedbackCategory) {
        let params = crate::bottom_pane::feedback_upload_consent_params(
            self.app_event_tx.clone(),
            category,
            self.current_rollout_path.clone(),
        );
        self.bottom_pane.show_selection_view(params);
        self.request_redraw();
    }
    /// Handle the final agent message for a turn.
    fn on_agent_message(&mut self, message: String) {
        // If we have a stream_controller, then the final agent message is redundant and will be a
        // duplicate of what has already been streamed.
        if self.stream_controller.is_none() {
            self.handle_streaming_delta(message);
        }
        self.flush_answer_stream_with_separator();
        self.handle_stream_finished();
        self.request_redraw();
    }
    /// Stream an incremental chunk of the agent's answer into the UI.
    fn on_agent_message_delta(&mut self, delta: String) {
        self.handle_streaming_delta(delta);
    }
fn on_agent_reasoning_delta(&mut self, delta: String) {
// For reasoning deltas, do not stream to history. Accumulate the
// current reasoning block and extract the first bold element
// (between **/**) as the chunk header. Show this header as status.
self.reasoning_buffer.push_str(&delta);
if let Some(header) = extract_first_bold(&self.reasoning_buffer) {
// Update the shimmer header to the extracted reasoning chunk header.
self.set_status_header(header);
} else {
// Fallback while we don't yet have a bold header: leave existing header as-is.
}
self.request_redraw();
}
    /// Close the current reasoning block: record its full text as a
    /// transcript-only summary cell, then reset both reasoning buffers.
    fn on_agent_reasoning_final(&mut self) {
        // At the end of a reasoning block, record transcript-only content.
        self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
        if !self.full_reasoning_buffer.is_empty() {
            let cell =
                history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone());
            self.add_boxed_history(cell);
        }
        self.reasoning_buffer.clear();
        self.full_reasoning_buffer.clear();
        self.request_redraw();
    }
    /// Move the current reasoning block into the transcript accumulator and
    /// begin a fresh block for header extraction.
    fn on_reasoning_section_break(&mut self) {
        // Start a new reasoning block for header extraction and accumulate transcript.
        self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
        self.full_reasoning_buffer.push_str("\n\n");
        self.reasoning_buffer.clear();
    }
    // Raw reasoning uses the same flow as summarized reasoning
    /// Reset per-turn UI state when the agent begins working: mark the task
    /// as running, show the "Working" header, and clear reasoning buffers.
    fn on_task_started(&mut self) {
        self.bottom_pane.clear_ctrl_c_quit_hint();
        self.bottom_pane.set_task_running(true);
        // A new turn invalidates any header saved across a stream retry.
        self.retry_status_header = None;
        self.bottom_pane.set_interrupt_hint_visible(true);
        self.set_status_header(String::from("Working"));
        self.full_reasoning_buffer.clear();
        self.reasoning_buffer.clear();
        self.request_redraw();
    }
    /// Handle normal completion of a turn: flush remaining stream output,
    /// clear running-command state, and begin the next queued message if any.
    fn on_task_complete(&mut self, last_agent_message: Option<String>) {
        // If a stream is currently active, finalize it.
        self.flush_answer_stream_with_separator();
        // Mark task stopped and request redraw now that all content is in history.
        self.bottom_pane.set_task_running(false);
        self.running_commands.clear();
        self.suppressed_exec_calls.clear();
        self.last_unified_wait = None;
        self.request_redraw();
        // If there is a queued user message, send exactly one now to begin the next turn.
        self.maybe_send_next_queued_input();
        // Emit a notification when the turn completes (suppressed if focused).
        self.notify(Notification::AgentTurnComplete {
            response: last_agent_message.unwrap_or_default(),
        });
        self.maybe_show_pending_rate_limit_prompt();
    }
pub(crate) fn set_token_info(&mut self, info: Option<TokenUsageInfo>) {
match info {
Some(info) => self.apply_token_info(info),
None => {
self.bottom_pane.set_context_window(None, None);
self.token_info = None;
}
}
}
    /// Cache `info` and push the derived context-window stats (remaining
    /// percent, with raw used tokens as a fallback) to the bottom pane.
    fn apply_token_info(&mut self, info: TokenUsageInfo) {
        let percent = self.context_remaining_percent(&info);
        let used_tokens = self.context_used_tokens(&info, percent.is_some());
        self.bottom_pane.set_context_window(percent, used_tokens);
        self.token_info = Some(info);
    }
fn context_remaining_percent(&self, info: &TokenUsageInfo) -> Option<i64> {
info.model_context_window.map(|window| {
info.last_token_usage
.percent_of_context_window_remaining(window)
})
}
fn context_used_tokens(&self, info: &TokenUsageInfo, percent_known: bool) -> Option<i64> {
if percent_known {
return None;
}
Some(info.total_token_usage.tokens_in_context_window())
}
fn restore_pre_review_token_info(&mut self) {
if let Some(saved) = self.pre_review_token_info.take() {
match saved {
Some(info) => self.apply_token_info(info),
None => {
self.bottom_pane.set_context_window(None, None);
self.token_info = None;
}
}
}
}
    /// Ingest a rate-limit snapshot from the backend (or clear it with `None`).
    ///
    /// Carries forward previously seen credits/plan data when the snapshot
    /// omits them, emits one-time usage warnings into history, and arms the
    /// model-switch prompt once usage crosses
    /// `RATE_LIMIT_SWITCH_PROMPT_THRESHOLD`.
    pub(crate) fn on_rate_limit_snapshot(&mut self, snapshot: Option<RateLimitSnapshot>) {
        if let Some(mut snapshot) = snapshot {
            // Some snapshots omit credits; keep the last known values so the
            // display does not regress to "unknown".
            if snapshot.credits.is_none() {
                snapshot.credits = self
                    .rate_limit_snapshot
                    .as_ref()
                    .and_then(|display| display.credits.as_ref())
                    .map(|credits| CreditsSnapshot {
                        has_credits: credits.has_credits,
                        unlimited: credits.unlimited,
                        balance: credits.balance.clone(),
                    });
            }
            // Prefer the snapshot's plan type, falling back to the cached one.
            self.plan_type = snapshot.plan_type.or(self.plan_type);
            let warnings = self.rate_limit_warnings.take_warnings(
                snapshot
                    .secondary
                    .as_ref()
                    .map(|window| window.used_percent),
                snapshot
                    .secondary
                    .as_ref()
                    .and_then(|window| window.window_minutes),
                snapshot.primary.as_ref().map(|window| window.used_percent),
                snapshot
                    .primary
                    .as_ref()
                    .and_then(|window| window.window_minutes),
            );
            // Either window crossing the prompt threshold counts as high usage.
            let high_usage = snapshot
                .secondary
                .as_ref()
                .map(|w| w.used_percent >= RATE_LIMIT_SWITCH_PROMPT_THRESHOLD)
                .unwrap_or(false)
                || snapshot
                    .primary
                    .as_ref()
                    .map(|w| w.used_percent >= RATE_LIMIT_SWITCH_PROMPT_THRESHOLD)
                    .unwrap_or(false);
            // Arm the switch prompt only once, and never when already on the
            // nudge model or when the prompt is hidden by configuration.
            if high_usage
                && !self.rate_limit_switch_prompt_hidden()
                && self.model != NUDGE_MODEL_SLUG
                && !matches!(
                    self.rate_limit_switch_prompt,
                    RateLimitSwitchPromptState::Shown
                )
            {
                self.rate_limit_switch_prompt = RateLimitSwitchPromptState::Pending;
            }
            let display = crate::status::rate_limit_snapshot_display(&snapshot, Local::now());
            self.rate_limit_snapshot = Some(display);
            if !warnings.is_empty() {
                for warning in warnings {
                    self.add_to_history(history_cell::new_warning_event(warning));
                }
                self.request_redraw();
            }
        } else {
            self.rate_limit_snapshot = None;
        }
    }
    /// Finalize any active exec as failed and stop/clear running UI state.
    ///
    /// Used on error/interrupt paths; normal completion goes through
    /// `on_task_complete` instead.
    fn finalize_turn(&mut self) {
        // Ensure any spinner is replaced by a red ✗ and flushed into history.
        self.finalize_active_cell_as_failed();
        // Reset running state and clear streaming buffers.
        self.bottom_pane.set_task_running(false);
        self.running_commands.clear();
        self.suppressed_exec_calls.clear();
        self.last_unified_wait = None;
        self.stream_controller = None;
        self.maybe_show_pending_rate_limit_prompt();
    }
    /// Handle a fatal turn error: tear down running state, record the error
    /// in history, then move on to the next queued user input, if any.
    fn on_error(&mut self, message: String) {
        self.finalize_turn();
        self.add_to_history(history_cell::new_error_event(message));
        self.request_redraw();
        // After an error ends the turn, try sending the next queued input.
        self.maybe_send_next_queued_input();
    }
    /// Append a warning cell to history and request a redraw.
    fn on_warning(&mut self, message: impl Into<String>) {
        self.add_to_history(history_cell::new_warning_event(message.into()));
        self.request_redraw();
    }
fn on_mcp_startup_update(&mut self, ev: McpStartupUpdateEvent) {
let mut status = self.mcp_startup_status.take().unwrap_or_default();
if let McpStartupStatus::Failed { error } = &ev.status {
self.on_warning(error);
}
status.insert(ev.server, ev.status);
self.mcp_startup_status = Some(status);
self.bottom_pane.set_task_running(true);
if let Some(current) = &self.mcp_startup_status {
let total = current.len();
let mut starting: Vec<_> = current
.iter()
.filter_map(|(name, state)| {
if matches!(state, McpStartupStatus::Starting) {
Some(name)
} else {
None
}
})
.collect();
starting.sort();
if let Some(first) = starting.first() {
let completed = total.saturating_sub(starting.len());
let max_to_show = 3;
let mut to_show: Vec<String> = starting
.iter()
.take(max_to_show)
.map(ToString::to_string)
.collect();
if starting.len() > max_to_show {
to_show.push("…".to_string());
}
let header = if total > 1 {
format!(
"Starting MCP servers ({completed}/{total}): {}",
to_show.join(", ")
)
} else {
format!("Booting MCP server: {first}")
};
self.set_status_header(header);
}
}
self.request_redraw();
}
fn on_mcp_startup_complete(&mut self, ev: McpStartupCompleteEvent) {
let mut parts = Vec::new();
if !ev.failed.is_empty() {
let failed_servers: Vec<_> = ev.failed.iter().map(|f| f.server.clone()).collect();
parts.push(format!("failed: {}", failed_servers.join(", ")));
}
if !ev.cancelled.is_empty() {
self.on_warning(format!(
"MCP startup interrupted. The following servers were not initialized: {}",
ev.cancelled.join(", ")
));
}
if !parts.is_empty() {
self.on_warning(format!("MCP startup incomplete ({})", parts.join("; ")));
}
self.mcp_startup_status = None;
self.bottom_pane.set_task_running(false);
self.maybe_send_next_queued_input();
self.request_redraw();
}
/// Handle a turn aborted due to user interrupt (Esc).
/// When there are queued user messages, restore them into the composer
/// separated by newlines rather than auto‑submitting the next one.
fn on_interrupted_turn(&mut self, reason: TurnAbortReason) {
// Finalize, log a gentle prompt, and clear running state.
self.finalize_turn();
if reason != TurnAbortReason::ReviewEnded {
self.add_to_history(history_cell::new_error_event(
"Conversation interrupted - tell the model what to do differently. Something went wrong? Hit `/feedback` to report the issue.".to_owned(),
));
}
// If any messages were queued during the task, restore them into the composer.
if !self.queued_user_messages.is_empty() {
let queued_text = self
.queued_user_messages
.iter()
.map(|m| m.text.clone())
.collect::<Vec<_>>()
.join("\n");
let existing_text = self.bottom_pane.composer_text();
let combined = if existing_text.is_empty() {
queued_text
} else if queued_text.is_empty() {
existing_text
} else {
format!("{queued_text}\n{existing_text}")
};
self.bottom_pane.set_composer_text(combined);
// Clear the queue and update the status indicator list.
self.queued_user_messages.clear();
self.refresh_queued_user_messages();
}
self.request_redraw();
}
    /// Render an updated agent plan as a history cell.
    fn on_plan_update(&mut self, update: UpdatePlanArgs) {
        self.add_to_history(history_cell::new_plan_update(update));
    }
    /// Queue or immediately present an exec-command approval request.
    fn on_exec_approval_request(&mut self, id: String, ev: ExecApprovalRequestEvent) {
        // Clones are needed because `defer_or_handle` takes two closures and
        // only one of them will actually consume the values.
        let id2 = id.clone();
        let ev2 = ev.clone();
        self.defer_or_handle(
            |q| q.push_exec_approval(id, ev),
            |s| s.handle_exec_approval_now(id2, ev2),
        );
    }
    /// Queue or immediately present an apply-patch approval request.
    fn on_apply_patch_approval_request(&mut self, id: String, ev: ApplyPatchApprovalRequestEvent) {
        // Clones mirror `on_exec_approval_request`: only one closure runs.
        let id2 = id.clone();
        let ev2 = ev.clone();
        self.defer_or_handle(
            |q| q.push_apply_patch_approval(id, ev),
            |s| s.handle_apply_patch_approval_now(id2, ev2),
        );
    }
    /// Queue or immediately present an elicitation request.
    fn on_elicitation_request(&mut self, ev: ElicitationRequestEvent) {
        let ev2 = ev.clone();
        self.defer_or_handle(
            |q| q.push_elicitation(ev),
            |s| s.handle_elicitation_request_now(ev2),
        );
    }
    /// Handle the start of an exec command, flushing any streaming answer
    /// first so history ordering is preserved.
    fn on_exec_command_begin(&mut self, ev: ExecCommandBeginEvent) {
        self.flush_answer_stream_with_separator();
        let ev2 = ev.clone();
        self.defer_or_handle(|q| q.push_exec_begin(ev), |s| s.handle_exec_begin_now(ev2));
    }
    /// Currently a no-op: streaming exec output is not rendered yet.
    fn on_exec_command_output_delta(
        &mut self,
        _ev: codex_core::protocol::ExecCommandOutputDeltaEvent,
    ) {
        // TODO: Handle streaming exec output if/when implemented
    }
    /// Currently a no-op: terminal-interaction events await a UI design.
    fn on_terminal_interaction(&mut self, _ev: TerminalInteractionEvent) {
        // TODO: Handle once design is ready
    }
    /// Record the start of a patch application as a diff cell in history.
    fn on_patch_apply_begin(&mut self, event: PatchApplyBeginEvent) {
        self.add_to_history(history_cell::new_patch_event(
            event.changes,
            &self.config.cwd,
        ));
    }
    /// Record a view-image tool invocation in history, flushing any active
    /// answer stream first so ordering is preserved.
    fn on_view_image_tool_call(&mut self, event: ViewImageToolCallEvent) {
        self.flush_answer_stream_with_separator();
        self.add_to_history(history_cell::new_view_image_tool_call(
            event.path,
            &self.config.cwd,
        ));
        self.request_redraw();
    }
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/lib.rs | codex-rs/tui2/src/lib.rs | // Forbid accidental stdout/stderr writes in the *library* portion of the TUI.
// The standalone `codex-tui` binary prints a short help message before the
// alternate‑screen mode starts; that file opts‑out locally via `allow`.
#![deny(clippy::print_stdout, clippy::print_stderr)]
#![deny(clippy::disallowed_methods)]
use additional_dirs::add_dir_warning_message;
use app::App;
pub use app::AppExitInfo;
use codex_app_server_protocol::AuthMode;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;
use codex_core::auth::enforce_login_restrictions;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::config::resolve_oss_provider;
use codex_core::find_conversation_path_by_id_str;
use codex_core::get_platform_sandbox;
use codex_core::protocol::AskForApproval;
use codex_protocol::config_types::SandboxMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use std::fs::OpenOptions;
use std::path::PathBuf;
use tracing::error;
use tracing_appender::non_blocking;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::prelude::*;
mod additional_dirs;
mod app;
mod app_backtrack;
mod app_event;
mod app_event_sender;
mod ascii_animation;
mod bottom_pane;
mod chatwidget;
mod cli;
mod clipboard_copy;
mod clipboard_paste;
mod color;
pub mod custom_terminal;
mod diff_render;
mod exec_cell;
mod exec_command;
mod file_search;
mod frames;
mod get_git_diff;
mod history_cell;
pub mod insert_history;
mod key_hint;
pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
mod model_migration;
mod notifications;
pub mod onboarding;
mod oss_selection;
mod pager_overlay;
pub mod public_widgets;
mod render;
mod resume_picker;
mod selection_list;
mod session_log;
mod shimmer;
mod slash_command;
mod status;
mod status_indicator_widget;
mod streaming;
mod style;
mod terminal_palette;
mod text_formatting;
mod tooltips;
mod transcript_copy;
mod transcript_copy_ui;
mod transcript_multi_click;
mod transcript_render;
mod transcript_selection;
mod transcript_view_cache;
mod tui;
mod ui_consts;
pub mod update_action;
mod update_prompt;
mod updates;
mod version;
mod wrapping;
#[cfg(test)]
pub mod test_backend;
use crate::onboarding::TrustDirectorySelection;
use crate::onboarding::onboarding_screen::OnboardingScreenArgs;
use crate::onboarding::onboarding_screen::run_onboarding_app;
use crate::tui::Tui;
pub use cli::Cli;
pub use markdown_render::render_markdown_text;
pub use public_widgets::composer_input::ComposerAction;
pub use public_widgets::composer_input::ComposerInput;
use std::io::Write as _;
// (tests access modules directly within the crate)
/// Entry point for the TUI: resolves CLI flags and config, sets up logging
/// and telemetry, then hands off to `run_ratatui_app`.
pub async fn run_main(
    mut cli: Cli,
    codex_linux_sandbox_exe: Option<PathBuf>,
) -> std::io::Result<AppExitInfo> {
    // `--full-auto` and `--dangerously-bypass…` are shorthands for specific
    // sandbox/approval combinations; otherwise honor the individual flags.
    let (sandbox_mode, approval_policy) = if cli.full_auto {
        (
            Some(SandboxMode::WorkspaceWrite),
            Some(AskForApproval::OnRequest),
        )
    } else if cli.dangerously_bypass_approvals_and_sandbox {
        (
            Some(SandboxMode::DangerFullAccess),
            Some(AskForApproval::Never),
        )
    } else {
        (
            cli.sandbox_mode.map(Into::<SandboxMode>::into),
            cli.approval_policy.map(Into::into),
        )
    };
    // Map the legacy --search flag to the new feature toggle.
    if cli.web_search {
        cli.config_overrides
            .raw_overrides
            .push("features.web_search_request=true".to_string());
    }
    let raw_overrides = cli.config_overrides.raw_overrides.clone();
    let overrides_cli = codex_common::CliConfigOverrides { raw_overrides };
    // Parse `-c` overrides from the CLI.
    let cli_kv_overrides = match overrides_cli.parse_overrides() {
        Ok(v) => v,
        #[allow(clippy::print_stderr)]
        Err(e) => {
            eprintln!("Error parsing -c overrides: {e}");
            std::process::exit(1);
        }
    };
    // we load config.toml here to determine project state.
    #[allow(clippy::print_stderr)]
    let codex_home = match find_codex_home() {
        Ok(codex_home) => codex_home.to_path_buf(),
        Err(err) => {
            eprintln!("Error finding codex home: {err}");
            std::process::exit(1);
        }
    };
    let cwd = cli.cwd.clone();
    let config_cwd = match cwd.as_deref() {
        Some(path) => AbsolutePathBuf::from_absolute_path(path.canonicalize()?)?,
        None => AbsolutePathBuf::current_dir()?,
    };
    #[allow(clippy::print_stderr)]
    let config_toml = match load_config_as_toml_with_cli_overrides(
        &codex_home,
        &config_cwd,
        cli_kv_overrides.clone(),
    )
    .await
    {
        Ok(config_toml) => config_toml,
        Err(err) => {
            eprintln!("Error loading config.toml: {err}");
            std::process::exit(1);
        }
    };
    // When using `--oss`, force the built‑in `oss` model provider, resolving
    // it from flags/config or by prompting the user.
    let model_provider_override = if cli.oss {
        let resolved = resolve_oss_provider(
            cli.oss_provider.as_deref(),
            &config_toml,
            cli.config_profile.clone(),
        );
        if let Some(provider) = resolved {
            Some(provider)
        } else {
            // No provider configured, prompt the user
            let provider = oss_selection::select_oss_provider(&codex_home).await?;
            if provider == "__CANCELLED__" {
                return Err(std::io::Error::other(
                    "OSS provider selection was cancelled by user",
                ));
            }
            Some(provider)
        }
    } else {
        None
    };
    // When using `--oss`, let the bootstrapper pick the model based on selected provider
    let model = if let Some(model) = &cli.model {
        Some(model.clone())
    } else if cli.oss {
        // Use the provider from model_provider_override
        model_provider_override
            .as_ref()
            .and_then(|provider_id| get_default_model_for_oss_provider(provider_id))
            .map(std::borrow::ToOwned::to_owned)
    } else {
        None // No model specified, will use the default.
    };
    let additional_dirs = cli.add_dir.clone();
    let overrides = ConfigOverrides {
        model,
        review_model: None,
        approval_policy,
        sandbox_mode,
        cwd,
        model_provider: model_provider_override.clone(),
        config_profile: cli.config_profile.clone(),
        codex_linux_sandbox_exe,
        base_instructions: None,
        developer_instructions: None,
        compact_prompt: None,
        include_apply_patch_tool: None,
        show_raw_agent_reasoning: cli.oss.then_some(true),
        tools_web_search_request: None,
        additional_writable_roots: additional_dirs,
    };
    let config = load_config_or_exit(cli_kv_overrides.clone(), overrides.clone()).await;
    if let Some(warning) = add_dir_warning_message(&cli.add_dir, config.sandbox_policy.get()) {
        #[allow(clippy::print_stderr)]
        {
            eprintln!("Error adding directories: {warning}");
            std::process::exit(1);
        }
    }
    #[allow(clippy::print_stderr)]
    if let Err(err) = enforce_login_restrictions(&config).await {
        eprintln!("{err}");
        std::process::exit(1);
    }
    let active_profile = config.active_profile.clone();
    let log_dir = codex_core::config::log_dir(&config)?;
    std::fs::create_dir_all(&log_dir)?;
    // Open (or create) your log file, appending to it.
    let mut log_file_opts = OpenOptions::new();
    log_file_opts.create(true).append(true);
    // Ensure the file is only readable and writable by the current user.
    // Doing the equivalent to `chmod 600` on Windows is quite a bit more code
    // and requires the Windows API crates, so we can reconsider that when
    // Codex CLI is officially supported on Windows.
    #[cfg(unix)]
    {
        use std::os::unix::fs::OpenOptionsExt;
        log_file_opts.mode(0o600);
    }
    let log_file = log_file_opts.open(log_dir.join("codex-tui.log"))?;
    // Wrap file in non‑blocking writer.
    let (non_blocking, _guard) = non_blocking(log_file);
    // use RUST_LOG env var, default to info for codex crates.
    let env_filter = || {
        EnvFilter::try_from_default_env().unwrap_or_else(|_| {
            EnvFilter::new("codex_core=info,codex_tui=info,codex_rmcp_client=info")
        })
    };
    let file_layer = tracing_subscriber::fmt::layer()
        .with_writer(non_blocking)
        // `with_target(true)` is the default, but we previously disabled it for file output.
        // Keep it enabled so we can selectively enable targets via `RUST_LOG=...` and then
        // grep for a specific module/target while troubleshooting.
        .with_target(true)
        .with_ansi(false)
        .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
        .with_filter(env_filter());
    let feedback = codex_feedback::CodexFeedback::new();
    let feedback_layer = feedback.logger_layer();
    let feedback_metadata_layer = feedback.metadata_layer();
    if cli.oss && model_provider_override.is_some() {
        // We're in the oss section, so provider_id should be Some
        // Let's handle None case gracefully though just in case
        let provider_id = match model_provider_override.as_ref() {
            Some(id) => id,
            None => {
                error!("OSS provider unexpectedly not set when oss flag is used");
                return Err(std::io::Error::other(
                    "OSS provider not set but oss flag was used",
                ));
            }
        };
        ensure_oss_provider_ready(provider_id, &config).await?;
    }
    let otel = codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"));
    #[allow(clippy::print_stderr)]
    let otel = match otel {
        Ok(otel) => otel,
        Err(e) => {
            eprintln!("Could not create otel exporter: {e}");
            std::process::exit(1);
        }
    };
    let otel_logger_layer = otel.as_ref().and_then(|o| o.logger_layer());
    let otel_tracing_layer = otel.as_ref().and_then(|o| o.tracing_layer());
    let _ = tracing_subscriber::registry()
        .with(file_layer)
        .with(feedback_layer)
        .with(feedback_metadata_layer)
        .with(otel_tracing_layer)
        .with(otel_logger_layer)
        .try_init();
    let terminal_info = codex_core::terminal::terminal_info();
    tracing::info!(terminal = ?terminal_info, "Detected terminal info");
    run_ratatui_app(
        cli,
        config,
        overrides,
        cli_kv_overrides,
        active_profile,
        feedback,
    )
    .await
    .map_err(|err| std::io::Error::other(err.to_string()))
}
/// Drive the terminal UI: onboarding, resume selection, and the main chat
/// app, restoring the terminal on every exit path.
async fn run_ratatui_app(
    cli: Cli,
    initial_config: Config,
    overrides: ConfigOverrides,
    cli_kv_overrides: Vec<(String, toml::Value)>,
    active_profile: Option<String>,
    feedback: codex_feedback::CodexFeedback,
) -> color_eyre::Result<AppExitInfo> {
    color_eyre::install()?;
    // Forward panic reports through tracing so they appear in the UI status
    // line, but do not swallow the default/color-eyre panic handler.
    // Chain to the previous hook so users still get a rich panic report
    // (including backtraces) after we restore the terminal.
    let prev_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |info| {
        tracing::error!("panic: {info}");
        prev_hook(info);
    }));
    let mut terminal = tui::init()?;
    terminal.clear()?;
    let mut tui = Tui::new(terminal);
    // Release builds may offer a self-update before anything else renders;
    // skipped when a prompt was passed so scripted runs are not blocked.
    #[cfg(not(debug_assertions))]
    {
        use crate::update_prompt::UpdatePromptOutcome;
        let skip_update_prompt = cli.prompt.as_ref().is_some_and(|prompt| !prompt.is_empty());
        if !skip_update_prompt {
            match update_prompt::run_update_prompt_if_needed(&mut tui, &initial_config).await? {
                UpdatePromptOutcome::Continue => {}
                UpdatePromptOutcome::RunUpdate(action) => {
                    crate::tui::restore()?;
                    return Ok(AppExitInfo {
                        token_usage: codex_core::protocol::TokenUsage::default(),
                        conversation_id: None,
                        update_action: Some(action),
                        session_lines: Vec::new(),
                    });
                }
            }
        }
    }
    // Initialize high-fidelity session event logging if enabled.
    session_log::maybe_init(&initial_config);
    let auth_manager = AuthManager::shared(
        initial_config.codex_home.clone(),
        false,
        initial_config.cli_auth_credentials_store_mode,
    );
    let login_status = get_login_status(&initial_config);
    let should_show_trust_screen = should_show_trust_screen(&initial_config);
    let should_show_onboarding =
        should_show_onboarding(login_status, &initial_config, should_show_trust_screen);
    let config = if should_show_onboarding {
        let onboarding_result = run_onboarding_app(
            OnboardingScreenArgs {
                show_login_screen: should_show_login_screen(login_status, &initial_config),
                show_trust_screen: should_show_trust_screen,
                login_status,
                auth_manager: auth_manager.clone(),
                config: initial_config.clone(),
            },
            &mut tui,
        )
        .await?;
        if onboarding_result.should_exit {
            restore();
            session_log::log_session_end();
            let _ = tui.terminal.clear();
            return Ok(AppExitInfo {
                token_usage: codex_core::protocol::TokenUsage::default(),
                conversation_id: None,
                update_action: None,
                session_lines: Vec::new(),
            });
        }
        // If the user made an explicit decision to trust the directory,
        // reload the config accordingly.
        if onboarding_result
            .directory_trust_decision
            .map(|d| d == TrustDirectorySelection::Trust)
            .unwrap_or(false)
        {
            load_config_or_exit(cli_kv_overrides, overrides).await
        } else {
            initial_config
        }
    } else {
        initial_config
    };
    // Determine resume behavior: explicit id, then resume last, then picker.
    let resume_selection = if let Some(id_str) = cli.resume_session_id.as_deref() {
        match find_conversation_path_by_id_str(&config.codex_home, id_str).await? {
            Some(path) => resume_picker::ResumeSelection::Resume(path),
            None => {
                // Unknown session id: restore the terminal and explain how to
                // pick a session interactively.
                error!("Error finding conversation path: {id_str}");
                restore();
                session_log::log_session_end();
                let _ = tui.terminal.clear();
                if let Err(err) = writeln!(
                    std::io::stdout(),
                    "No saved session found with ID {id_str}. Run `codex resume` without an ID to choose from existing sessions."
                ) {
                    error!("Failed to write resume error message: {err}");
                }
                return Ok(AppExitInfo {
                    token_usage: codex_core::protocol::TokenUsage::default(),
                    conversation_id: None,
                    update_action: None,
                    session_lines: Vec::new(),
                });
            }
        }
    } else if cli.resume_last {
        // Resume the most recent conversation for the current provider,
        // silently starting fresh when none exists.
        let provider_filter = vec![config.model_provider_id.clone()];
        match RolloutRecorder::list_conversations(
            &config.codex_home,
            1,
            None,
            INTERACTIVE_SESSION_SOURCES,
            Some(provider_filter.as_slice()),
            &config.model_provider_id,
        )
        .await
        {
            Ok(page) => page
                .items
                .first()
                .map(|it| resume_picker::ResumeSelection::Resume(it.path.clone()))
                .unwrap_or(resume_picker::ResumeSelection::StartFresh),
            Err(_) => resume_picker::ResumeSelection::StartFresh,
        }
    } else if cli.resume_picker {
        match resume_picker::run_resume_picker(
            &mut tui,
            &config.codex_home,
            &config.model_provider_id,
            cli.resume_show_all,
        )
        .await?
        {
            resume_picker::ResumeSelection::Exit => {
                restore();
                session_log::log_session_end();
                return Ok(AppExitInfo {
                    token_usage: codex_core::protocol::TokenUsage::default(),
                    conversation_id: None,
                    update_action: None,
                    session_lines: Vec::new(),
                });
            }
            other => other,
        }
    } else {
        resume_picker::ResumeSelection::StartFresh
    };
    let Cli { prompt, images, .. } = cli;
    // Run the main chat + transcript UI on the terminal's alternate screen so
    // the entire viewport can be used without polluting normal scrollback. This
    // mirrors the behavior of the legacy TUI but keeps inline mode available
    // for smaller prompts like onboarding and model migration.
    let _ = tui.enter_alt_screen();
    let app_result = App::run(
        &mut tui,
        auth_manager,
        config,
        active_profile,
        prompt,
        images,
        resume_selection,
        feedback,
        should_show_trust_screen, // Proxy to: is it a first run in this directory?
    )
    .await;
    let _ = tui.leave_alt_screen();
    restore();
    // Print any session summary lines to normal scrollback after leaving the
    // alternate screen.
    if let Ok(exit_info) = &app_result {
        let mut stdout = std::io::stdout();
        for line in exit_info.session_lines.iter() {
            let _ = writeln!(stdout, "{line}");
        }
        if !exit_info.session_lines.is_empty() {
            let _ = writeln!(stdout);
        }
    }
    // Mark the end of the recorded session.
    session_log::log_session_end();
    // ignore error when collecting usage – report underlying error instead
    app_result
}
#[expect(
    clippy::print_stderr,
    reason = "TUI should no longer be displayed, so we can write to stderr."
)]
/// Best-effort terminal restoration; on failure, tell the user how to recover.
fn restore() {
    let Err(err) = tui::restore() else {
        return;
    };
    eprintln!(
        "failed to restore terminal. Run `reset` or restart your terminal to recover: {err}"
    );
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Result of probing stored credentials: either the detected auth mode, or none.
pub enum LoginStatus {
    /// Credentials were found; carries the authentication mode in use.
    AuthMode(AuthMode),
    /// No usable credentials were found (or the provider does not use OpenAI auth).
    NotAuthenticated,
}
/// Probe stored credentials and report whether the user is logged in.
///
/// Providers that do not require OpenAI auth always report `NotAuthenticated`.
fn get_login_status(config: &Config) -> LoginStatus {
    if !config.model_provider.requires_openai_auth {
        return LoginStatus::NotAuthenticated;
    }
    // Reading the OpenAI API key is an async operation because it may need
    // to refresh the token. Block on it.
    let codex_home = config.codex_home.clone();
    match CodexAuth::from_auth_storage(&codex_home, config.cli_auth_credentials_store_mode) {
        Ok(Some(auth)) => LoginStatus::AuthMode(auth.mode),
        Ok(None) => LoginStatus::NotAuthenticated,
        Err(err) => {
            error!("Failed to read auth.json: {err}");
            LoginStatus::NotAuthenticated
        }
    }
}
async fn load_config_or_exit(
cli_kv_overrides: Vec<(String, toml::Value)>,
overrides: ConfigOverrides,
) -> Config {
#[allow(clippy::print_stderr)]
match Config::load_with_cli_overrides_and_harness_overrides(cli_kv_overrides, overrides).await {
Ok(config) => config,
Err(err) => {
eprintln!("Error loading configuration: {err}");
std::process::exit(1);
}
}
}
/// Determine if user has configured a sandbox / approval policy,
/// or if the current cwd project is already trusted. If not, we need to
/// show the trust screen.
fn should_show_trust_screen(config: &Config) -> bool {
    let windows_without_sandbox = cfg!(target_os = "windows") && get_platform_sandbox().is_none();
    if windows_without_sandbox {
        // Without the experimental sandbox, native Windows cannot enforce
        // sandboxed write access, so the trust prompt is skipped entirely.
        return false;
    }
    if config.did_user_set_custom_approval_policy_or_sandbox_mode {
        // Respect explicit approval/sandbox overrides made by the user.
        return false;
    }
    // Otherwise show the prompt only when no trust decision exists yet.
    config.active_project.trust_level.is_none()
}
/// Onboarding runs when either the trust screen or the login screen is needed.
fn should_show_onboarding(
    login_status: LoginStatus,
    config: &Config,
    show_trust_screen: bool,
) -> bool {
    show_trust_screen || should_show_login_screen(login_status, config)
}
fn should_show_login_screen(login_status: LoginStatus, config: &Config) -> bool {
    // Only show the login screen for providers that actually require OpenAI auth
    // (OpenAI or equivalents). For OSS/other providers, skip login entirely.
    config.model_provider.requires_openai_auth && login_status == LoginStatus::NotAuthenticated
}
#[cfg(test)]
mod tests {
    use super::*;
    use codex_core::config::ConfigBuilder;
    use codex_core::config::ProjectConfig;
    use serial_test::serial;
    use tempfile::TempDir;
    // Builds a minimal config rooted at a temporary CODEX_HOME directory.
    async fn build_config(temp_dir: &TempDir) -> std::io::Result<Config> {
        ConfigBuilder::default()
            .codex_home(temp_dir.path().to_path_buf())
            .build()
            .await
    }
    #[tokio::test]
    #[serial]
    async fn windows_skips_trust_prompt_without_sandbox() -> std::io::Result<()> {
        let temp_dir = TempDir::new()?;
        let mut config = build_config(&temp_dir).await?;
        config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
        config.active_project = ProjectConfig { trust_level: None };
        // Simulate the experimental Windows sandbox being unavailable.
        config.set_windows_sandbox_globally(false);
        let should_show = should_show_trust_screen(&config);
        if cfg!(target_os = "windows") {
            assert!(
                !should_show,
                "Windows trust prompt should always be skipped on native Windows"
            );
        } else {
            assert!(
                should_show,
                "Non-Windows should still show trust prompt when project is untrusted"
            );
        }
        Ok(())
    }
    #[tokio::test]
    #[serial]
    async fn windows_shows_trust_prompt_with_sandbox() -> std::io::Result<()> {
        let temp_dir = TempDir::new()?;
        let mut config = build_config(&temp_dir).await?;
        config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
        config.active_project = ProjectConfig { trust_level: None };
        // Simulate the experimental Windows sandbox being available.
        config.set_windows_sandbox_globally(true);
        let should_show = should_show_trust_screen(&config);
        if cfg!(target_os = "windows") {
            assert!(
                should_show,
                "Windows trust prompt should be shown on native Windows with sandbox enabled"
            );
        } else {
            assert!(
                should_show,
                "Non-Windows should still show trust prompt when project is untrusted"
            );
        }
        Ok(())
    }
    #[tokio::test]
    async fn untrusted_project_skips_trust_prompt() -> std::io::Result<()> {
        use codex_protocol::config_types::TrustLevel;
        let temp_dir = TempDir::new()?;
        let mut config = build_config(&temp_dir).await?;
        config.did_user_set_custom_approval_policy_or_sandbox_mode = false;
        // An explicit Untrusted decision still counts as a trust decision.
        config.active_project = ProjectConfig {
            trust_level: Some(TrustLevel::Untrusted),
        };
        let should_show = should_show_trust_screen(&config);
        assert!(
            !should_show,
            "Trust prompt should not be shown for projects explicitly marked as untrusted"
        );
        Ok(())
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/frames.rs | codex-rs/tui2/src/frames.rs | use std::time::Duration;
// Embed animation frames for each variant at compile time.
/// Expands to a `[&str; 36]` of animation frames, loaded at compile time from
/// `../frames/<dir>/frame_1.txt` through `frame_36.txt`.
macro_rules! frames_for {
    ($dir:literal) => {
        [
            include_str!(concat!("../frames/", $dir, "/frame_1.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_2.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_3.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_4.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_5.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_6.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_7.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_8.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_9.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_10.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_11.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_12.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_13.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_14.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_15.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_16.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_17.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_18.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_19.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_20.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_21.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_22.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_23.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_24.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_25.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_26.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_27.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_28.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_29.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_30.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_31.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_32.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_33.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_34.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_35.txt")),
            include_str!(concat!("../frames/", $dir, "/frame_36.txt")),
        ]
    };
}
// One 36-frame animation set per variant, embedded at compile time via `frames_for!`.
pub(crate) const FRAMES_DEFAULT: [&str; 36] = frames_for!("default");
pub(crate) const FRAMES_CODEX: [&str; 36] = frames_for!("codex");
pub(crate) const FRAMES_OPENAI: [&str; 36] = frames_for!("openai");
pub(crate) const FRAMES_BLOCKS: [&str; 36] = frames_for!("blocks");
pub(crate) const FRAMES_DOTS: [&str; 36] = frames_for!("dots");
pub(crate) const FRAMES_HASH: [&str; 36] = frames_for!("hash");
pub(crate) const FRAMES_HBARS: [&str; 36] = frames_for!("hbars");
pub(crate) const FRAMES_VBARS: [&str; 36] = frames_for!("vbars");
pub(crate) const FRAMES_SHAPES: [&str; 36] = frames_for!("shapes");
pub(crate) const FRAMES_SLUG: [&str; 36] = frames_for!("slug");
// Every frame set, for code that needs to enumerate the available variants.
pub(crate) const ALL_VARIANTS: &[&[&str]] = &[
    &FRAMES_DEFAULT,
    &FRAMES_CODEX,
    &FRAMES_OPENAI,
    &FRAMES_BLOCKS,
    &FRAMES_DOTS,
    &FRAMES_HASH,
    &FRAMES_HBARS,
    &FRAMES_VBARS,
    &FRAMES_SHAPES,
    &FRAMES_SLUG,
];
// Default delay between consecutive animation frames.
pub(crate) const FRAME_TICK_DEFAULT: Duration = Duration::from_millis(80);
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/transcript_copy_ui.rs | codex-rs/tui2/src/transcript_copy_ui.rs | //! Transcript-selection copy UX helpers.
//!
//! # Background
//!
//! TUI2 owns a logical transcript viewport (with history that can live outside the visible buffer),
//! plus its own selection model. Terminal-native selection/copy does not work reliably in this
//! setup because:
//!
//! - The selection can extend outside the current viewport, while terminal selection can't.
//! - We want to exclude non-content regions (like the left gutter) from copied text.
//! - The terminal may intercept some keybindings before the app ever sees them.
//!
//! This module centralizes:
//!
//! - The effective "copy selection" shortcut (so the footer and affordance stay in sync).
//! - Key matching for triggering copy (with terminal quirks handled in one place).
//! - A small on-screen clickable "⧉ copy …" pill rendered near the current selection.
//!
//! # VS Code shortcut rationale
//!
//! VS Code's integrated terminal commonly captures `Ctrl+Shift+C` for its own copy behavior and
//! does not forward the keypress to applications running inside the terminal. Since we can't
//! observe it via crossterm, we advertise and accept `Ctrl+Y` in that environment.
//!
//! Clipboard text reconstruction (preserving indentation, joining soft-wrapped
//! prose, and emitting Markdown source markers) lives in `transcript_copy`.
use codex_core::terminal::TerminalName;
use codex_core::terminal::terminal_info;
use crossterm::event::KeyCode;
use crossterm::event::KeyModifiers;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use unicode_width::UnicodeWidthStr;
use crate::key_hint;
use crate::key_hint::KeyBinding;
use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// The shortcut we advertise and accept for "copy selection".
pub(crate) enum CopySelectionShortcut {
    /// Standard copy chord; used everywhere except VS Code's integrated terminal.
    CtrlShiftC,
    /// Fallback used in VS Code, which captures Ctrl+Shift+C before we see it.
    CtrlY,
}
/// Returns the best shortcut to advertise/accept for "copy selection".
///
/// VS Code's integrated terminal typically captures `Ctrl+Shift+C` for its own copy
/// behavior and does not forward it to applications running inside the terminal, so
/// we cannot reliably observe it via crossterm there and use `Ctrl+Y` instead.
///
/// Detection uses both the reported terminal name (when available) and
/// `VSCODE_IPC_HOOK_CLI`, since the terminal name can still be `Unknown` early
/// during startup in some environments.
pub(crate) fn detect_copy_selection_shortcut() -> CopySelectionShortcut {
    let running_in_vscode = terminal_info().name == TerminalName::VsCode
        || std::env::var_os("VSCODE_IPC_HOOK_CLI").is_some();
    if running_in_vscode {
        CopySelectionShortcut::CtrlY
    } else {
        CopySelectionShortcut::CtrlShiftC
    }
}
/// Maps a copy shortcut to the key binding shown in UI hints.
pub(crate) fn key_binding_for(shortcut: CopySelectionShortcut) -> KeyBinding {
    if shortcut == CopySelectionShortcut::CtrlY {
        key_hint::ctrl(KeyCode::Char('y'))
    } else {
        key_hint::ctrl_shift(KeyCode::Char('c'))
    }
}
/// Whether the given `(ch, modifiers)` should trigger "copy selection".
///
/// Terminal/event edge cases:
/// - Some terminals report `Ctrl+Shift+C` as `Char('C')` with `CONTROL` only, baking the shift
///   into the character. We accept both `c` and `C` in `CtrlShiftC` mode.
/// - Some environments intercept `Ctrl+Shift+C` before the app sees it, so `Ctrl+Y` remains a
///   working fallback even in `CtrlShiftC` mode.
pub(crate) fn is_copy_selection_key(
    shortcut: CopySelectionShortcut,
    ch: char,
    modifiers: KeyModifiers,
) -> bool {
    if !modifiers.contains(KeyModifiers::CONTROL) {
        return false;
    }
    let is_ctrl_y = ch == 'y' && modifiers == KeyModifiers::CONTROL;
    match shortcut {
        CopySelectionShortcut::CtrlY => is_ctrl_y,
        CopySelectionShortcut::CtrlShiftC => {
            let is_shift_c = matches!(ch, 'c' | 'C')
                && (modifiers.contains(KeyModifiers::SHIFT) || ch == 'C');
            // Fallback for environments that intercept Ctrl+Shift+C.
            is_shift_c || is_ctrl_y
        }
    }
}
/// UI state for the on-screen copy affordance shown near an active selection.
///
/// This tracks a `Rect` for hit-testing so we can treat the pill as a clickable button.
#[derive(Debug)]
pub(crate) struct TranscriptCopyUi {
    // Shortcut advertised in the pill and accepted by `is_copy_key`.
    shortcut: CopySelectionShortcut,
    // True while the user is drag-selecting; the pill is hidden during drags.
    dragging: bool,
    // Screen rect of the last rendered pill, or `None` when no pill is visible.
    affordance_rect: Option<Rect>,
}
impl TranscriptCopyUi {
    /// Creates a new instance using the provided shortcut.
    pub(crate) fn new_with_shortcut(shortcut: CopySelectionShortcut) -> Self {
        Self {
            shortcut,
            dragging: false,
            affordance_rect: None,
        }
    }
    /// Key binding to advertise for "copy selection" (footer hint and pill label).
    pub(crate) fn key_binding(&self) -> KeyBinding {
        key_binding_for(self.shortcut)
    }
    /// Whether `(ch, modifiers)` triggers "copy selection" for this UI's shortcut.
    pub(crate) fn is_copy_key(&self, ch: char, modifiers: KeyModifiers) -> bool {
        is_copy_selection_key(self.shortcut, ch, modifiers)
    }
    /// Records whether a drag-selection is in progress; the pill is hidden while dragging.
    pub(crate) fn set_dragging(&mut self, dragging: bool) {
        self.dragging = dragging;
    }
    /// Drops the current hit target so a stale pill can no longer be clicked.
    pub(crate) fn clear_affordance(&mut self) {
        self.affordance_rect = None;
    }
    /// Returns `true` if the last rendered pill contains `(x, y)`.
    ///
    /// `render_copy_pill()` sets `affordance_rect` and `clear_affordance()` clears it, so callers
    /// should treat this as "hit test against the current frame's affordance".
    pub(crate) fn hit_test(&self, x: u16, y: u16) -> bool {
        self.affordance_rect
            .is_some_and(|r| x >= r.x && x < r.right() && y >= r.y && y < r.bottom())
    }
    /// Render the copy "pill" just below the visible end of the selection.
    ///
    /// Inputs are expressed in logical transcript coordinates:
    /// - `anchor`/`head`: `(line_index, column)` in the wrapped transcript (not screen rows).
    /// - `view_top`: first logical line index currently visible in `area`.
    /// - `total_lines`: total number of logical transcript lines.
    ///
    /// Placement details / edge cases:
    /// - We hide the pill while dragging to avoid accidental clicks during selection updates.
    /// - We only render if some part of the selection is visible, and there's room for a line
    ///   below it inside `area`.
    /// - We scan the buffer to find the last non-space cell on each candidate row so the pill can
    ///   sit "near content", not far to the right past trailing whitespace.
    ///
    /// Important: this assumes the transcript content has already been rendered into `buf` for the
    /// current frame, since the placement logic derives `text_end` by inspecting buffer contents.
    pub(crate) fn render_copy_pill(
        &mut self,
        area: Rect,
        buf: &mut Buffer,
        anchor: (usize, u16),
        head: (usize, u16),
        view_top: usize,
        total_lines: usize,
    ) {
        // Reset every frame. If we don't render (e.g. selection is off-screen) we shouldn't keep
        // an old hit target around.
        self.affordance_rect = None;
        if self.dragging || total_lines == 0 {
            return;
        }
        // Skip the transcript gutter (line numbers, diff markers, etc.). Selection/copy operates on
        // transcript content only.
        let base_x = area.x.saturating_add(TRANSCRIPT_GUTTER_COLS);
        let max_x = area.right().saturating_sub(1);
        if base_x > max_x {
            return;
        }
        // Normalize to a start/end pair so the rest of the code can assume forward order.
        let mut start = anchor;
        let mut end = head;
        if (end.0 < start.0) || (end.0 == start.0 && end.1 < start.1) {
            std::mem::swap(&mut start, &mut end);
        }
        // We want to place the pill *near the visible end of the selection*, which means:
        // - Find the last visible transcript line that intersects the selection.
        // - Find the rightmost selected column on that line (clamped to actual rendered text).
        // - Place the pill one row below that point.
        let visible_start = view_top;
        let visible_end = view_top
            .saturating_add(area.height as usize)
            .min(total_lines);
        let mut last_visible_segment: Option<(u16, u16)> = None;
        for (row_index, line_index) in (visible_start..visible_end).enumerate() {
            // Skip lines outside the selection range.
            if line_index < start.0 || line_index > end.0 {
                continue;
            }
            let y = area.y + row_index as u16;
            // Look for the rightmost non-space cell on this row so we can clamp the pill placement
            // to real content. (The transcript renderer often pads the row with spaces.)
            let mut last_text_x = None;
            for x in base_x..=max_x {
                let cell = &buf[(x, y)];
                if cell.symbol() != " " {
                    last_text_x = Some(x);
                }
            }
            let Some(text_end) = last_text_x else {
                continue;
            };
            let line_end_col = if line_index == end.0 {
                end.1
            } else {
                // For multi-line selections, treat intermediate lines as selected "to the end" so
                // the pill doesn't jump left unexpectedly when only the final line has an explicit
                // end column.
                max_x.saturating_sub(base_x)
            };
            let row_sel_end = base_x.saturating_add(line_end_col).min(max_x);
            if row_sel_end < base_x {
                continue;
            }
            // Clamp the selection end to `text_end` so we don't place the pill far to the right on
            // lines that are mostly blank (or padded).
            let to_x = row_sel_end.min(text_end);
            last_visible_segment = Some((y, to_x));
        }
        // If nothing in the selection is visible, don't show the affordance.
        let Some((y, to_x)) = last_visible_segment else {
            return;
        };
        // Place the pill on the row below the last visible selection segment.
        let Some(y) = y.checked_add(1).filter(|y| *y < area.bottom()) else {
            return;
        };
        // Build the pill label first so its display width can be measured.
        let key_label: Span<'static> = self.key_binding().into();
        let key_label = key_label.content.as_ref().to_string();
        let pill_text = format!(" ⧉ copy {key_label} ");
        let pill_width = UnicodeWidthStr::width(pill_text.as_str());
        if pill_width == 0 || area.width == 0 {
            return;
        }
        let pill_width = (pill_width as u16).min(area.width);
        // Prefer a small gap between the selected content and the pill so we don't visually merge
        // into the highlighted selection block.
        let desired_x = to_x.saturating_add(2);
        let max_start_x = area.right().saturating_sub(pill_width);
        let x = if max_start_x < area.x {
            area.x
        } else {
            desired_x.clamp(area.x, max_start_x)
        };
        let pill_area = Rect::new(x, y, pill_width, 1);
        let base_style = Style::new().bg(Color::DarkGray).fg(Color::White);
        let icon_style = base_style.add_modifier(Modifier::BOLD).fg(Color::LightCyan);
        let bold_style = base_style.add_modifier(Modifier::BOLD);
        // Styled spans mirror `pill_text` piece by piece so widths stay in sync.
        let mut spans: Vec<Span<'static>> = vec![
            Span::styled(" ", base_style),
            Span::styled("⧉", icon_style),
            Span::styled(" ", base_style),
            Span::styled("copy", bold_style),
            Span::styled(" ", base_style),
            Span::styled(key_label, base_style),
        ];
        spans.push(Span::styled(" ", base_style));
        Paragraph::new(vec![Line::from(spans)]).render_ref(pill_area, buf);
        self.affordance_rect = Some(pill_area);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use ratatui::buffer::Buffer;
    // Flattens the buffer's cells into a newline-separated string for assertions.
    fn buf_to_string(buf: &Buffer, area: Rect) -> String {
        let mut s = String::new();
        for y in area.y..area.bottom() {
            for x in area.x..area.right() {
                s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' '));
            }
            s.push('\n');
        }
        s
    }
    #[test]
    fn ctrl_y_pill_does_not_include_ctrl_shift_c() {
        let area = Rect::new(0, 0, 60, 3);
        let mut buf = Buffer::empty(area);
        // Fill the buffer with text so the pill has rendered content to anchor against.
        for y in 0..area.height {
            for x in 2..area.width.saturating_sub(1) {
                buf[(x, y)].set_symbol("X");
            }
        }
        let mut ui = TranscriptCopyUi::new_with_shortcut(CopySelectionShortcut::CtrlY);
        ui.render_copy_pill(area, &mut buf, (1, 2), (1, 6), 0, 3);
        let rendered = buf_to_string(&buf, area);
        assert!(rendered.contains("copy"));
        assert!(rendered.contains("ctrl + y"));
        assert!(!rendered.contains("ctrl + shift + c"));
        assert!(ui.affordance_rect.is_some());
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/version.rs | codex-rs/tui2/src/version.rs | /// The current Codex CLI version as embedded at compile time.
pub const CODEX_CLI_VERSION: &str = env!("CARGO_PKG_VERSION");
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/test_backend.rs | codex-rs/tui2/src/test_backend.rs | use std::fmt::{self};
use std::io::Write;
use std::io::{self};
use ratatui::prelude::CrosstermBackend;
use ratatui::backend::Backend;
use ratatui::backend::ClearType;
use ratatui::backend::WindowSize;
use ratatui::buffer::Cell;
use ratatui::layout::Position;
use ratatui::layout::Size;
/// This wraps a CrosstermBackend and a vt100::Parser to mock
/// a "real" terminal.
///
/// Importantly, this wrapper avoids calling any crossterm methods
/// which write to stdout regardless of the writer. This includes:
/// - getting the terminal size
/// - getting the cursor position
pub struct VT100Backend {
    // Crossterm backend whose writer is an in-memory vt100 terminal emulator.
    crossterm_backend: CrosstermBackend<vt100::Parser>,
}
impl VT100Backend {
    /// Creates a new `TestBackend` with the specified width and height.
    pub fn new(width: u16, height: u16) -> Self {
        // Force ANSI color output even though we're not attached to a real tty.
        crossterm::style::Colored::set_ansi_color_disabled(false);
        Self {
            // Third argument 0: no scrollback — presumably only the visible
            // screen is inspected in tests (see `vt100::Parser::new`).
            crossterm_backend: CrosstermBackend::new(vt100::Parser::new(height, width, 0)),
        }
    }
    /// Read-only access to the underlying vt100 parser for assertions.
    pub fn vt100(&self) -> &vt100::Parser {
        self.crossterm_backend.writer()
    }
}
impl Write for VT100Backend {
    // Bytes written to the backend are fed into the in-memory vt100 terminal.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.crossterm_backend.writer_mut().write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.crossterm_backend.writer_mut().flush()
    }
}
impl fmt::Display for VT100Backend {
    // Renders the emulated screen's current contents as plain text.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.crossterm_backend.writer().screen().contents())
    }
}
impl Backend for VT100Backend {
fn draw<'a, I>(&mut self, content: I) -> io::Result<()>
where
I: Iterator<Item = (u16, u16, &'a Cell)>,
{
self.crossterm_backend.draw(content)?;
Ok(())
}
fn hide_cursor(&mut self) -> io::Result<()> {
self.crossterm_backend.hide_cursor()?;
Ok(())
}
fn show_cursor(&mut self) -> io::Result<()> {
self.crossterm_backend.show_cursor()?;
Ok(())
}
fn get_cursor_position(&mut self) -> io::Result<Position> {
Ok(self.vt100().screen().cursor_position().into())
}
fn set_cursor_position<P: Into<Position>>(&mut self, position: P) -> io::Result<()> {
self.crossterm_backend.set_cursor_position(position)
}
fn clear(&mut self) -> io::Result<()> {
self.crossterm_backend.clear()
}
fn clear_region(&mut self, clear_type: ClearType) -> io::Result<()> {
self.crossterm_backend.clear_region(clear_type)
}
fn append_lines(&mut self, line_count: u16) -> io::Result<()> {
self.crossterm_backend.append_lines(line_count)
}
fn size(&self) -> io::Result<Size> {
let (rows, cols) = self.vt100().screen().size();
Ok(Size::new(cols, rows))
}
fn window_size(&mut self) -> io::Result<WindowSize> {
Ok(WindowSize {
columns_rows: self.vt100().screen().size().into(),
// Arbitrary size, we don't rely on this in testing.
pixels: Size {
width: 640,
height: 480,
},
})
}
fn flush(&mut self) -> io::Result<()> {
self.crossterm_backend.writer_mut().flush()
}
fn scroll_region_up(&mut self, region: std::ops::Range<u16>, scroll_by: u16) -> io::Result<()> {
self.crossterm_backend.scroll_region_up(region, scroll_by)
}
fn scroll_region_down(
&mut self,
region: std::ops::Range<u16>,
scroll_by: u16,
) -> io::Result<()> {
self.crossterm_backend.scroll_region_down(region, scroll_by)
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/model_migration.rs | codex-rs/tui2/src/model_migration.rs | use crate::key_hint;
use crate::render::Insets;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::Renderable;
use crate::render::renderable::RenderableExt as _;
use crate::selection_list::selection_option_row;
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use ratatui::prelude::Stylize as _;
use ratatui::prelude::Widget;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use tokio_stream::StreamExt;
/// Outcome of the migration prompt.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum ModelMigrationOutcome {
    /// User chose to switch to the recommended model (also the default outcome).
    Accepted,
    /// User chose to keep their existing model.
    Rejected,
    /// User hit Ctrl+C / Ctrl+D to abort entirely.
    Exit,
}
/// Text content rendered by the migration prompt.
#[derive(Clone)]
pub(crate) struct ModelMigrationCopy {
    // Heading spans rendered after the "> " prefix.
    pub heading: Vec<Span<'static>>,
    // Body lines rendered beneath the heading.
    pub content: Vec<Line<'static>>,
    // When true, the user may decline and a two-option menu is shown.
    pub can_opt_out: bool,
}
/// The two rows shown in the opt-out menu.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MigrationMenuOption {
    TryNewModel,
    UseExistingModel,
}
impl MigrationMenuOption {
    /// Menu options in display order.
    fn all() -> [Self; 2] {
        [Self::TryNewModel, Self::UseExistingModel]
    }

    /// Human-readable label for the selection row.
    fn label(self) -> &'static str {
        if let Self::TryNewModel = self {
            "Try new model"
        } else {
            "Use existing model"
        }
    }
}
/// Builds the prompt copy shown when recommending a switch between models.
///
/// When `target_description` is absent or empty, a generic recommendation line
/// is substituted. When `can_opt_out` is false, the copy tells the user to press
/// enter instead of offering a choice.
pub(crate) fn migration_copy_for_models(
    current_model: &str,
    target_model: &str,
    target_display_name: String,
    target_description: Option<String>,
    can_opt_out: bool,
) -> ModelMigrationCopy {
    let heading = vec![Span::from(format!("Try {target_display_name}")).bold()];
    let description_line = match target_description {
        Some(desc) if !desc.is_empty() => Line::from(desc),
        _ => Line::from(format!(
            "{target_display_name} is recommended for better performance and reliability."
        )),
    };
    let closing_line = if can_opt_out {
        Line::from(format!(
            "You can continue using {current_model} if you prefer."
        ))
    } else {
        Line::from("Press enter to continue".dim())
    };
    let mut content = Vec::with_capacity(5);
    content.push(Line::from(format!(
        "We recommend switching from {current_model} to {target_model}."
    )));
    content.push(Line::from(""));
    content.push(description_line);
    content.push(Line::from(""));
    content.push(closing_line);
    ModelMigrationCopy {
        heading,
        content,
        can_opt_out,
    }
}
/// Shows the migration prompt on the alternate screen and runs its event loop
/// until the user decides (or the event stream ends, which counts as accept).
pub(crate) async fn run_model_migration_prompt(
    tui: &mut Tui,
    copy: ModelMigrationCopy,
) -> ModelMigrationOutcome {
    let alt = AltScreenGuard::enter(tui);
    let mut screen = ModelMigrationScreen::new(alt.tui.frame_requester(), copy);
    // Draw once up front so the prompt is visible before the first event arrives.
    let _ = alt.tui.draw(u16::MAX, |frame| {
        frame.render_widget_ref(&screen, frame.area());
    });
    let events = alt.tui.event_stream();
    tokio::pin!(events);
    while !screen.is_done() {
        if let Some(event) = events.next().await {
            match event {
                TuiEvent::Key(key_event) => screen.handle_key(key_event),
                TuiEvent::Mouse(_) => {}
                TuiEvent::Paste(_) => {}
                TuiEvent::Draw => {
                    let _ = alt.tui.draw(u16::MAX, |frame| {
                        frame.render_widget_ref(&screen, frame.area());
                    });
                }
            }
        } else {
            // Event stream ended: fall back to the accepting outcome.
            screen.accept();
            break;
        }
    }
    screen.outcome()
}
/// State driving the migration prompt's render/event loop.
struct ModelMigrationScreen {
    // Used to schedule redraws after state changes.
    request_frame: FrameRequester,
    // Text content and opt-out capability for the prompt.
    copy: ModelMigrationCopy,
    // True once an outcome has been decided.
    done: bool,
    // Chosen outcome; defaults to `Accepted`.
    outcome: ModelMigrationOutcome,
    // Currently highlighted menu option (relevant only when opting out is allowed).
    highlighted_option: MigrationMenuOption,
}
impl ModelMigrationScreen {
    /// Starts with "accept" as the default outcome and the first option highlighted.
    fn new(request_frame: FrameRequester, copy: ModelMigrationCopy) -> Self {
        Self {
            request_frame,
            copy,
            done: false,
            outcome: ModelMigrationOutcome::Accepted,
            highlighted_option: MigrationMenuOption::TryNewModel,
        }
    }
    // Records the outcome, ends the event loop, and requests a final redraw.
    fn finish_with(&mut self, outcome: ModelMigrationOutcome) {
        self.outcome = outcome;
        self.done = true;
        self.request_frame.schedule_frame();
    }
    fn accept(&mut self) {
        self.finish_with(ModelMigrationOutcome::Accepted);
    }
    fn reject(&mut self) {
        self.finish_with(ModelMigrationOutcome::Rejected);
    }
    fn exit(&mut self) {
        self.finish_with(ModelMigrationOutcome::Exit);
    }
    // Maps the highlighted option to an outcome; without opt-out, always accepts.
    fn confirm_selection(&mut self) {
        if self.copy.can_opt_out {
            match self.highlighted_option {
                MigrationMenuOption::TryNewModel => self.accept(),
                MigrationMenuOption::UseExistingModel => self.reject(),
            }
        } else {
            self.accept();
        }
    }
    // Moves the highlight, scheduling a redraw only when it actually changes.
    fn highlight_option(&mut self, option: MigrationMenuOption) {
        if self.highlighted_option != option {
            self.highlighted_option = option;
            self.request_frame.schedule_frame();
        }
    }
    fn handle_key(&mut self, key_event: KeyEvent) {
        // Ignore key releases so each press is handled exactly once.
        if key_event.kind == KeyEventKind::Release {
            return;
        }
        if is_ctrl_exit_combo(key_event) {
            self.exit();
            return;
        }
        if self.copy.can_opt_out {
            self.handle_menu_key(key_event.code);
        } else if matches!(key_event.code, KeyCode::Esc | KeyCode::Enter) {
            self.accept();
        }
    }
    fn is_done(&self) -> bool {
        self.done
    }
    fn outcome(&self) -> ModelMigrationOutcome {
        self.outcome
    }
}
impl WidgetRef for &ModelMigrationScreen {
    // Clears the area, then renders heading, body copy, and (optionally) the menu.
    fn render_ref(&self, area: ratatui::layout::Rect, buf: &mut ratatui::buffer::Buffer) {
        Clear.render(area, buf);
        let mut column = ColumnRenderable::new();
        column.push("");
        column.push(self.heading_line());
        column.push(Line::from(""));
        self.render_content(&mut column);
        if self.copy.can_opt_out {
            self.render_menu(&mut column);
        }
        column.render(area, buf);
    }
}
impl ModelMigrationScreen {
    /// Handles menu navigation/confirmation keys (opt-out mode only).
    fn handle_menu_key(&mut self, code: KeyCode) {
        match code {
            KeyCode::Up | KeyCode::Char('k') => {
                self.highlight_option(MigrationMenuOption::TryNewModel);
            }
            KeyCode::Down | KeyCode::Char('j') => {
                self.highlight_option(MigrationMenuOption::UseExistingModel);
            }
            // Number keys highlight and immediately confirm the option.
            KeyCode::Char('1') => {
                self.highlight_option(MigrationMenuOption::TryNewModel);
                self.accept();
            }
            KeyCode::Char('2') => {
                self.highlight_option(MigrationMenuOption::UseExistingModel);
                self.reject();
            }
            KeyCode::Enter | KeyCode::Esc => self.confirm_selection(),
            _ => {}
        }
    }
    /// Builds the "> <heading>" line shown at the top of the prompt.
    fn heading_line(&self) -> Line<'static> {
        let mut heading = vec![Span::raw("> ")];
        heading.extend(self.copy.heading.iter().cloned());
        Line::from(heading)
    }
    fn render_content(&self, column: &mut ColumnRenderable) {
        self.render_lines(&self.copy.content, column);
    }
    // Pushes each line as a wrapped paragraph indented two columns.
    fn render_lines(&self, lines: &[Line<'static>], column: &mut ColumnRenderable) {
        for line in lines {
            column.push(
                Paragraph::new(line.clone())
                    .wrap(Wrap { trim: false })
                    .inset(Insets::tlbr(0, 2, 0, 0)),
            );
        }
    }
    /// Renders the two-option menu plus navigation hints (opt-out mode only).
    fn render_menu(&self, column: &mut ColumnRenderable) {
        column.push(Line::from(""));
        column.push(
            Paragraph::new("Choose how you'd like Codex to proceed.")
                .wrap(Wrap { trim: false })
                .inset(Insets::tlbr(0, 2, 0, 0)),
        );
        column.push(Line::from(""));
        for (idx, option) in MigrationMenuOption::all().into_iter().enumerate() {
            column.push(selection_option_row(
                idx,
                option.label().to_string(),
                self.highlighted_option == option,
            ));
        }
        column.push(Line::from(""));
        column.push(
            Line::from(vec![
                "Use ".dim(),
                key_hint::plain(KeyCode::Up).into(),
                "/".dim(),
                key_hint::plain(KeyCode::Down).into(),
                " to move, press ".dim(),
                key_hint::plain(KeyCode::Enter).into(),
                " to confirm".dim(),
            ])
            .inset(Insets::tlbr(0, 2, 0, 0)),
        );
    }
}
// Render the prompt on the terminal's alternate screen so exiting or cancelling
// does not leave a large blank region in the normal scrollback. This does not
// change the prompt's appearance – only where it is drawn.
/// RAII-style guard that enters the alternate screen on construction and
/// leaves it again when dropped.
struct AltScreenGuard<'a> {
    // Borrowed TUI handle used both to enter and to leave the alt screen.
    tui: &'a mut Tui,
}
impl<'a> AltScreenGuard<'a> {
    /// Switch to the alternate screen, ignoring failures (best effort).
    fn enter(tui: &'a mut Tui) -> Self {
        let _ = tui.enter_alt_screen();
        Self { tui }
    }
}
impl Drop for AltScreenGuard<'_> {
    /// Best-effort restore of the normal screen; errors are deliberately
    /// swallowed since we may be tearing down the terminal anyway.
    fn drop(&mut self) {
        let _ = self.tui.leave_alt_screen();
    }
}
/// Returns true for Ctrl+C / Ctrl+D — the combos that abort the prompt.
fn is_ctrl_exit_combo(key_event: KeyEvent) -> bool {
    let ctrl_held = key_event.modifiers.contains(KeyModifiers::CONTROL);
    let exit_char = matches!(key_event.code, KeyCode::Char('c' | 'd'));
    ctrl_held && exit_char
}
// Snapshot and behavior tests for the model-migration prompt screen.
#[cfg(test)]
mod tests {
    use super::ModelMigrationScreen;
    use super::migration_copy_for_models;
    use crate::custom_terminal::Terminal;
    use crate::test_backend::VT100Backend;
    use crate::tui::FrameRequester;
    use crossterm::event::KeyCode;
    use crossterm::event::KeyEvent;
    use insta::assert_snapshot;
    use ratatui::layout::Rect;
    // Baseline snapshot: opt-out menu shown (can_opt_out = true).
    #[test]
    fn prompt_snapshot() {
        let width: u16 = 60;
        let height: u16 = 28;
        let backend = VT100Backend::new(width, height);
        let mut terminal = Terminal::with_options(backend).expect("terminal");
        terminal.set_viewport_area(Rect::new(0, 0, width, height));
        let screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-5.1-codex-mini",
                "gpt-5.1-codex-max",
                "gpt-5.1-codex-max".to_string(),
                Some("Codex-optimized flagship for deep and fast reasoning.".to_string()),
                true,
            ),
        );
        {
            let mut frame = terminal.get_frame();
            frame.render_widget_ref(&screen, frame.area());
        }
        terminal.flush().expect("flush");
        assert_snapshot!("model_migration_prompt", terminal.backend());
    }
    // Copy variant for the general gpt-5 family (no opt-out menu).
    #[test]
    fn prompt_snapshot_gpt5_family() {
        let backend = VT100Backend::new(65, 22);
        let mut terminal = Terminal::with_options(backend).expect("terminal");
        terminal.set_viewport_area(Rect::new(0, 0, 65, 22));
        let screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-5",
                "gpt-5.1",
                "gpt-5.1".to_string(),
                Some("Broad world knowledge with strong general reasoning.".to_string()),
                false,
            ),
        );
        {
            let mut frame = terminal.get_frame();
            frame.render_widget_ref(&screen, frame.area());
        }
        terminal.flush().expect("flush");
        assert_snapshot!("model_migration_prompt_gpt5_family", terminal.backend());
    }
    // Copy variant for gpt-5-codex (no opt-out menu).
    #[test]
    fn prompt_snapshot_gpt5_codex() {
        let backend = VT100Backend::new(60, 22);
        let mut terminal = Terminal::with_options(backend).expect("terminal");
        terminal.set_viewport_area(Rect::new(0, 0, 60, 22));
        let screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-5-codex",
                "gpt-5.1-codex-max",
                "gpt-5.1-codex-max".to_string(),
                Some("Codex-optimized flagship for deep and fast reasoning.".to_string()),
                false,
            ),
        );
        {
            let mut frame = terminal.get_frame();
            frame.render_widget_ref(&screen, frame.area());
        }
        terminal.flush().expect("flush");
        assert_snapshot!("model_migration_prompt_gpt5_codex", terminal.backend());
    }
    // Copy variant for the codex-mini tier (no opt-out menu).
    #[test]
    fn prompt_snapshot_gpt5_codex_mini() {
        let backend = VT100Backend::new(60, 22);
        let mut terminal = Terminal::with_options(backend).expect("terminal");
        terminal.set_viewport_area(Rect::new(0, 0, 60, 22));
        let screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-5-codex-mini",
                "gpt-5.1-codex-mini",
                "gpt-5.1-codex-mini".to_string(),
                Some("Optimized for codex. Cheaper, faster, but less capable.".to_string()),
                false,
            ),
        );
        {
            let mut frame = terminal.get_frame();
            frame.render_widget_ref(&screen, frame.area());
        }
        terminal.flush().expect("flush");
        assert_snapshot!("model_migration_prompt_gpt5_codex_mini", terminal.backend());
    }
    // Esc must accept the migration (not exit) when the menu is shown.
    #[test]
    fn escape_key_accepts_prompt() {
        let mut screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-old",
                "gpt-new",
                "gpt-new".to_string(),
                Some("Latest recommended model for better performance.".to_string()),
                true,
            ),
        );
        // Simulate pressing Escape
        screen.handle_key(KeyEvent::new(
            KeyCode::Esc,
            crossterm::event::KeyModifiers::NONE,
        ));
        assert!(screen.is_done());
        // Esc should not be treated as Exit – it accepts like Enter.
        assert!(matches!(
            screen.outcome(),
            super::ModelMigrationOutcome::Accepted
        ));
    }
    // Down + Enter selects "use existing model", i.e. rejects the upgrade.
    #[test]
    fn selecting_use_existing_model_rejects_upgrade() {
        let mut screen = ModelMigrationScreen::new(
            FrameRequester::test_dummy(),
            migration_copy_for_models(
                "gpt-old",
                "gpt-new",
                "gpt-new".to_string(),
                Some("Latest recommended model for better performance.".to_string()),
                true,
            ),
        );
        screen.handle_key(KeyEvent::new(
            KeyCode::Down,
            crossterm::event::KeyModifiers::NONE,
        ));
        screen.handle_key(KeyEvent::new(
            KeyCode::Enter,
            crossterm::event::KeyModifiers::NONE,
        ));
        assert!(screen.is_done());
        assert!(matches!(
            screen.outcome(),
            super::ModelMigrationOutcome::Rejected
        ));
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/shimmer.rs | codex-rs/tui2/src/shimmer.rs | use std::sync::OnceLock;
use std::time::Duration;
use std::time::Instant;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::text::Span;
use crate::color::blend;
use crate::terminal_palette::default_bg;
use crate::terminal_palette::default_fg;
/// Process-wide anchor instant; initialized lazily on first use so every
/// caller measures against the same reference point.
static PROCESS_START: OnceLock<Instant> = OnceLock::new();
/// Duration since the first call in this process (monotonic).
fn elapsed_since_start() -> Duration {
    PROCESS_START.get_or_init(Instant::now).elapsed()
}
/// Split `text` into per-character spans with a moving highlight band,
/// producing a shimmer effect synchronized to process start time.
///
/// With 16M-color support the band blends the default background color into
/// the default foreground; otherwise it falls back to DIM/plain/BOLD tiers.
pub(crate) fn shimmer_spans(text: &str) -> Vec<Span<'static>> {
    let chars: Vec<char> = text.chars().collect();
    if chars.is_empty() {
        return Vec::new();
    }
    // Use time-based sweep synchronized to process start.
    // Padding extends the sweep period so the band fully exits both edges.
    let padding = 10usize;
    let period = chars.len() + padding * 2;
    let sweep_seconds = 2.0f32;
    let pos_f =
        (elapsed_since_start().as_secs_f32() % sweep_seconds) / sweep_seconds * (period as f32);
    let pos = pos_f as usize;
    let has_true_color = supports_color::on_cached(supports_color::Stream::Stdout)
        .map(|level| level.has_16m)
        .unwrap_or(false);
    let band_half_width = 5.0;
    let mut spans: Vec<Span<'static>> = Vec::with_capacity(chars.len());
    let base_color = default_fg().unwrap_or((128, 128, 128));
    let highlight_color = default_bg().unwrap_or((255, 255, 255));
    for (i, ch) in chars.iter().enumerate() {
        let i_pos = i as isize + padding as isize;
        let pos = pos as isize;
        let dist = (i_pos - pos).abs() as f32;
        // Raised-cosine falloff: 1.0 at the band center, 0.0 at its edges.
        let t = if dist <= band_half_width {
            let x = std::f32::consts::PI * (dist / band_half_width);
            0.5 * (1.0 + x.cos())
        } else {
            0.0
        };
        let style = if has_true_color {
            let highlight = t.clamp(0.0, 1.0);
            let (r, g, b) = blend(highlight_color, base_color, highlight * 0.9);
            // Allow custom RGB colors, as the implementation is thoughtfully
            // adjusting the level of the default foreground color.
            #[allow(clippy::disallowed_methods)]
            {
                Style::default()
                    .fg(Color::Rgb(r, g, b))
                    .add_modifier(Modifier::BOLD)
            }
        } else {
            color_for_level(t)
        };
        spans.push(Span::styled(ch.to_string(), style));
    }
    spans
}
/// Map a shimmer intensity in [0, 1] to a non-true-color fallback style:
/// BOLD for the band center, plain for the shoulders, DIM elsewhere.
fn color_for_level(intensity: f32) -> Style {
    let base = Style::default();
    if intensity >= 0.6 {
        base.add_modifier(Modifier::BOLD)
    } else if intensity >= 0.2 {
        base
    } else {
        base.add_modifier(Modifier::DIM)
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/pager_overlay.rs | codex-rs/tui2/src/pager_overlay.rs | use std::io::Result;
use std::sync::Arc;
use crate::history_cell::HistoryCell;
use crate::history_cell::UserHistoryCell;
use crate::key_hint;
use crate::key_hint::KeyBinding;
use crate::render::Insets;
use crate::render::renderable::InsetRenderable;
use crate::render::renderable::Renderable;
use crate::style::user_message_style;
use crate::tui;
use crate::tui::TuiEvent;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::MouseEvent;
use crossterm::event::MouseEventKind;
use ratatui::buffer::Buffer;
use ratatui::buffer::Cell;
use ratatui::layout::Rect;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Widget;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
/// Full-screen pager overlay: either the live conversation transcript or a
/// static page of pre-rendered content (e.g. help text).
pub(crate) enum Overlay {
    Transcript(TranscriptOverlay),
    Static(StaticOverlay),
}
impl Overlay {
    /// Build a transcript overlay over the given history cells.
    pub(crate) fn new_transcript(cells: Vec<Arc<dyn HistoryCell>>) -> Self {
        Self::Transcript(TranscriptOverlay::new(cells))
    }
    /// Build a static overlay from pre-wrapped lines.
    pub(crate) fn new_static_with_lines(lines: Vec<Line<'static>>, title: String) -> Self {
        Self::Static(StaticOverlay::with_title(lines, title))
    }
    /// Build a static overlay from arbitrary renderables.
    pub(crate) fn new_static_with_renderables(
        renderables: Vec<Box<dyn Renderable>>,
        title: String,
    ) -> Self {
        Self::Static(StaticOverlay::with_renderables(renderables, title))
    }
    /// Delegate an input/draw event to the active overlay variant.
    pub(crate) fn handle_event(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
        match self {
            Overlay::Transcript(o) => o.handle_event(tui, event),
            Overlay::Static(o) => o.handle_event(tui, event),
        }
    }
    /// True once the overlay should be dismissed.
    pub(crate) fn is_done(&self) -> bool {
        match self {
            Overlay::Transcript(o) => o.is_done(),
            Overlay::Static(o) => o.is_done(),
        }
    }
}
// Key bindings recognized by the pager overlays (vi-style j/k and the usual
// less(1)-style paging chords are supported alongside arrows/PageUp/PageDown).
const KEY_UP: KeyBinding = key_hint::plain(KeyCode::Up);
const KEY_DOWN: KeyBinding = key_hint::plain(KeyCode::Down);
const KEY_K: KeyBinding = key_hint::plain(KeyCode::Char('k'));
const KEY_J: KeyBinding = key_hint::plain(KeyCode::Char('j'));
const KEY_PAGE_UP: KeyBinding = key_hint::plain(KeyCode::PageUp);
const KEY_PAGE_DOWN: KeyBinding = key_hint::plain(KeyCode::PageDown);
const KEY_SPACE: KeyBinding = key_hint::plain(KeyCode::Char(' '));
const KEY_SHIFT_SPACE: KeyBinding = key_hint::shift(KeyCode::Char(' '));
const KEY_HOME: KeyBinding = key_hint::plain(KeyCode::Home);
const KEY_END: KeyBinding = key_hint::plain(KeyCode::End);
const KEY_CTRL_F: KeyBinding = key_hint::ctrl(KeyCode::Char('f'));
const KEY_CTRL_D: KeyBinding = key_hint::ctrl(KeyCode::Char('d'));
const KEY_CTRL_B: KeyBinding = key_hint::ctrl(KeyCode::Char('b'));
const KEY_CTRL_U: KeyBinding = key_hint::ctrl(KeyCode::Char('u'));
const KEY_Q: KeyBinding = key_hint::plain(KeyCode::Char('q'));
const KEY_ESC: KeyBinding = key_hint::plain(KeyCode::Esc);
const KEY_ENTER: KeyBinding = key_hint::plain(KeyCode::Enter);
const KEY_CTRL_T: KeyBinding = key_hint::ctrl(KeyCode::Char('t'));
const KEY_CTRL_C: KeyBinding = key_hint::ctrl(KeyCode::Char('c'));
// Common pager navigation hints rendered on the first line
const PAGER_KEY_HINTS: &[(&[KeyBinding], &str)] = &[
    (&[KEY_UP, KEY_DOWN], "to scroll"),
    (&[KEY_PAGE_UP, KEY_PAGE_DOWN], "to page"),
    (&[KEY_HOME, KEY_END], "to jump"),
];
// Render one dimmed line of key hints from (key(s), description) pairs:
// keys in each group are joined with '/', groups are space-separated.
fn render_key_hints(area: Rect, buf: &mut Buffer, pairs: &[(&[KeyBinding], &str)]) {
    // Leading space keeps the hints off the left edge.
    let mut spans: Vec<Span<'static>> = vec![" ".into()];
    for (pair_idx, (keys, desc)) in pairs.iter().enumerate() {
        if pair_idx > 0 {
            spans.push(" ".into());
        }
        for (key_idx, key) in keys.iter().enumerate() {
            if key_idx > 0 {
                spans.push("/".into());
            }
            spans.push(Span::from(key));
        }
        spans.push(" ".into());
        spans.push(Span::from(desc.to_string()));
    }
    Paragraph::new(vec![Line::from(spans).dim()]).render_ref(area, buf);
}
/// Generic widget for rendering a pager view.
struct PagerView {
    // Content chunks rendered top-to-bottom.
    renderables: Vec<Box<dyn Renderable>>,
    // Rows scrolled past the top; usize::MAX pins the view to the bottom
    // (it is clamped to the max valid offset at render time).
    scroll_offset: usize,
    // Title shown in the header row.
    title: String,
    // Content-area height observed at the last render, used for paging.
    last_content_height: Option<usize>,
    // Total height of all renderables at the last rendered width.
    last_rendered_height: Option<usize>,
    /// If set, on next render ensure this chunk is visible.
    pending_scroll_chunk: Option<usize>,
}
impl PagerView {
    /// Create a pager over `renderables` with an initial scroll offset
    /// (`usize::MAX` starts at the bottom).
    fn new(renderables: Vec<Box<dyn Renderable>>, title: String, scroll_offset: usize) -> Self {
        Self {
            renderables,
            scroll_offset,
            title,
            last_content_height: None,
            last_rendered_height: None,
            pending_scroll_chunk: None,
        }
    }
    /// Total height of all chunks when wrapped to `width`.
    fn content_height(&self, width: u16) -> usize {
        self.renderables
            .iter()
            .map(|c| c.desired_height(width) as usize)
            .sum()
    }
    /// Render header, scrolled content, and the bottom percentage bar,
    /// clamping the scroll offset to the valid range for this width.
    fn render(&mut self, area: Rect, buf: &mut Buffer) {
        Clear.render(area, buf);
        self.render_header(area, buf);
        let content_area = self.content_area(area);
        self.update_last_content_height(content_area.height);
        let content_height = self.content_height(content_area.width);
        self.last_rendered_height = Some(content_height);
        // If there is a pending request to scroll a specific chunk into view,
        // satisfy it now that wrapping is up to date for this width.
        if let Some(idx) = self.pending_scroll_chunk.take() {
            self.ensure_chunk_visible(idx, content_area);
        }
        self.scroll_offset = self
            .scroll_offset
            .min(content_height.saturating_sub(content_area.height as usize));
        self.render_content(content_area, buf);
        self.render_bottom_bar(area, content_area, buf, content_height);
    }
    /// Dimmed `/ / / …` header row with the title overlaid on top.
    fn render_header(&self, area: Rect, buf: &mut Buffer) {
        Span::from("/ ".repeat(area.width as usize / 2))
            .dim()
            .render_ref(area, buf);
        let header = format!("/ {}", self.title);
        header.dim().render_ref(area, buf);
    }
    /// Draw each chunk at its scrolled position; chunks straddling the top
    /// edge are rendered through an offset buffer. Rows below the last drawn
    /// chunk are filled with `~` (less(1)-style end-of-content markers).
    fn render_content(&self, area: Rect, buf: &mut Buffer) {
        let mut y = -(self.scroll_offset as isize);
        let mut drawn_bottom = area.y;
        for renderable in &self.renderables {
            let top = y;
            let height = renderable.desired_height(area.width) as isize;
            y += height;
            let bottom = y;
            if bottom < area.y as isize {
                continue;
            }
            if top > area.y as isize + area.height as isize {
                break;
            }
            if top < 0 {
                // Chunk is partially scrolled off the top; render with offset.
                let drawn = render_offset_content(area, buf, &**renderable, (-top) as u16);
                drawn_bottom = drawn_bottom.max(area.y + drawn);
            } else {
                let draw_height = (height as u16).min(area.height.saturating_sub(top as u16));
                let draw_area = Rect::new(area.x, area.y + top as u16, area.width, draw_height);
                renderable.render(draw_area, buf);
                drawn_bottom = drawn_bottom.max(draw_area.y.saturating_add(draw_area.height));
            }
        }
        for y in drawn_bottom..area.bottom() {
            if area.width == 0 {
                break;
            }
            buf[(area.x, y)] = Cell::from('~');
            for x in area.x + 1..area.right() {
                buf[(x, y)] = Cell::from(' ');
            }
        }
    }
    /// Separator line plus a right-aligned scroll percentage.
    fn render_bottom_bar(
        &self,
        full_area: Rect,
        content_area: Rect,
        buf: &mut Buffer,
        total_len: usize,
    ) {
        let sep_y = content_area.bottom();
        let sep_rect = Rect::new(full_area.x, sep_y, full_area.width, 1);
        Span::from("─".repeat(sep_rect.width as usize))
            .dim()
            .render_ref(sep_rect, buf);
        // 100% when everything fits; otherwise scroll position over max scroll.
        let percent = if total_len == 0 {
            100
        } else {
            let max_scroll = total_len.saturating_sub(content_area.height as usize);
            if max_scroll == 0 {
                100
            } else {
                (((self.scroll_offset.min(max_scroll)) as f32 / max_scroll as f32) * 100.0).round()
                    as u8
            }
        };
        let pct_text = format!(" {percent}% ");
        let pct_w = pct_text.chars().count() as u16;
        let pct_x = sep_rect.x + sep_rect.width - pct_w - 1;
        Span::from(pct_text)
            .dim()
            .render_ref(Rect::new(pct_x, sep_rect.y, pct_w, 1), buf);
    }
    /// Translate navigation keys into scroll-offset changes and schedule a
    /// redraw. Unrecognized keys are ignored without redrawing.
    fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) -> Result<()> {
        match key_event {
            e if KEY_UP.is_press(e) || KEY_K.is_press(e) => {
                self.scroll_offset = self.scroll_offset.saturating_sub(1);
            }
            e if KEY_DOWN.is_press(e) || KEY_J.is_press(e) => {
                self.scroll_offset = self.scroll_offset.saturating_add(1);
            }
            e if KEY_PAGE_UP.is_press(e)
                || KEY_SHIFT_SPACE.is_press(e)
                || KEY_CTRL_B.is_press(e) =>
            {
                let page_height = self.page_height(tui.terminal.viewport_area);
                self.scroll_offset = self.scroll_offset.saturating_sub(page_height);
            }
            e if KEY_PAGE_DOWN.is_press(e) || KEY_SPACE.is_press(e) || KEY_CTRL_F.is_press(e) => {
                let page_height = self.page_height(tui.terminal.viewport_area);
                self.scroll_offset = self.scroll_offset.saturating_add(page_height);
            }
            e if KEY_CTRL_D.is_press(e) => {
                // Half-page down, rounding up for odd heights.
                let area = self.content_area(tui.terminal.viewport_area);
                let half_page = (area.height as usize).saturating_add(1) / 2;
                self.scroll_offset = self.scroll_offset.saturating_add(half_page);
            }
            e if KEY_CTRL_U.is_press(e) => {
                // Half-page up, rounding up for odd heights.
                let area = self.content_area(tui.terminal.viewport_area);
                let half_page = (area.height as usize).saturating_add(1) / 2;
                self.scroll_offset = self.scroll_offset.saturating_sub(half_page);
            }
            e if KEY_HOME.is_press(e) => {
                self.scroll_offset = 0;
            }
            e if KEY_END.is_press(e) => {
                // Clamped to the real maximum on next render.
                self.scroll_offset = usize::MAX;
            }
            _ => {
                return Ok(());
            }
        }
        // Request a redraw; the frame scheduler coalesces bursts and clamps to 60fps.
        tui.frame_requester().schedule_frame();
        Ok(())
    }
    /// Scroll three rows per mouse-wheel tick; other mouse events are ignored.
    fn handle_mouse_scroll(&mut self, tui: &mut tui::Tui, event: MouseEvent) -> Result<()> {
        let step: usize = 3;
        match event.kind {
            MouseEventKind::ScrollUp => {
                self.scroll_offset = self.scroll_offset.saturating_sub(step);
            }
            MouseEventKind::ScrollDown => {
                self.scroll_offset = self.scroll_offset.saturating_add(step);
            }
            _ => {
                return Ok(());
            }
        }
        // Request a redraw; the frame scheduler coalesces bursts and clamps to 60fps.
        tui.frame_requester().schedule_frame();
        Ok(())
    }
    /// Returns the height of one page in content rows.
    ///
    /// Prefers the last rendered content height (excluding header/footer chrome);
    /// if no render has occurred yet, falls back to the content area height
    /// computed from the given viewport.
    fn page_height(&self, viewport_area: Rect) -> usize {
        self.last_content_height
            .unwrap_or_else(|| self.content_area(viewport_area).height as usize)
    }
    /// Record the content-area height observed during render.
    fn update_last_content_height(&mut self, height: u16) {
        self.last_content_height = Some(height as usize);
    }
    /// Carve out the content area: one header row on top, one separator row
    /// at the bottom.
    fn content_area(&self, area: Rect) -> Rect {
        let mut area = area;
        area.y = area.y.saturating_add(1);
        area.height = area.height.saturating_sub(2);
        area
    }
}
impl PagerView {
    /// True when the view is showing (or pinned to) the end of the content.
    /// Conservatively returns false before the first render, since the
    /// wrapped heights are not known yet.
    fn is_scrolled_to_bottom(&self) -> bool {
        if self.scroll_offset == usize::MAX {
            return true;
        }
        let Some(height) = self.last_content_height else {
            return false;
        };
        if self.renderables.is_empty() {
            return true;
        }
        let Some(total_height) = self.last_rendered_height else {
            return false;
        };
        if total_height <= height {
            return true;
        }
        let max_scroll = total_height.saturating_sub(height);
        self.scroll_offset >= max_scroll
    }
    /// Request that the given text chunk index be scrolled into view on next render.
    fn scroll_chunk_into_view(&mut self, chunk_index: usize) {
        self.pending_scroll_chunk = Some(chunk_index);
    }
    /// Adjust `scroll_offset` so chunk `idx` is fully visible: scroll up if it
    /// starts above the viewport, down if it ends below it.
    fn ensure_chunk_visible(&mut self, idx: usize, area: Rect) {
        if area.height == 0 || idx >= self.renderables.len() {
            return;
        }
        // Height of everything before the chunk = the chunk's top row.
        let first = self
            .renderables
            .iter()
            .take(idx)
            .map(|r| r.desired_height(area.width) as usize)
            .sum();
        let last = first + self.renderables[idx].desired_height(area.width) as usize;
        let current_top = self.scroll_offset;
        let current_bottom = current_top.saturating_add(area.height.saturating_sub(1) as usize);
        if first < current_top {
            self.scroll_offset = first;
        } else if last > current_bottom {
            self.scroll_offset = last.saturating_sub(area.height.saturating_sub(1) as usize);
        }
    }
}
/// A renderable that caches its desired height, recomputing only when the
/// queried width changes.
struct CachedRenderable {
    renderable: Box<dyn Renderable>,
    // Cached height for `last_width`; interior mutability because
    // `desired_height` takes `&self`.
    height: std::cell::Cell<Option<u16>>,
    last_width: std::cell::Cell<Option<u16>>,
}
impl CachedRenderable {
    /// Wrap a renderable with an initially-empty height cache.
    fn new(renderable: impl Into<Box<dyn Renderable>>) -> Self {
        Self {
            renderable: renderable.into(),
            height: std::cell::Cell::new(None),
            last_width: std::cell::Cell::new(None),
        }
    }
}
impl Renderable for CachedRenderable {
    /// Rendering is delegated untouched; only `desired_height` is cached.
    fn render(&self, area: Rect, buf: &mut Buffer) {
        self.renderable.render(area, buf);
    }
    /// Recompute the wrapped height only when `width` differs from the last
    /// query; otherwise serve the cached value.
    fn desired_height(&self, width: u16) -> u16 {
        if self.last_width.get() != Some(width) {
            let height = self.renderable.desired_height(width);
            self.height.set(Some(height));
            self.last_width.set(Some(width));
        }
        self.height.get().unwrap_or(0)
    }
}
/// Adapter that renders a history cell's transcript lines with a fixed style.
struct CellRenderable {
    cell: Arc<dyn HistoryCell>,
    style: Style,
}
impl Renderable for CellRenderable {
    /// Render the cell's transcript lines (wrapped to `area.width`) with the
    /// configured style.
    fn render(&self, area: Rect, buf: &mut Buffer) {
        let p =
            Paragraph::new(Text::from(self.cell.transcript_lines(area.width))).style(self.style);
        p.render(area, buf);
    }
    fn desired_height(&self, width: u16) -> u16 {
        self.cell.desired_transcript_height(width)
    }
}
/// Pager overlay over the full conversation transcript, with an optional
/// highlighted cell (used when picking a message to edit).
pub(crate) struct TranscriptOverlay {
    view: PagerView,
    // Source cells; re-rendered into `view.renderables` on every mutation.
    cells: Vec<Arc<dyn HistoryCell>>,
    // Index into `cells` of the currently highlighted cell, if any.
    highlight_cell: Option<usize>,
    is_done: bool,
}
impl TranscriptOverlay {
    /// Build a transcript overlay that starts scrolled to the bottom
    /// (`usize::MAX` is clamped to the real maximum at render time).
    pub(crate) fn new(transcript_cells: Vec<Arc<dyn HistoryCell>>) -> Self {
        Self {
            view: PagerView::new(
                Self::render_cells(&transcript_cells, None),
                "T R A N S C R I P T".to_string(),
                usize::MAX,
            ),
            cells: transcript_cells,
            highlight_cell: None,
            is_done: false,
        }
    }
    /// Convert history cells into renderables: user messages get the
    /// user-message style (reversed when highlighted), and every cell after
    /// the first — except streamed continuations — is inset by one blank row.
    fn render_cells(
        cells: &[Arc<dyn HistoryCell>],
        highlight_cell: Option<usize>,
    ) -> Vec<Box<dyn Renderable>> {
        cells
            .iter()
            .enumerate()
            .map(|(i, c)| {
                // Only user cells are specially styled; the highlight only
                // applies to the user cell at the highlighted index.
                let style = if c.as_any().is::<UserHistoryCell>() {
                    if highlight_cell == Some(i) {
                        user_message_style().reversed()
                    } else {
                        user_message_style()
                    }
                } else {
                    Style::default()
                };
                let mut cell_renderable: Box<dyn Renderable> =
                    Box::new(CachedRenderable::new(CellRenderable {
                        cell: c.clone(),
                        style,
                    }));
                if !c.is_stream_continuation() && i > 0 {
                    cell_renderable = Box::new(InsetRenderable::new(
                        cell_renderable,
                        Insets::tlbr(1, 0, 0, 0),
                    ));
                }
                cell_renderable
            })
            .collect()
    }
    /// Append a new cell, keeping the view pinned to the bottom if the user
    /// was already there; a manually scrolled position is preserved.
    pub(crate) fn insert_cell(&mut self, cell: Arc<dyn HistoryCell>) {
        let follow_bottom = self.view.is_scrolled_to_bottom();
        self.cells.push(cell);
        self.view.renderables = Self::render_cells(&self.cells, self.highlight_cell);
        if follow_bottom {
            self.view.scroll_offset = usize::MAX;
        }
    }
    /// Set (or clear) the highlighted cell, scrolling it into view on the
    /// next render.
    pub(crate) fn set_highlight_cell(&mut self, cell: Option<usize>) {
        self.highlight_cell = cell;
        self.view.renderables = Self::render_cells(&self.cells, self.highlight_cell);
        if let Some(idx) = self.highlight_cell {
            self.view.scroll_chunk_into_view(idx);
        }
    }
    /// Two footer lines: shared pager hints, then transcript-specific hints
    /// (plus an "edit message" hint while a cell is highlighted).
    fn render_hints(&self, area: Rect, buf: &mut Buffer) {
        let line1 = Rect::new(area.x, area.y, area.width, 1);
        let line2 = Rect::new(area.x, area.y.saturating_add(1), area.width, 1);
        render_key_hints(line1, buf, PAGER_KEY_HINTS);
        let mut pairs: Vec<(&[KeyBinding], &str)> =
            vec![(&[KEY_Q], "to quit"), (&[KEY_ESC], "to edit prev")];
        if self.highlight_cell.is_some() {
            pairs.push((&[KEY_ENTER], "to edit message"));
        }
        render_key_hints(line2, buf, &pairs);
    }
    /// Render the pager above a three-row hint area.
    pub(crate) fn render(&mut self, area: Rect, buf: &mut Buffer) {
        let top_h = area.height.saturating_sub(3);
        let top = Rect::new(area.x, area.y, area.width, top_h);
        let bottom = Rect::new(area.x, area.y + top_h, area.width, 3);
        self.view.render(top, buf);
        self.render_hints(bottom, buf);
    }
}
impl TranscriptOverlay {
    /// Dispatch an event: q / Ctrl+C / Ctrl+T close the overlay, other keys
    /// scroll the pager, mouse wheel scrolls, and Draw repaints full-screen.
    pub(crate) fn handle_event(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
        match event {
            TuiEvent::Key(key_event) => match key_event {
                e if KEY_Q.is_press(e) || KEY_CTRL_C.is_press(e) || KEY_CTRL_T.is_press(e) => {
                    self.is_done = true;
                    Ok(())
                }
                other => self.view.handle_key_event(tui, other),
            },
            TuiEvent::Mouse(mouse_event) => self.view.handle_mouse_scroll(tui, mouse_event),
            TuiEvent::Draw => {
                tui.draw(u16::MAX, |frame| {
                    self.render(frame.area(), frame.buffer);
                })?;
                Ok(())
            }
            _ => Ok(()),
        }
    }
    /// True once the overlay should be dismissed.
    pub(crate) fn is_done(&self) -> bool {
        self.is_done
    }
}
/// Pager overlay over fixed, pre-rendered content (no live updates).
pub(crate) struct StaticOverlay {
    view: PagerView,
    is_done: bool,
}
impl StaticOverlay {
    /// Build a static overlay from plain lines, wrapped without trimming.
    pub(crate) fn with_title(lines: Vec<Line<'static>>, title: String) -> Self {
        let paragraph = Paragraph::new(Text::from(lines)).wrap(Wrap { trim: false });
        Self::with_renderables(vec![Box::new(CachedRenderable::new(paragraph))], title)
    }
    /// Build a static overlay from arbitrary renderables, starting at the top.
    pub(crate) fn with_renderables(renderables: Vec<Box<dyn Renderable>>, title: String) -> Self {
        Self {
            view: PagerView::new(renderables, title, 0),
            is_done: false,
        }
    }
    /// Two footer lines: shared pager hints, then the quit hint.
    fn render_hints(&self, area: Rect, buf: &mut Buffer) {
        let line1 = Rect::new(area.x, area.y, area.width, 1);
        let line2 = Rect::new(area.x, area.y.saturating_add(1), area.width, 1);
        render_key_hints(line1, buf, PAGER_KEY_HINTS);
        let pairs: Vec<(&[KeyBinding], &str)> = vec![(&[KEY_Q], "to quit")];
        render_key_hints(line2, buf, &pairs);
    }
    /// Render the pager above a three-row hint area.
    pub(crate) fn render(&mut self, area: Rect, buf: &mut Buffer) {
        let top_h = area.height.saturating_sub(3);
        let top = Rect::new(area.x, area.y, area.width, top_h);
        let bottom = Rect::new(area.x, area.y + top_h, area.width, 3);
        self.view.render(top, buf);
        self.render_hints(bottom, buf);
    }
}
impl StaticOverlay {
    /// Dispatch an event: q / Ctrl+C close the overlay, other keys scroll the
    /// pager, mouse wheel scrolls, and Draw repaints full-screen.
    pub(crate) fn handle_event(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
        match event {
            TuiEvent::Key(key_event) => match key_event {
                e if KEY_Q.is_press(e) || KEY_CTRL_C.is_press(e) => {
                    self.is_done = true;
                    Ok(())
                }
                other => self.view.handle_key_event(tui, other),
            },
            TuiEvent::Mouse(mouse_event) => self.view.handle_mouse_scroll(tui, mouse_event),
            TuiEvent::Draw => {
                tui.draw(u16::MAX, |frame| {
                    self.render(frame.area(), frame.buffer);
                })?;
                Ok(())
            }
            _ => Ok(()),
        }
    }
    /// True once the overlay should be dismissed.
    pub(crate) fn is_done(&self) -> bool {
        self.is_done
    }
}
/// Render `renderable` as if its first `scroll_offset` rows were scrolled off
/// the top of `area`: draw it into a tall scratch buffer, then copy the
/// visible window into `buf`. Returns the number of rows actually copied.
fn render_offset_content(
    area: Rect,
    buf: &mut Buffer,
    renderable: &dyn Renderable,
    scroll_offset: u16,
) -> u16 {
    let height = renderable.desired_height(area.width);
    // Scratch buffer only needs to cover the visible window plus the
    // scrolled-off prefix, capped at the renderable's own height.
    let mut tall_buf = Buffer::empty(Rect::new(
        0,
        0,
        area.width,
        height.min(area.height + scroll_offset),
    ));
    renderable.render(*tall_buf.area(), &mut tall_buf);
    let copy_height = area
        .height
        .min(tall_buf.area().height.saturating_sub(scroll_offset));
    // Copy cell-by-cell from the scratch buffer, skipping the scrolled rows.
    for y in 0..copy_height {
        let src_y = y + scroll_offset;
        for x in 0..area.width {
            buf[(area.x + x, area.y + y)] = tall_buf[(x, src_y)].clone();
        }
    }
    copy_height
}
#[cfg(test)]
mod tests {
use super::*;
use codex_core::protocol::ExecCommandSource;
use codex_core::protocol::ReviewDecision;
use insta::assert_snapshot;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use crate::exec_cell::CommandOutput;
use crate::history_cell;
use crate::history_cell::HistoryCell;
use crate::history_cell::new_patch_event;
use codex_core::protocol::FileChange;
use codex_protocol::parse_command::ParsedCommand;
use ratatui::Terminal;
use ratatui::backend::TestBackend;
use ratatui::text::Text;
#[derive(Debug)]
struct TestCell {
lines: Vec<Line<'static>>,
}
impl crate::history_cell::HistoryCell for TestCell {
fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
self.lines.clone()
}
fn transcript_lines(&self, _width: u16) -> Vec<Line<'static>> {
self.lines.clone()
}
}
fn paragraph_block(label: &str, lines: usize) -> Box<dyn Renderable> {
let text = Text::from(
(0..lines)
.map(|i| Line::from(format!("{label}{i}")))
.collect::<Vec<_>>(),
);
Box::new(Paragraph::new(text)) as Box<dyn Renderable>
}
#[test]
fn edit_prev_hint_is_visible() {
let mut overlay = TranscriptOverlay::new(vec![Arc::new(TestCell {
lines: vec![Line::from("hello")],
})]);
// Render into a small buffer and assert the backtrack hint is present
let area = Rect::new(0, 0, 40, 10);
let mut buf = Buffer::empty(area);
overlay.render(area, &mut buf);
// Flatten buffer to a string and check for the hint text
let mut s = String::new();
for y in area.y..area.bottom() {
for x in area.x..area.right() {
s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' '));
}
s.push('\n');
}
assert!(
s.contains("edit prev"),
"expected 'edit prev' hint in overlay footer, got: {s:?}"
);
}
#[test]
fn transcript_overlay_snapshot_basic() {
// Prepare a transcript overlay with a few lines
let mut overlay = TranscriptOverlay::new(vec![
Arc::new(TestCell {
lines: vec![Line::from("alpha")],
}),
Arc::new(TestCell {
lines: vec![Line::from("beta")],
}),
Arc::new(TestCell {
lines: vec![Line::from("gamma")],
}),
]);
let mut term = Terminal::new(TestBackend::new(40, 10)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
assert_snapshot!(term.backend());
}
fn buffer_to_text(buf: &Buffer, area: Rect) -> String {
let mut out = String::new();
for y in area.y..area.bottom() {
for x in area.x..area.right() {
let symbol = buf[(x, y)].symbol();
if symbol.is_empty() {
out.push(' ');
} else {
out.push(symbol.chars().next().unwrap_or(' '));
}
}
// Trim trailing spaces for stability.
while out.ends_with(' ') {
out.pop();
}
out.push('\n');
}
out
}
#[test]
fn transcript_overlay_apply_patch_scroll_vt100_clears_previous_page() {
let cwd = PathBuf::from("/repo");
let mut cells: Vec<Arc<dyn HistoryCell>> = Vec::new();
let mut approval_changes = HashMap::new();
approval_changes.insert(
PathBuf::from("foo.txt"),
FileChange::Add {
content: "hello\nworld\n".to_string(),
},
);
let approval_cell: Arc<dyn HistoryCell> = Arc::new(new_patch_event(approval_changes, &cwd));
cells.push(approval_cell);
let mut apply_changes = HashMap::new();
apply_changes.insert(
PathBuf::from("foo.txt"),
FileChange::Add {
content: "hello\nworld\n".to_string(),
},
);
let apply_begin_cell: Arc<dyn HistoryCell> = Arc::new(new_patch_event(apply_changes, &cwd));
cells.push(apply_begin_cell);
let apply_end_cell: Arc<dyn HistoryCell> =
history_cell::new_approval_decision_cell(vec!["ls".into()], ReviewDecision::Approved)
.into();
cells.push(apply_end_cell);
let mut exec_cell = crate::exec_cell::new_active_exec_command(
"exec-1".into(),
vec!["bash".into(), "-lc".into(), "ls".into()],
vec![ParsedCommand::Unknown { cmd: "ls".into() }],
ExecCommandSource::Agent,
None,
true,
);
exec_cell.complete_call(
"exec-1",
CommandOutput {
exit_code: 0,
aggregated_output: "src\nREADME.md\n".into(),
formatted_output: "src\nREADME.md\n".into(),
},
Duration::from_millis(420),
);
let exec_cell: Arc<dyn HistoryCell> = Arc::new(exec_cell);
cells.push(exec_cell);
let mut overlay = TranscriptOverlay::new(cells);
let area = Rect::new(0, 0, 80, 12);
let mut buf = Buffer::empty(area);
overlay.render(area, &mut buf);
overlay.view.scroll_offset = 0;
overlay.render(area, &mut buf);
let snapshot = buffer_to_text(&buf, area);
assert_snapshot!("transcript_overlay_apply_patch_scroll_vt100", snapshot);
}
#[test]
fn transcript_overlay_keeps_scroll_pinned_at_bottom() {
let mut overlay = TranscriptOverlay::new(
(0..20)
.map(|i| {
Arc::new(TestCell {
lines: vec![Line::from(format!("line{i}"))],
}) as Arc<dyn HistoryCell>
})
.collect(),
);
let mut term = Terminal::new(TestBackend::new(40, 12)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
assert!(
overlay.view.is_scrolled_to_bottom(),
"expected initial render to leave view at bottom"
);
overlay.insert_cell(Arc::new(TestCell {
lines: vec!["tail".into()],
}));
assert_eq!(overlay.view.scroll_offset, usize::MAX);
}
#[test]
fn transcript_overlay_preserves_manual_scroll_position() {
let mut overlay = TranscriptOverlay::new(
(0..20)
.map(|i| {
Arc::new(TestCell {
lines: vec![Line::from(format!("line{i}"))],
}) as Arc<dyn HistoryCell>
})
.collect(),
);
let mut term = Terminal::new(TestBackend::new(40, 12)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
overlay.view.scroll_offset = 0;
overlay.insert_cell(Arc::new(TestCell {
lines: vec!["tail".into()],
}));
assert_eq!(overlay.view.scroll_offset, 0);
}
#[test]
fn static_overlay_snapshot_basic() {
// Prepare a static overlay with a few lines and a title
let mut overlay = StaticOverlay::with_title(
vec!["one".into(), "two".into(), "three".into()],
"S T A T I C".to_string(),
);
let mut term = Terminal::new(TestBackend::new(40, 10)).expect("term");
term.draw(|f| overlay.render(f.area(), f.buffer_mut()))
.expect("draw");
assert_snapshot!(term.backend());
}
/// Render transcript overlay and return visible line numbers (`line-NN`) in order.
fn transcript_line_numbers(overlay: &mut TranscriptOverlay, area: Rect) -> Vec<usize> {
    let mut buf = Buffer::empty(area);
    overlay.render(area, &mut buf);
    // NOTE(review): the bottom 3 rows are excluded — presumably overlay chrome
    // (separator/hints); confirm against the overlay's render layout.
    let top_h = area.height.saturating_sub(3);
    let top = Rect::new(area.x, area.y, area.width, top_h);
    let content_area = overlay.view.content_area(top);
    let mut nums = Vec::new();
    for y in content_area.y..content_area.bottom() {
        // Reconstruct the row's text from the first char of each cell symbol.
        let mut line = String::new();
        for x in content_area.x..content_area.right() {
            line.push(buf[(x, y)].symbol().chars().next().unwrap_or(' '));
        }
        // Extract the NN from the first `line-NN` token on the row, if any.
        if let Some(n) = line
            .split_whitespace()
            .find_map(|w| w.strip_prefix("line-"))
            .and_then(|s| s.parse().ok())
        {
            nums.push(n);
        }
    }
    nums
}
#[test]
fn transcript_overlay_paging_is_continuous_and_round_trips() {
let mut overlay = TranscriptOverlay::new(
(0..50)
.map(|i| {
Arc::new(TestCell {
lines: vec![Line::from(format!("line-{i:02}"))],
}) as Arc<dyn HistoryCell>
})
.collect(),
);
let area = Rect::new(0, 0, 40, 15);
// Prime layout so last_content_height is populated and paging uses the real content height.
let mut buf = Buffer::empty(area);
overlay.view.scroll_offset = 0;
overlay.render(area, &mut buf);
let page_height = overlay.view.page_height(area);
// Scenario 1: starting from the top, PageDown should show the next page of content.
overlay.view.scroll_offset = 0;
let page1 = transcript_line_numbers(&mut overlay, area);
let page1_len = page1.len();
let expected_page1: Vec<usize> = (0..page1_len).collect();
assert_eq!(
page1, expected_page1,
"first page should start at line-00 and show a full page of content"
);
overlay.view.scroll_offset = overlay.view.scroll_offset.saturating_add(page_height);
let page2 = transcript_line_numbers(&mut overlay, area);
assert_eq!(
page2.len(),
page1_len,
"second page should have the same number of visible lines as the first page"
);
let expected_page2_first = *page1.last().unwrap() + 1;
assert_eq!(
page2[0], expected_page2_first,
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/slash_command.rs | codex-rs/tui2/src/slash_command.rs | use strum::IntoEnumIterator;
use strum_macros::AsRefStr;
use strum_macros::EnumIter;
use strum_macros::EnumString;
use strum_macros::IntoStaticStr;
/// Commands that can be invoked by starting a message with a leading slash.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Hash, EnumString, EnumIter, AsRefStr, IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum SlashCommand {
    // DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so
    // more frequently used commands should be listed first.
    Model,
    Approvals,
    Skills,
    Review,
    New,
    Resume,
    Init,
    Compact,
    // Undo,
    Diff,
    Mention,
    Status,
    Mcp,
    Logout,
    Quit,
    // `Exit` is an alias for `Quit` (same description below).
    Exit,
    Feedback,
    // `Rollout` and `TestApproval` are only shown in debug builds
    // (see `is_visible`).
    Rollout,
    TestApproval,
}
impl SlashCommand {
/// User-visible description shown in the popup.
pub fn description(self) -> &'static str {
match self {
SlashCommand::Feedback => "send logs to maintainers",
SlashCommand::New => "start a new chat during a conversation",
SlashCommand::Init => "create an AGENTS.md file with instructions for Codex",
SlashCommand::Compact => "summarize conversation to prevent hitting the context limit",
SlashCommand::Review => "review my current changes and find issues",
SlashCommand::Resume => "resume a saved chat",
// SlashCommand::Undo => "ask Codex to undo a turn",
SlashCommand::Quit | SlashCommand::Exit => "exit Codex",
SlashCommand::Diff => "show git diff (including untracked files)",
SlashCommand::Mention => "mention a file",
SlashCommand::Skills => "use skills to improve how Codex performs specific tasks",
SlashCommand::Status => "show current session configuration and token usage",
SlashCommand::Model => "choose what model and reasoning effort to use",
SlashCommand::Approvals => "choose what Codex can do without approval",
SlashCommand::Mcp => "list configured MCP tools",
SlashCommand::Logout => "log out of Codex",
SlashCommand::Rollout => "print the rollout file path",
SlashCommand::TestApproval => "test approval request",
}
}
/// Command string without the leading '/'. Provided for compatibility with
/// existing code that expects a method named `command()`.
pub fn command(self) -> &'static str {
self.into()
}
/// Whether this command can be run while a task is in progress.
pub fn available_during_task(self) -> bool {
match self {
SlashCommand::New
| SlashCommand::Resume
| SlashCommand::Init
| SlashCommand::Compact
// | SlashCommand::Undo
| SlashCommand::Model
| SlashCommand::Approvals
| SlashCommand::Review
| SlashCommand::Logout => false,
SlashCommand::Diff
| SlashCommand::Mention
| SlashCommand::Skills
| SlashCommand::Status
| SlashCommand::Mcp
| SlashCommand::Feedback
| SlashCommand::Quit
| SlashCommand::Exit => true,
SlashCommand::Rollout => true,
SlashCommand::TestApproval => true,
}
}
fn is_visible(self) -> bool {
match self {
SlashCommand::Rollout | SlashCommand::TestApproval => cfg!(debug_assertions),
_ => true,
}
}
}
/// Return all built-in commands in a Vec paired with their command string.
pub fn built_in_slash_commands() -> Vec<(&'static str, SlashCommand)> {
    let mut commands = Vec::new();
    for command in SlashCommand::iter() {
        // Debug-only commands are filtered out of release builds.
        if command.is_visible() {
            commands.push((command.command(), command));
        }
    }
    commands
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/cli.rs | codex-rs/tui2/src/cli.rs | use clap::Parser;
use clap::ValueHint;
use codex_common::ApprovalModeCliArg;
use codex_common::CliConfigOverrides;
use std::path::PathBuf;
// NOTE: `///` doc comments on fields below double as clap-generated help text;
// edit them only when the user-facing help should change.
#[derive(Parser, Debug)]
#[command(version)]
pub struct Cli {
    /// Optional user prompt to start the session.
    #[arg(value_name = "PROMPT", value_hint = clap::ValueHint::Other)]
    pub prompt: Option<String>,
    /// Optional image(s) to attach to the initial prompt.
    #[arg(long = "image", short = 'i', value_name = "FILE", value_delimiter = ',', num_args = 1..)]
    pub images: Vec<PathBuf>,
    // Internal controls set by the top-level `codex resume` subcommand.
    // These are not exposed as user flags on the base `codex` command.
    #[clap(skip)]
    pub resume_picker: bool,
    #[clap(skip)]
    pub resume_last: bool,
    /// Internal: resume a specific recorded session by id (UUID). Set by the
    /// top-level `codex resume <SESSION_ID>` wrapper; not exposed as a public flag.
    #[clap(skip)]
    pub resume_session_id: Option<String>,
    /// Internal: show all sessions (disables cwd filtering and shows CWD column).
    #[clap(skip)]
    pub resume_show_all: bool,
    /// Model the agent should use.
    #[arg(long, short = 'm')]
    pub model: Option<String>,
    /// Convenience flag to select the local open source model provider. Equivalent to -c
    /// model_provider=oss; verifies a local LM Studio or Ollama server is running.
    #[arg(long = "oss", default_value_t = false)]
    pub oss: bool,
    /// Specify which local provider to use (lmstudio or ollama).
    /// If not specified with --oss, will use config default or show selection.
    #[arg(long = "local-provider")]
    pub oss_provider: Option<String>,
    /// Configuration profile from config.toml to specify default options.
    #[arg(long = "profile", short = 'p')]
    pub config_profile: Option<String>,
    /// Select the sandbox policy to use when executing model-generated shell
    /// commands.
    #[arg(long = "sandbox", short = 's')]
    pub sandbox_mode: Option<codex_common::SandboxModeCliArg>,
    /// Configure when the model requires human approval before executing a command.
    #[arg(long = "ask-for-approval", short = 'a')]
    pub approval_policy: Option<ApprovalModeCliArg>,
    /// Convenience alias for low-friction sandboxed automatic execution (-a on-request, --sandbox workspace-write).
    #[arg(long = "full-auto", default_value_t = false)]
    pub full_auto: bool,
    /// Skip all confirmation prompts and execute commands without sandboxing.
    /// EXTREMELY DANGEROUS. Intended solely for running in environments that are externally sandboxed.
    #[arg(
        long = "dangerously-bypass-approvals-and-sandbox",
        alias = "yolo",
        default_value_t = false,
        conflicts_with_all = ["approval_policy", "full_auto"]
    )]
    pub dangerously_bypass_approvals_and_sandbox: bool,
    /// Tell the agent to use the specified directory as its working root.
    #[clap(long = "cd", short = 'C', value_name = "DIR")]
    pub cwd: Option<PathBuf>,
    /// Enable web search (off by default). When enabled, the native Responses `web_search` tool is available to the model (no per‑call approval).
    #[arg(long = "search", default_value_t = false)]
    pub web_search: bool,
    /// Additional directories that should be writable alongside the primary workspace.
    #[arg(long = "add-dir", value_name = "DIR", value_hint = ValueHint::DirPath)]
    pub add_dir: Vec<PathBuf>,
    // Populated by the caller rather than parsed from argv (`#[clap(skip)]`).
    #[clap(skip)]
    pub config_overrides: CliConfigOverrides,
}
// Field-for-field conversion from the original TUI's CLI struct; keep this in
// sync when a flag is added to either side.
impl From<codex_tui::Cli> for Cli {
    fn from(cli: codex_tui::Cli) -> Self {
        Self {
            prompt: cli.prompt,
            images: cli.images,
            resume_picker: cli.resume_picker,
            resume_last: cli.resume_last,
            resume_session_id: cli.resume_session_id,
            resume_show_all: cli.resume_show_all,
            model: cli.model,
            oss: cli.oss,
            oss_provider: cli.oss_provider,
            config_profile: cli.config_profile,
            sandbox_mode: cli.sandbox_mode,
            approval_policy: cli.approval_policy,
            full_auto: cli.full_auto,
            dangerously_bypass_approvals_and_sandbox: cli.dangerously_bypass_approvals_and_sandbox,
            cwd: cli.cwd,
            web_search: cli.web_search,
            add_dir: cli.add_dir,
            config_overrides: cli.config_overrides,
        }
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/wrapping.rs | codex-rs/tui2/src/wrapping.rs | use ratatui::text::Line;
use ratatui::text::Span;
use std::borrow::Cow;
use std::ops::Range;
use textwrap::Options;
use crate::render::line_utils::push_owned_lines;
/// Wrap `text` and return byte ranges into `text` for each wrapped line.
///
/// Each range keeps the trailing spaces after the wrapped slice plus one extra
/// sentinel byte; use [`wrap_ranges_trim`] when trimmed ranges are wanted.
pub(crate) fn wrap_ranges<'a, O>(text: &str, width_or_options: O) -> Vec<Range<usize>>
where
    O: Into<Options<'a>>,
{
    let opts = width_or_options.into();
    let mut lines: Vec<Range<usize>> = Vec::new();
    for line in textwrap::wrap(text, opts).iter() {
        match line {
            std::borrow::Cow::Borrowed(slice) => {
                // `slice` borrows from `text`, so both pointers refer to the same
                // allocation; plain address arithmetic recovers the byte offset
                // without needing `unsafe { offset_from }`.
                let start = slice.as_ptr() as usize - text.as_ptr() as usize;
                let end = start + slice.len();
                let trailing_spaces = text[end..].chars().take_while(|c| *c == ' ').count();
                lines.push(start..end + trailing_spaces + 1);
            }
            // Wrapped lines are expected to borrow from the input here.
            std::borrow::Cow::Owned(_) => panic!("wrap_ranges: unexpected owned string"),
        }
    }
    lines
}
/// Like `wrap_ranges` but returns ranges without trailing whitespace and
/// without the sentinel extra byte. Suitable for general wrapping where
/// trailing spaces should not be preserved.
pub(crate) fn wrap_ranges_trim<'a, O>(text: &str, width_or_options: O) -> Vec<Range<usize>>
where
    O: Into<Options<'a>>,
{
    let opts = width_or_options.into();
    let mut lines: Vec<Range<usize>> = Vec::new();
    for line in textwrap::wrap(text, opts).iter() {
        match line {
            std::borrow::Cow::Borrowed(slice) => {
                // `slice` borrows from `text`, so both pointers refer to the same
                // allocation; plain address arithmetic recovers the byte offset
                // without needing `unsafe { offset_from }`.
                let start = slice.as_ptr() as usize - text.as_ptr() as usize;
                lines.push(start..start + slice.len());
            }
            // Wrapped lines are expected to borrow from the input here.
            std::borrow::Cow::Owned(_) => panic!("wrap_ranges_trim: unexpected owned string"),
        }
    }
    lines
}
/// Wrapping options analogous to [`textwrap::Options`], but accepting styled
/// ratatui [`Line`] values as indents.
#[derive(Debug, Clone)]
pub struct RtOptions<'a> {
    /// The width in columns at which the text will be wrapped.
    pub width: usize,
    /// Line ending used for breaking lines.
    pub line_ending: textwrap::LineEnding,
    /// Indentation used for the first line of output. See the
    /// [`Options::initial_indent`] method.
    pub initial_indent: Line<'a>,
    /// Indentation used for subsequent lines of output. See the
    /// [`Options::subsequent_indent`] method.
    pub subsequent_indent: Line<'a>,
    /// Allow long words to be broken if they cannot fit on a line.
    /// When set to `false`, some lines may be longer than
    /// `self.width`. See the [`Options::break_words`] method.
    pub break_words: bool,
    /// Wrapping algorithm to use, see the implementations of the
    /// [`WrapAlgorithm`] trait for details.
    pub wrap_algorithm: textwrap::WrapAlgorithm,
    /// The line breaking algorithm to use, see the [`WordSeparator`]
    /// trait for an overview and possible implementations.
    pub word_separator: textwrap::WordSeparator,
    /// The method for splitting words. This can be used to prohibit
    /// splitting words on hyphens, or it can be used to implement
    /// language-aware machine hyphenation.
    pub word_splitter: textwrap::WordSplitter,
}
impl From<usize> for RtOptions<'_> {
fn from(width: usize) -> Self {
RtOptions::new(width)
}
}
#[allow(dead_code)]
impl<'a> RtOptions<'a> {
    /// Create options for `width` with library defaults: LF line endings,
    /// empty indents, word breaking enabled, first-fit wrapping, and
    /// hyphen-aware word splitting.
    pub fn new(width: usize) -> Self {
        RtOptions {
            width,
            line_ending: textwrap::LineEnding::LF,
            initial_indent: Line::default(),
            subsequent_indent: Line::default(),
            break_words: true,
            word_separator: textwrap::WordSeparator::new(),
            wrap_algorithm: textwrap::WrapAlgorithm::FirstFit,
            word_splitter: textwrap::WordSplitter::HyphenSplitter,
        }
    }
    /// Builder-style setter for the line ending used when breaking lines.
    pub fn line_ending(mut self, line_ending: textwrap::LineEnding) -> Self {
        self.line_ending = line_ending;
        self
    }
    /// Builder-style setter for the wrap width in columns.
    pub fn width(mut self, width: usize) -> Self {
        self.width = width;
        self
    }
    /// Builder-style setter for the indent prepended to the first output line.
    pub fn initial_indent(mut self, initial_indent: Line<'a>) -> Self {
        self.initial_indent = initial_indent;
        self
    }
    /// Builder-style setter for the indent prepended to continuation lines.
    pub fn subsequent_indent(mut self, subsequent_indent: Line<'a>) -> Self {
        self.subsequent_indent = subsequent_indent;
        self
    }
    /// Builder-style setter controlling whether over-long words may be split.
    pub fn break_words(mut self, break_words: bool) -> Self {
        self.break_words = break_words;
        self
    }
    /// Builder-style setter for the word-separator implementation.
    pub fn word_separator(mut self, word_separator: textwrap::WordSeparator) -> RtOptions<'a> {
        self.word_separator = word_separator;
        self
    }
    /// Builder-style setter for the wrapping algorithm.
    pub fn wrap_algorithm(mut self, wrap_algorithm: textwrap::WrapAlgorithm) -> RtOptions<'a> {
        self.wrap_algorithm = wrap_algorithm;
        self
    }
    /// Builder-style setter for the word-splitting (hyphenation) strategy.
    pub fn word_splitter(mut self, word_splitter: textwrap::WordSplitter) -> RtOptions<'a> {
        self.word_splitter = word_splitter;
        self
    }
}
/// Word-wrap a single styled line, discarding the soft-wrap joiner metadata
/// produced by `word_wrap_line_with_joiners`.
#[must_use]
pub(crate) fn word_wrap_line<'a, O>(line: &'a Line<'a>, width_or_options: O) -> Vec<Line<'a>>
where
    O: Into<RtOptions<'a>>,
{
    let (lines, _joiners) = word_wrap_line_with_joiners(line, width_or_options);
    lines
}
/// Concatenate all span contents of `line` into one string, recording each
/// span's byte range in that string together with its style.
fn flatten_line_and_bounds<'a>(
    line: &'a Line<'a>,
) -> (String, Vec<(Range<usize>, ratatui::style::Style)>) {
    let mut flat = String::new();
    let mut span_bounds = Vec::with_capacity(line.spans.len());
    for span in &line.spans {
        let start = flat.len();
        flat.push_str(span.content.as_ref());
        span_bounds.push((start..flat.len(), span.style));
    }
    (flat, span_bounds)
}
/// Assemble one wrapped output line: `indent` followed by the slice of
/// `original` covered by `range`, with the line-level style patched onto
/// every span.
fn build_wrapped_line_from_range<'a>(
    indent: Line<'a>,
    original: &'a Line<'a>,
    span_bounds: &[(Range<usize>, ratatui::style::Style)],
    range: &Range<usize>,
) -> Line<'a> {
    let mut out = indent.style(original.style);
    out.spans.extend(
        slice_line_spans(original, span_bounds, range)
            .spans
            .into_iter()
            .map(|span| span.patch_style(original.style)),
    );
    out
}
/// Wrap a single line and also return, for each output line, the string that should be inserted
/// when joining it to the previous output line as a *soft wrap*.
///
/// - The first output line always has `None`.
/// - Continuation lines have `Some(joiner)` where `joiner` is the exact substring (often spaces,
/// possibly empty) that was skipped at the wrap boundary.
pub(crate) fn word_wrap_line_with_joiners<'a, O>(
    line: &'a Line<'a>,
    width_or_options: O,
) -> (Vec<Line<'a>>, Vec<Option<String>>)
where
    O: Into<RtOptions<'a>>,
{
    // Flatten the styled line into one string plus per-span byte ranges so the
    // wrap positions (byte offsets) can be mapped back to spans.
    let (flat, span_bounds) = flatten_line_and_bounds(line);
    let rt_opts: RtOptions<'a> = width_or_options.into();
    let opts = Options::new(rt_opts.width)
        .line_ending(rt_opts.line_ending)
        .break_words(rt_opts.break_words)
        .wrap_algorithm(rt_opts.wrap_algorithm)
        .word_separator(rt_opts.word_separator)
        .word_splitter(rt_opts.word_splitter);
    let mut out: Vec<Line<'a>> = Vec::new();
    let mut joiners: Vec<Option<String>> = Vec::new();
    // The first output line uses the initial indent and a reduced available width.
    // Compute first line range with reduced width due to initial indent.
    let initial_width_available = opts
        .width
        .saturating_sub(rt_opts.initial_indent.width())
        .max(1);
    let initial_wrapped = wrap_ranges_trim(&flat, opts.clone().width(initial_width_available));
    // Empty input still yields a single (indent-only) output line.
    let Some(first_line_range) = initial_wrapped.first() else {
        out.push(rt_opts.initial_indent.clone());
        joiners.push(None);
        return (out, joiners);
    };
    let first_line = build_wrapped_line_from_range(
        rt_opts.initial_indent.clone(),
        line,
        &span_bounds,
        first_line_range,
    );
    out.push(first_line);
    joiners.push(None);
    // Wrap the remainder using subsequent indent width. We also compute the joiner strings that
    // were skipped at each wrap boundary so callers can treat these as soft wraps during copy.
    let mut base = first_line_range.end;
    let skip_leading_spaces = flat[base..].chars().take_while(|c| *c == ' ').count();
    let joiner_first = flat[base..base.saturating_add(skip_leading_spaces)].to_string();
    base = base.saturating_add(skip_leading_spaces);
    let subsequent_width_available = opts
        .width
        .saturating_sub(rt_opts.subsequent_indent.width())
        .max(1);
    let remaining = &flat[base..];
    let remaining_wrapped = wrap_ranges_trim(remaining, opts.width(subsequent_width_available));
    let mut prev_end = 0usize;
    for (i, r) in remaining_wrapped.iter().enumerate() {
        if r.is_empty() {
            continue;
        }
        // Each continuation line has `Some(joiner)`. The joiner may be empty (e.g. splitting a
        // long word), but the distinction from `None` is important: `None` represents a hard break.
        let joiner = if i == 0 {
            joiner_first.clone()
        } else {
            remaining[prev_end..r.start].to_string()
        };
        prev_end = r.end;
        // Ranges from `remaining` are relative; shift back into `flat` coordinates.
        let offset_range = (r.start + base)..(r.end + base);
        let subsequent_line = build_wrapped_line_from_range(
            rt_opts.subsequent_indent.clone(),
            line,
            &span_bounds,
            &offset_range,
        );
        out.push(subsequent_line);
        joiners.push(Some(joiner));
    }
    (out, joiners)
}
/// Like `word_wrap_lines`, but also returns a parallel vector of soft-wrap joiners.
///
/// The joiner is `None` when the line break is a hard break (between input lines), and `Some`
/// when the line break is a soft wrap continuation produced by the wrapping algorithm.
#[allow(private_bounds)] // IntoLineInput isn't public, but it doesn't really need to be.
pub(crate) fn word_wrap_lines_with_joiners<'a, I, O, L>(
    lines: I,
    width_or_options: O,
) -> (Vec<Line<'static>>, Vec<Option<String>>)
where
    I: IntoIterator<Item = L>,
    L: IntoLineInput<'a>,
    O: Into<RtOptions<'a>>,
{
    let base_opts: RtOptions<'a> = width_or_options.into();
    // After the first input line, the subsequent indent doubles as the initial one.
    let continuation_opts = base_opts
        .clone()
        .initial_indent(base_opts.subsequent_indent.clone());
    let mut out: Vec<Line<'static>> = Vec::new();
    let mut joiners: Vec<Option<String>> = Vec::new();
    for (idx, line) in lines.into_iter().enumerate() {
        let line_input = line.into_line_input();
        let opts = if idx == 0 {
            base_opts.clone()
        } else {
            continuation_opts.clone()
        };
        let (wrapped, wrapped_joiners) = word_wrap_line_with_joiners(line_input.as_ref(), opts);
        for (wrapped_line, joiner) in wrapped.into_iter().zip(wrapped_joiners) {
            out.push(crate::render::line_utils::line_to_static(&wrapped_line));
            joiners.push(joiner);
        }
    }
    (out, joiners)
}
/// Utilities to allow wrapping either borrowed or owned lines.
#[derive(Debug)]
enum LineInput<'a> {
    Borrowed(&'a Line<'a>),
    Owned(Line<'a>),
}
impl<'a> LineInput<'a> {
    /// Borrow the underlying line regardless of ownership.
    fn as_ref(&self) -> &Line<'a> {
        match self {
            LineInput::Borrowed(line) => line,
            LineInput::Owned(line) => line,
        }
    }
}
/// This trait makes it easier to pass whatever we need into word_wrap_lines.
trait IntoLineInput<'a> {
    fn into_line_input(self) -> LineInput<'a>;
}
// Borrowed references wrap without taking ownership.
impl<'a> IntoLineInput<'a> for &'a Line<'a> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Borrowed(self)
    }
}
impl<'a> IntoLineInput<'a> for &'a mut Line<'a> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Borrowed(self)
    }
}
impl<'a> IntoLineInput<'a> for Line<'a> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(self)
    }
}
// String-like and span-like inputs are converted into owned `Line`s.
impl<'a> IntoLineInput<'a> for String {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(Line::from(self))
    }
}
impl<'a> IntoLineInput<'a> for &'a str {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(Line::from(self))
    }
}
impl<'a> IntoLineInput<'a> for Cow<'a, str> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(Line::from(self))
    }
}
impl<'a> IntoLineInput<'a> for Span<'a> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(Line::from(self))
    }
}
impl<'a> IntoLineInput<'a> for Vec<Span<'a>> {
    fn into_line_input(self) -> LineInput<'a> {
        LineInput::Owned(Line::from(self))
    }
}
/// Wrap a sequence of lines, applying the initial indent only to the very first
/// output line, and using the subsequent indent for all later wrapped pieces.
#[allow(private_bounds)] // IntoLineInput isn't public, but it doesn't really need to be.
pub(crate) fn word_wrap_lines<'a, I, O, L>(lines: I, width_or_options: O) -> Vec<Line<'static>>
where
    I: IntoIterator<Item = L>,
    L: IntoLineInput<'a>,
    O: Into<RtOptions<'a>>,
{
    let base_opts: RtOptions<'a> = width_or_options.into();
    // After the first input line, the subsequent indent doubles as the initial one.
    let continuation_opts = base_opts
        .clone()
        .initial_indent(base_opts.subsequent_indent.clone());
    let mut out: Vec<Line<'static>> = Vec::new();
    for (idx, line) in lines.into_iter().enumerate() {
        let line_input = line.into_line_input();
        let opts = if idx == 0 {
            base_opts.clone()
        } else {
            continuation_opts.clone()
        };
        let wrapped = word_wrap_line(line_input.as_ref(), opts);
        push_owned_lines(&wrapped, &mut out);
    }
    out
}
/// Like `word_wrap_lines`, but operates on borrowed lines and keeps the
/// borrows in the output instead of converting to `'static`.
#[allow(dead_code)]
pub(crate) fn word_wrap_lines_borrowed<'a, I, O>(lines: I, width_or_options: O) -> Vec<Line<'a>>
where
    I: IntoIterator<Item = &'a Line<'a>>,
    O: Into<RtOptions<'a>>,
{
    let base_opts: RtOptions<'a> = width_or_options.into();
    let mut out: Vec<Line<'a>> = Vec::new();
    for (idx, line) in lines.into_iter().enumerate() {
        // Only the very first input line receives the configured initial indent.
        let opts = if idx == 0 {
            base_opts.clone()
        } else {
            base_opts
                .clone()
                .initial_indent(base_opts.subsequent_indent.clone())
        };
        out.extend(word_wrap_line(line, opts));
    }
    out
}
/// Slice `original` down to the spans overlapping `range`, where `range` is a
/// byte range into the flattened (concatenated) span text.
fn slice_line_spans<'a>(
    original: &'a Line<'a>,
    span_bounds: &[(Range<usize>, ratatui::style::Style)],
    range: &Range<usize>,
) -> Line<'a> {
    let start_byte = range.start;
    let end_byte = range.end;
    let mut acc: Vec<Span<'a>> = Vec::new();
    // NOTE: the loop variable `range` shadows the parameter; its bounds were
    // already captured in `start_byte`/`end_byte` above.
    for (i, (range, style)) in span_bounds.iter().enumerate() {
        let s = range.start;
        let e = range.end;
        // Span ends before the requested slice starts: skip it.
        if e <= start_byte {
            continue;
        }
        // Span starts at/after the slice end: nothing further can overlap.
        if s >= end_byte {
            break;
        }
        let seg_start = start_byte.max(s);
        let seg_end = end_byte.min(e);
        if seg_end > seg_start {
            // Translate flat-string offsets into offsets local to this span.
            let local_start = seg_start - s;
            let local_end = seg_end - s;
            let content = original.spans[i].content.as_ref();
            let slice = &content[local_start..local_end];
            acc.push(Span {
                style: *style,
                content: std::borrow::Cow::Borrowed(slice),
            });
        }
        if e >= end_byte {
            break;
        }
    }
    // Preserve the line-level style and alignment of the original.
    Line {
        style: original.style,
        alignment: original.alignment,
        spans: acc,
    }
}
#[cfg(test)]
mod tests {
use super::*;
use itertools::Itertools as _;
use pretty_assertions::assert_eq;
use ratatui::style::Color;
use ratatui::style::Stylize;
use std::string::ToString;
fn concat_line(line: &Line) -> String {
line.spans
.iter()
.map(|s| s.content.as_ref())
.collect::<String>()
}
#[test]
fn trivial_unstyled_no_indents_wide_width() {
let line = Line::from("hello");
let out = word_wrap_line(&line, 10);
assert_eq!(out.len(), 1);
assert_eq!(concat_line(&out[0]), "hello");
}
#[test]
fn simple_unstyled_wrap_narrow_width() {
let line = Line::from("hello world");
let out = word_wrap_line(&line, 5);
assert_eq!(out.len(), 2);
assert_eq!(concat_line(&out[0]), "hello");
assert_eq!(concat_line(&out[1]), "world");
}
#[test]
fn simple_styled_wrap_preserves_styles() {
let line = Line::from(vec!["hello ".red(), "world".into()]);
let out = word_wrap_line(&line, 6);
assert_eq!(out.len(), 2);
// First line should carry the red style
assert_eq!(concat_line(&out[0]), "hello");
assert_eq!(out[0].spans.len(), 1);
assert_eq!(out[0].spans[0].style.fg, Some(Color::Red));
// Second line is unstyled
assert_eq!(concat_line(&out[1]), "world");
assert_eq!(out[1].spans.len(), 1);
assert_eq!(out[1].spans[0].style.fg, None);
}
#[test]
fn with_initial_and_subsequent_indents() {
let opts = RtOptions::new(8)
.initial_indent(Line::from("- "))
.subsequent_indent(Line::from(" "));
let line = Line::from("hello world foo");
let out = word_wrap_line(&line, opts);
// Expect three lines with proper prefixes
assert!(concat_line(&out[0]).starts_with("- "));
assert!(concat_line(&out[1]).starts_with(" "));
assert!(concat_line(&out[2]).starts_with(" "));
// And content roughly segmented
assert_eq!(concat_line(&out[0]), "- hello");
assert_eq!(concat_line(&out[1]), " world");
assert_eq!(concat_line(&out[2]), " foo");
}
#[test]
fn empty_initial_indent_subsequent_spaces() {
let opts = RtOptions::new(8)
.initial_indent(Line::from(""))
.subsequent_indent(Line::from(" "));
let line = Line::from("hello world foobar");
let out = word_wrap_line(&line, opts);
assert!(concat_line(&out[0]).starts_with("hello"));
for l in &out[1..] {
assert!(concat_line(l).starts_with(" "));
}
}
#[test]
fn empty_input_yields_single_empty_line() {
let line = Line::from("");
let out = word_wrap_line(&line, 10);
assert_eq!(out.len(), 1);
assert_eq!(concat_line(&out[0]), "");
}
#[test]
fn leading_spaces_preserved_on_first_line() {
let line = Line::from(" hello");
let out = word_wrap_line(&line, 8);
assert_eq!(out.len(), 1);
assert_eq!(concat_line(&out[0]), " hello");
}
#[test]
fn multiple_spaces_between_words_dont_start_next_line_with_spaces() {
let line = Line::from("hello world");
let out = word_wrap_line(&line, 8);
assert_eq!(out.len(), 2);
assert_eq!(concat_line(&out[0]), "hello");
assert_eq!(concat_line(&out[1]), "world");
}
#[test]
fn break_words_false_allows_overflow_for_long_word() {
let opts = RtOptions::new(5).break_words(false);
let line = Line::from("supercalifragilistic");
let out = word_wrap_line(&line, opts);
assert_eq!(out.len(), 1);
assert_eq!(concat_line(&out[0]), "supercalifragilistic");
}
#[test]
fn hyphen_splitter_breaks_at_hyphen() {
let line = Line::from("hello-world");
let out = word_wrap_line(&line, 7);
assert_eq!(out.len(), 2);
assert_eq!(concat_line(&out[0]), "hello-");
assert_eq!(concat_line(&out[1]), "world");
}
#[test]
fn indent_consumes_width_leaving_one_char_space() {
let opts = RtOptions::new(4)
.initial_indent(Line::from(">>>>"))
.subsequent_indent(Line::from("--"));
let line = Line::from("hello");
let out = word_wrap_line(&line, opts);
assert_eq!(out.len(), 3);
assert_eq!(concat_line(&out[0]), ">>>>h");
assert_eq!(concat_line(&out[1]), "--el");
assert_eq!(concat_line(&out[2]), "--lo");
}
#[test]
fn wide_unicode_wraps_by_display_width() {
let line = Line::from("😀😀😀");
let out = word_wrap_line(&line, 4);
assert_eq!(out.len(), 2);
assert_eq!(concat_line(&out[0]), "😀😀");
assert_eq!(concat_line(&out[1]), "😀");
}
#[test]
fn styled_split_within_span_preserves_style() {
use ratatui::style::Stylize;
let line = Line::from(vec!["abcd".red()]);
let out = word_wrap_line(&line, 2);
assert_eq!(out.len(), 2);
assert_eq!(out[0].spans.len(), 1);
assert_eq!(out[1].spans.len(), 1);
assert_eq!(out[0].spans[0].style.fg, Some(Color::Red));
assert_eq!(out[1].spans[0].style.fg, Some(Color::Red));
assert_eq!(concat_line(&out[0]), "ab");
assert_eq!(concat_line(&out[1]), "cd");
}
#[test]
fn wrap_line_with_joiners_matches_word_wrap_line_output() {
let opts = RtOptions::new(8)
.initial_indent(Line::from("- "))
.subsequent_indent(Line::from(" "));
let line = Line::from(vec!["hello ".red(), "world".into()]);
let out = word_wrap_line(&line, opts.clone());
let (with_joiners, joiners) = word_wrap_line_with_joiners(&line, opts);
assert_eq!(
with_joiners.iter().map(concat_line).collect_vec(),
out.iter().map(concat_line).collect_vec()
);
assert_eq!(joiners.len(), with_joiners.len());
assert_eq!(
joiners.first().cloned().unwrap_or(Some("x".to_string())),
None
);
}
#[test]
fn wrap_line_with_joiners_includes_skipped_spaces() {
let line = Line::from("hello world");
let (wrapped, joiners) = word_wrap_line_with_joiners(&line, 8);
assert_eq!(
wrapped.iter().map(concat_line).collect_vec(),
vec!["hello", "world"]
);
assert_eq!(joiners, vec![None, Some(" ".to_string())]);
}
#[test]
fn wrap_line_with_joiners_uses_empty_joiner_for_mid_word_split() {
let line = Line::from("abcd");
let (wrapped, joiners) = word_wrap_line_with_joiners(&line, 2);
assert_eq!(
wrapped.iter().map(concat_line).collect_vec(),
vec!["ab", "cd"]
);
assert_eq!(joiners, vec![None, Some("".to_string())]);
}
#[test]
fn wrap_lines_with_joiners_marks_hard_breaks_between_input_lines() {
let (wrapped, joiners) =
word_wrap_lines_with_joiners([Line::from("hello world"), Line::from("foo bar")], 5);
assert_eq!(
wrapped.iter().map(concat_line).collect_vec(),
vec!["hello", "world", "foo", "bar"]
);
assert_eq!(
joiners,
vec![None, Some(" ".to_string()), None, Some(" ".to_string())]
);
}
#[test]
fn wrap_lines_applies_initial_indent_only_once() {
let opts = RtOptions::new(8)
.initial_indent(Line::from("- "))
.subsequent_indent(Line::from(" "));
let lines = vec![Line::from("hello world"), Line::from("foo bar baz")];
let out = word_wrap_lines(lines, opts);
// Expect: first line prefixed with "- ", subsequent wrapped pieces with " "
// and for the second input line, there should be no "- " prefix on its first piece
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert!(rendered[0].starts_with("- "));
for r in rendered.iter().skip(1) {
assert!(r.starts_with(" "));
}
}
#[test]
fn wrap_lines_without_indents_is_concat_of_single_wraps() {
let lines = vec![Line::from("hello"), Line::from("world!")];
let out = word_wrap_lines(lines, 10);
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert_eq!(rendered, vec!["hello", "world!"]);
}
#[test]
fn wrap_lines_borrowed_applies_initial_indent_only_once() {
let opts = RtOptions::new(8)
.initial_indent(Line::from("- "))
.subsequent_indent(Line::from(" "));
let lines = [Line::from("hello world"), Line::from("foo bar baz")];
let out = word_wrap_lines_borrowed(lines.iter(), opts);
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert!(rendered.first().unwrap().starts_with("- "));
for r in rendered.iter().skip(1) {
assert!(r.starts_with(" "));
}
}
#[test]
fn wrap_lines_borrowed_without_indents_is_concat_of_single_wraps() {
let lines = [Line::from("hello"), Line::from("world!")];
let out = word_wrap_lines_borrowed(lines.iter(), 10);
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert_eq!(rendered, vec!["hello", "world!"]);
}
#[test]
fn wrap_lines_accepts_borrowed_iterators() {
let lines = [Line::from("hello world"), Line::from("foo bar baz")];
let out = word_wrap_lines(lines, 10);
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert_eq!(rendered, vec!["hello", "world", "foo bar", "baz"]);
}
#[test]
fn wrap_lines_accepts_str_slices() {
let lines = ["hello world", "goodnight moon"];
let out = word_wrap_lines(lines, 12);
let rendered: Vec<String> = out.iter().map(concat_line).collect();
assert_eq!(rendered, vec!["hello world", "goodnight", "moon"]);
}
#[test]
fn line_height_counts_double_width_emoji() {
let line = "😀😀😀".into(); // each emoji ~ width 2
assert_eq!(word_wrap_line(&line, 4).len(), 2);
assert_eq!(word_wrap_line(&line, 2).len(), 3);
assert_eq!(word_wrap_line(&line, 6).len(), 1);
}
#[test]
fn word_wrap_does_not_split_words_simple_english() {
let sample = "Years passed, and Willowmere thrived in peace and friendship. Mira’s herb garden flourished with both ordinary and enchanted plants, and travelers spoke of the kindness of the woman who tended them.";
let line = Line::from(sample);
let lines = [line];
// Force small width to exercise wrapping at spaces.
let wrapped = word_wrap_lines_borrowed(&lines, 40);
let joined: String = wrapped.iter().map(ToString::to_string).join("\n");
assert_eq!(
joined,
r#"Years passed, and Willowmere thrived in
peace and friendship. Mira’s herb garden
flourished with both ordinary and
enchanted plants, and travelers spoke of
the kindness of the woman who tended
them."#
);
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/oss_selection.rs | codex-rs/tui2/src/oss_selection.rs | use std::io;
use std::sync::LazyLock;
use codex_core::DEFAULT_LMSTUDIO_PORT;
use codex_core::DEFAULT_OLLAMA_PORT;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::config::set_default_oss_provider;
use crossterm::event::Event;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::{self};
use crossterm::execute;
use crossterm::terminal::EnterAlternateScreen;
use crossterm::terminal::LeaveAlternateScreen;
use crossterm::terminal::disable_raw_mode;
use crossterm::terminal::enable_raw_mode;
use ratatui::Terminal;
use ratatui::backend::CrosstermBackend;
use ratatui::buffer::Buffer;
use ratatui::layout::Alignment;
use ratatui::layout::Constraint;
use ratatui::layout::Direction;
use ratatui::layout::Layout;
use ratatui::layout::Margin;
use ratatui::layout::Rect;
use ratatui::prelude::*;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Widget;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use std::time::Duration;
/// A local OSS provider shown in the status list together with the
/// availability detected by probing its default port.
#[derive(Clone)]
struct ProviderOption {
    // Human-readable provider name, e.g. "LM Studio".
    name: String,
    // Result of the HTTP probe against the provider's default port.
    status: ProviderStatus,
}
/// Availability of a local provider as determined by an HTTP probe.
#[derive(Clone)]
enum ProviderStatus {
    Running,
    NotRunning,
    // The probe itself failed (e.g. the HTTP client could not be built),
    // so availability could not be determined either way.
    Unknown,
}
/// Options displayed in the *select* mode.
///
/// The `key` is matched case-insensitively.
struct SelectOption {
    // Rendered button label; the shortcut letter is underlined.
    label: Line<'static>,
    // One-line explanation rendered under the buttons for the focused option.
    description: &'static str,
    // Keyboard shortcut that picks this option directly.
    key: KeyCode,
    // Provider identifier reported back to the caller on selection.
    provider_id: &'static str,
}
/// The fixed set of OSS providers offered by the selection widget, in the
/// order they are rendered as buttons.
static OSS_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
    vec![
        SelectOption {
            key: KeyCode::Char('l'),
            provider_id: LMSTUDIO_OSS_PROVIDER_ID,
            label: Line::from(vec!["L".underlined(), "M Studio".into()]),
            description: "Local LM Studio server (default port 1234)",
        },
        SelectOption {
            key: KeyCode::Char('o'),
            provider_id: OLLAMA_OSS_PROVIDER_ID,
            label: Line::from(vec!["O".underlined(), "llama".into()]),
            description: "Local Ollama server (default port 11434)",
        },
    ]
});
/// Modal widget that lets the user pick between the available local OSS
/// providers.
pub struct OssSelectionWidget<'a> {
    // Shared table of selectable providers (`OSS_SELECT_OPTIONS`).
    // NOTE(review): `&[SelectOption]` would be the more idiomatic type here,
    // but changing it would ripple into the impl.
    select_options: &'a Vec<SelectOption>,
    // Pre-rendered status/instructions paragraph shown above the buttons.
    confirmation_prompt: Paragraph<'a>,
    /// Currently selected index in *select* mode.
    selected_option: usize,
    /// Set to `true` once a decision has been sent – the parent view can then
    /// remove this widget from its queue.
    done: bool,
    // Chosen provider id (or the "__CANCELLED__" sentinel) once decided.
    selection: Option<String>,
}
impl OssSelectionWidget<'_> {
    /// Build the widget and pre-render the status prompt shown above the
    /// provider buttons.
    ///
    /// Currently never fails; the `io::Result` return is kept for interface
    /// stability with callers.
    fn new(lmstudio_status: ProviderStatus, ollama_status: ProviderStatus) -> io::Result<Self> {
        let providers = vec![
            ProviderOption {
                name: "LM Studio".to_string(),
                status: lmstudio_status,
            },
            ProviderOption {
                name: "Ollama".to_string(),
                status: ollama_status,
            },
        ];
        let mut contents: Vec<Line> = vec![
            Line::from(vec![
                "? ".fg(Color::Blue),
                "Select an open-source provider".bold(),
            ]),
            Line::from(""),
            Line::from(" Choose which local AI server to use for your session."),
            Line::from(""),
        ];
        // Add status indicators for each provider
        for provider in &providers {
            let (status_symbol, status_color) = get_status_symbol_and_color(&provider.status);
            contents.push(Line::from(vec![
                Span::raw(" "),
                Span::styled(status_symbol, Style::default().fg(status_color)),
                Span::raw(format!(" {} ", provider.name)),
            ]));
        }
        // Legend and key hints, rendered dimmed.
        contents.push(Line::from(""));
        contents.push(Line::from(" ● Running ○ Not Running").add_modifier(Modifier::DIM));
        contents.push(Line::from(""));
        contents.push(
            Line::from(" Press Enter to select • Ctrl+C to exit").add_modifier(Modifier::DIM),
        );
        let confirmation_prompt = Paragraph::new(contents).wrap(Wrap { trim: false });
        Ok(Self {
            select_options: &OSS_SELECT_OPTIONS,
            confirmation_prompt,
            selected_option: 0,
            done: false,
            selection: None,
        })
    }

    /// Height in terminal rows the prompt needs when wrapped to `width`.
    fn get_confirmation_prompt_height(&self, width: u16) -> u16 {
        // Should cache this for last value of width.
        self.confirmation_prompt.line_count(width) as u16
    }

    /// Process a `KeyEvent` coming from crossterm. Always consumes the event
    /// while the modal is visible.
    /// Process a key event originating from crossterm. As the modal fully
    /// captures input while visible, we don't need to report whether the event
    /// was consumed—callers can assume it always is.
    ///
    /// Returns `Some(provider_id)` once the user has made a decision.
    pub fn handle_key_event(&mut self, key: KeyEvent) -> Option<String> {
        // Only act on key-down; ignore repeat/release events.
        if key.kind == KeyEventKind::Press {
            self.handle_select_key(key);
        }
        if self.done {
            self.selection.clone()
        } else {
            None
        }
    }

    /// Normalize a key for comparison.
    /// - For `KeyCode::Char`, converts to lowercase for case-insensitive matching.
    /// - Other key codes are returned unchanged.
    fn normalize_keycode(code: KeyCode) -> KeyCode {
        match code {
            KeyCode::Char(c) => KeyCode::Char(c.to_ascii_lowercase()),
            other => other,
        }
    }

    /// Interpret one key press in select mode: navigation, confirmation,
    /// cancellation, or a direct shortcut letter.
    fn handle_select_key(&mut self, key_event: KeyEvent) {
        match key_event.code {
            // Ctrl+C: report the cancellation sentinel to the caller.
            KeyCode::Char('c')
                if key_event
                    .modifiers
                    .contains(crossterm::event::KeyModifiers::CONTROL) =>
            {
                self.send_decision("__CANCELLED__".to_string());
            }
            // Left/Right cycle through the options with wrap-around.
            KeyCode::Left => {
                self.selected_option = (self.selected_option + self.select_options.len() - 1)
                    % self.select_options.len();
            }
            KeyCode::Right => {
                self.selected_option = (self.selected_option + 1) % self.select_options.len();
            }
            KeyCode::Enter => {
                let opt = &self.select_options[self.selected_option];
                self.send_decision(opt.provider_id.to_string());
            }
            // NOTE(review): Esc silently selects LM Studio rather than
            // cancelling — confirm this default is intentional.
            KeyCode::Esc => {
                self.send_decision(LMSTUDIO_OSS_PROVIDER_ID.to_string());
            }
            // Any other key: try to match it against an option's shortcut,
            // case-insensitively.
            other => {
                let normalized = Self::normalize_keycode(other);
                if let Some(opt) = self
                    .select_options
                    .iter()
                    .find(|opt| Self::normalize_keycode(opt.key) == normalized)
                {
                    self.send_decision(opt.provider_id.to_string());
                }
            }
        }
    }

    /// Record the decision and mark the widget as finished.
    fn send_decision(&mut self, selection: String) {
        self.selection = Some(selection);
        self.done = true;
    }

    /// Returns `true` once the user has made a decision and the widget no
    /// longer needs to be displayed.
    pub fn is_complete(&self) -> bool {
        self.done
    }

    /// Total height needed: the wrapped prompt plus one row per option.
    pub fn desired_height(&self, width: u16) -> u16 {
        self.get_confirmation_prompt_height(width) + self.select_options.len() as u16
    }
}
impl WidgetRef for &OssSelectionWidget<'_> {
    /// Render the prompt on top, then a row of option buttons (selected one
    /// highlighted) and the focused option's description underneath.
    fn render_ref(&self, area: Rect, buf: &mut Buffer) {
        // Split vertically: wrapped prompt above, interactive area below.
        let prompt_height = self.get_confirmation_prompt_height(area.width);
        let [prompt_chunk, response_chunk] = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Length(prompt_height), Constraint::Min(0)])
            .areas(area);
        // Build one centered label per option; the focused one gets a cyan
        // background.
        let lines: Vec<Line> = self
            .select_options
            .iter()
            .enumerate()
            .map(|(idx, opt)| {
                let style = if idx == self.selected_option {
                    Style::new().bg(Color::Cyan).fg(Color::Black)
                } else {
                    Style::new().bg(Color::DarkGray)
                };
                opt.label.clone().alignment(Alignment::Center).style(style)
            })
            .collect();
        // Title row, button row, then the remaining space for the description.
        let [title_area, button_area, description_area] = Layout::vertical([
            Constraint::Length(1),
            Constraint::Length(1),
            Constraint::Min(0),
        ])
        .areas(response_chunk.inner(Margin::new(1, 0)));
        Line::from("Select provider?").render(title_area, buf);
        self.confirmation_prompt.clone().render(prompt_chunk, buf);
        // Lay the buttons out horizontally, each sized to its label plus
        // 2 cells of padding, with 1 cell of spacing between them.
        let areas = Layout::horizontal(
            lines
                .iter()
                .map(|l| Constraint::Length(l.width() as u16 + 2)),
        )
        .spacing(1)
        .split(button_area);
        for (idx, area) in areas.iter().enumerate() {
            let line = &lines[idx];
            line.render(*area, buf);
        }
        // Description of the currently focused option.
        Line::from(self.select_options[self.selected_option].description)
            .style(Style::new().italic().fg(Color::DarkGray))
            .render(description_area.inner(Margin::new(1, 0)), buf);
    }
}
/// Map a provider status to the glyph and color used in the status list:
/// green dot for running, red circle for stopped, yellow `?` when unknown.
fn get_status_symbol_and_color(status: &ProviderStatus) -> (&'static str, Color) {
    match status {
        ProviderStatus::Running => ("●", Color::Green),
        ProviderStatus::NotRunning => ("○", Color::Red),
        ProviderStatus::Unknown => ("?", Color::Yellow),
    }
}
pub async fn select_oss_provider(codex_home: &std::path::Path) -> io::Result<String> {
// Check provider statuses first
let lmstudio_status = check_lmstudio_status().await;
let ollama_status = check_ollama_status().await;
// Autoselect if only one is running
match (&lmstudio_status, &ollama_status) {
(ProviderStatus::Running, ProviderStatus::NotRunning) => {
let provider = LMSTUDIO_OSS_PROVIDER_ID.to_string();
return Ok(provider);
}
(ProviderStatus::NotRunning, ProviderStatus::Running) => {
let provider = OLLAMA_OSS_PROVIDER_ID.to_string();
return Ok(provider);
}
_ => {
// Both running or both not running - show UI
}
}
let mut widget = OssSelectionWidget::new(lmstudio_status, ollama_status)?;
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let result = loop {
terminal.draw(|f| {
(&widget).render_ref(f.area(), f.buffer_mut());
})?;
if let Event::Key(key_event) = event::read()?
&& let Some(selection) = widget.handle_key_event(key_event)
{
break Ok(selection);
}
};
disable_raw_mode()?;
execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
// If the user manually selected an OSS provider, we save it as the
// default one to use later.
if let Ok(ref provider) = result
&& let Err(e) = set_default_oss_provider(codex_home, provider)
{
tracing::warn!("Failed to save OSS provider preference: {e}");
}
result
}
/// Probe the default LM Studio port and classify the result.
async fn check_lmstudio_status() -> ProviderStatus {
    match check_port_status(DEFAULT_LMSTUDIO_PORT).await {
        Ok(running) => {
            if running {
                ProviderStatus::Running
            } else {
                ProviderStatus::NotRunning
            }
        }
        Err(_) => ProviderStatus::Unknown,
    }
}
/// Probe the default Ollama port and classify the result.
async fn check_ollama_status() -> ProviderStatus {
    check_port_status(DEFAULT_OLLAMA_PORT)
        .await
        .map(|running| {
            if running {
                ProviderStatus::Running
            } else {
                ProviderStatus::NotRunning
            }
        })
        .unwrap_or(ProviderStatus::Unknown)
}
/// Returns whether an HTTP server answers successfully on `localhost:{port}`.
///
/// A failed request (connection refused, timeout, …) means nothing is
/// listening, so it maps to `Ok(false)` rather than an error.
async fn check_port_status(port: u16) -> io::Result<bool> {
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(2))
        .build()
        .map_err(io::Error::other)?;
    let url = format!("http://localhost:{port}");
    Ok(client
        .get(&url)
        .send()
        .await
        .map(|response| response.status().is_success())
        .unwrap_or(false))
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/get_git_diff.rs | codex-rs/tui2/src/get_git_diff.rs | //! Utility to compute the current Git diff for the working directory.
//!
//! The implementation mirrors the behaviour of the TypeScript version in
//! `codex-cli`: it returns the diff for tracked changes as well as any
//! untracked files. When the current directory is not inside a Git
//! repository, the function returns `Ok((false, String::new()))`.
use std::io;
use std::path::Path;
use std::process::Stdio;
use tokio::process::Command;
/// Return value of [`get_git_diff`].
///
/// * `bool` – Whether the current working directory is inside a Git repo.
/// * `String` – The concatenated diff (may be empty).
pub(crate) async fn get_git_diff() -> io::Result<(bool, String)> {
    // First check if we are inside a Git repository.
    if !inside_git_repo().await? {
        return Ok((false, String::new()));
    }
    // Run tracked diff and untracked file listing in parallel.
    let (tracked_diff_res, untracked_output_res) = tokio::join!(
        run_git_capture_diff(&["diff", "--color"]),
        run_git_capture_stdout(&["ls-files", "--others", "--exclude-standard"]),
    );
    let tracked_diff = tracked_diff_res?;
    let untracked_output = untracked_output_res?;
    let mut untracked_diff = String::new();
    // `git diff --no-index` needs a concrete path to diff against; diffing an
    // untracked file against the platform's null device renders it as a pure
    // addition.
    let null_device: &Path = if cfg!(windows) {
        Path::new("NUL")
    } else {
        Path::new("/dev/null")
    };
    let null_path = null_device.to_str().unwrap_or("/dev/null").to_string();
    // Diff each untracked file in its own concurrent task.
    let mut join_set: tokio::task::JoinSet<io::Result<String>> = tokio::task::JoinSet::new();
    for file in untracked_output
        .split('\n')
        .map(str::trim)
        .filter(|s| !s.is_empty())
    {
        let null_path = null_path.clone();
        let file = file.to_string();
        join_set.spawn(async move {
            let args = ["diff", "--color", "--no-index", "--", &null_path, &file];
            run_git_capture_diff(&args).await
        });
    }
    // NOTE(review): results are appended in task-completion order, so the
    // ordering of untracked-file chunks may vary between runs — confirm this
    // is acceptable for the consumer.
    while let Some(res) = join_set.join_next().await {
        match res {
            Ok(Ok(diff)) => untracked_diff.push_str(&diff),
            // The file may have disappeared between listing and diffing.
            Ok(Err(err)) if err.kind() == io::ErrorKind::NotFound => {}
            Ok(Err(err)) => return Err(err),
            // Task panicked or was cancelled: skip that file's diff.
            Err(_) => {}
        }
    }
    Ok((true, format!("{tracked_diff}{untracked_diff}")))
}
/// Execute `git` with `args` and return its stdout decoded as (lossy) UTF-8.
///
/// Any non-zero exit status is treated as an error.
async fn run_git_capture_stdout(args: &[&str]) -> io::Result<String> {
    let output = Command::new("git")
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .output()
        .await?;
    // Guard clause: bail out on any failure status.
    if !output.status.success() {
        return Err(io::Error::other(format!(
            "git {:?} failed with status {}",
            args, output.status
        )));
    }
    Ok(String::from_utf8_lossy(&output.stdout).into_owned())
}
/// Like [`run_git_capture_stdout`], but additionally accepts exit status 1,
/// which `git diff` uses to signal "differences were found".
async fn run_git_capture_diff(args: &[&str]) -> io::Result<String> {
    let output = Command::new("git")
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .output()
        .await?;
    // Exit code 1 is not a failure for diff-style commands.
    let acceptable = output.status.success() || output.status.code() == Some(1);
    if acceptable {
        Ok(String::from_utf8_lossy(&output.stdout).into_owned())
    } else {
        Err(io::Error::other(format!(
            "git {:?} failed with status {}",
            args, output.status
        )))
    }
}
/// Determine if the current directory is inside a Git work tree.
///
/// A missing `git` binary is treated the same as "not a repository".
async fn inside_git_repo() -> io::Result<bool> {
    let status = Command::new("git")
        .args(["rev-parse", "--is-inside-work-tree"])
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .await;
    match status {
        Ok(s) => Ok(s.success()),
        // git not installed
        Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false),
        Err(e) => Err(e),
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/selection_list.rs | codex-rs/tui2/src/selection_list.rs | use crate::render::renderable::Renderable;
use crate::render::renderable::RowRenderable;
use ratatui::style::Style;
use ratatui::style::Styled as _;
use ratatui::style::Stylize as _;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use unicode_width::UnicodeWidthStr;
/// Build one selectable row: a numbered prefix (with a `›` marker and cyan
/// tint on the selected entry) followed by a soft-wrapping label.
pub(crate) fn selection_option_row(
    index: usize,
    label: String,
    is_selected: bool,
) -> Box<dyn Renderable> {
    // Bind prefix and style together so the two always agree.
    let (prefix, style) = if is_selected {
        (format!("› {}. ", index + 1), Style::default().cyan())
    } else {
        (format!(" {}. ", index + 1), Style::default())
    };
    let prefix_width = UnicodeWidthStr::width(prefix.as_str()) as u16;
    let mut row = RowRenderable::new();
    row.push(prefix_width, prefix.set_style(style));
    // The label takes the remaining width and wraps without trimming spaces.
    row.push(
        u16::MAX,
        Paragraph::new(label)
            .style(style)
            .wrap(Wrap { trim: false }),
    );
    row.into()
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/resume_picker.rs | codex-rs/tui2/src/resume_picker.rs | use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use chrono::DateTime;
use chrono::Utc;
use codex_core::ConversationItem;
use codex_core::ConversationsPage;
use codex_core::Cursor;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;
use codex_core::path_utils;
use codex_protocol::items::TurnItem;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use ratatui::layout::Constraint;
use ratatui::layout::Layout;
use ratatui::layout::Rect;
use ratatui::style::Stylize as _;
use ratatui::text::Line;
use ratatui::text::Span;
use tokio::sync::mpsc;
use tokio_stream::StreamExt;
use tokio_stream::wrappers::UnboundedReceiverStream;
use unicode_width::UnicodeWidthStr;
use crate::diff_render::display_path_for;
use crate::key_hint;
use crate::text_formatting::truncate_text;
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::SessionMetaLine;
/// Number of conversations fetched per page from the rollout recorder.
const PAGE_SIZE: usize = 25;
/// Prefetch the next page once the selection is within this many rows of the
/// end of the list.
const LOAD_NEAR_THRESHOLD: usize = 5;
/// Outcome of the resume picker.
#[derive(Debug, Clone)]
pub enum ResumeSelection {
    // Begin a brand-new session (Esc, or the loop ended without a choice).
    StartFresh,
    // Resume the session recorded in this rollout file.
    Resume(PathBuf),
    // Quit the application entirely (Ctrl+C).
    Exit,
}
/// Everything a background task needs to fetch one page of conversations.
#[derive(Clone)]
struct PageLoadRequest {
    codex_home: PathBuf,
    // Pagination cursor; `None` requests the first page.
    cursor: Option<Cursor>,
    // Echoed back so stale responses can be discarded.
    request_token: usize,
    // Present when the load was triggered by an active search.
    search_token: Option<usize>,
    default_provider: String,
}
/// Callback that kicks off an asynchronous page load in the background.
type PageLoader = Arc<dyn Fn(PageLoadRequest) + Send + Sync>;
/// Messages sent from background loader tasks back to the UI loop.
enum BackgroundEvent {
    PageLoaded {
        // Token of the request this page answers.
        request_token: usize,
        // Search token of the triggering search, if any.
        search_token: Option<usize>,
        page: std::io::Result<ConversationsPage>,
    },
}
/// Interactive session picker that lists recorded rollout files with simple
/// search and pagination. Shows the first user input as the preview, relative
/// time (e.g., "5 seconds ago"), and the absolute path.
pub async fn run_resume_picker(
    tui: &mut Tui,
    codex_home: &Path,
    default_provider: &str,
    show_all: bool,
) -> Result<ResumeSelection> {
    // RAII guard: the alt screen is left again when this scope exits,
    // including on the `?` error paths below.
    let alt = AltScreenGuard::enter(tui);
    let (bg_tx, bg_rx) = mpsc::unbounded_channel();
    let default_provider = default_provider.to_string();
    // Unless --all was requested, restrict the list to sessions whose cwd
    // matches the current directory.
    let filter_cwd = if show_all {
        None
    } else {
        std::env::current_dir().ok()
    };
    // Page loader: spawns a task per request and reports the result through
    // the background channel.
    let loader_tx = bg_tx.clone();
    let page_loader: PageLoader = Arc::new(move |request: PageLoadRequest| {
        let tx = loader_tx.clone();
        tokio::spawn(async move {
            let provider_filter = vec![request.default_provider.clone()];
            let page = RolloutRecorder::list_conversations(
                &request.codex_home,
                PAGE_SIZE,
                request.cursor.as_ref(),
                INTERACTIVE_SESSION_SOURCES,
                Some(provider_filter.as_slice()),
                request.default_provider.as_str(),
            )
            .await;
            // Receiver may already be gone on shutdown; ignore send errors.
            let _ = tx.send(BackgroundEvent::PageLoaded {
                request_token: request.request_token,
                search_token: request.search_token,
                page,
            });
        });
    });
    let mut state = PickerState::new(
        codex_home.to_path_buf(),
        alt.tui.frame_requester(),
        page_loader,
        default_provider.clone(),
        show_all,
        filter_cwd,
    );
    state.start_initial_load();
    state.request_frame();
    let mut tui_events = alt.tui.event_stream().fuse();
    let mut background_events = UnboundedReceiverStream::new(bg_rx).fuse();
    // Main event loop: multiplex terminal events and background page loads.
    loop {
        tokio::select! {
            Some(ev) = tui_events.next() => {
                match ev {
                    TuiEvent::Key(key) => {
                        // Only react to key-down; skip release events.
                        if matches!(key.kind, KeyEventKind::Release) {
                            continue;
                        }
                        if let Some(sel) = state.handle_key(key).await? {
                            return Ok(sel);
                        }
                    }
                    TuiEvent::Draw => {
                        // 4 rows are reserved for header/search/columns/hint.
                        if let Ok(size) = alt.tui.terminal.size() {
                            let list_height = size.height.saturating_sub(4) as usize;
                            state.update_view_rows(list_height);
                            state.ensure_minimum_rows_for_view(list_height);
                        }
                        draw_picker(alt.tui, &state)?;
                    }
                    _ => {}
                }
            }
            Some(event) = background_events.next() => {
                state.handle_background_event(event)?;
            }
            else => break,
        }
    }
    // Fallback – treat as cancel/new
    Ok(ResumeSelection::StartFresh)
}
/// RAII guard that ensures we leave the alt-screen on scope exit.
struct AltScreenGuard<'a> {
    // Mutable handle kept so `Drop` can restore the primary screen.
    tui: &'a mut Tui,
}
impl<'a> AltScreenGuard<'a> {
    /// Switch to the alternate screen. Failure is ignored: the picker still
    /// works (less cleanly) on the primary screen.
    fn enter(tui: &'a mut Tui) -> Self {
        let _ = tui.enter_alt_screen();
        Self { tui }
    }
}
impl Drop for AltScreenGuard<'_> {
    fn drop(&mut self) {
        // Best-effort restore; there is nothing useful to do on failure.
        let _ = self.tui.leave_alt_screen();
    }
}
/// All mutable state of the resume picker UI.
struct PickerState {
    codex_home: PathBuf,
    // Used to schedule redraws after state changes.
    requester: FrameRequester,
    pagination: PaginationState,
    // Every row ingested so far, in load order, deduplicated by path.
    all_rows: Vec<Row>,
    // `all_rows` after applying the cwd filter and the search query.
    filtered_rows: Vec<Row>,
    // Guards against duplicate rows across pages.
    seen_paths: HashSet<PathBuf>,
    // Index into `filtered_rows` of the highlighted row.
    selected: usize,
    // Index of the first visible row (scroll offset).
    scroll_top: usize,
    // Search text typed by the user.
    query: String,
    search_state: SearchState,
    // Monotonic tokens used to discard stale background responses.
    next_request_token: usize,
    next_search_token: usize,
    page_loader: PageLoader,
    // Visible list height in rows, once known from the terminal size.
    view_rows: Option<usize>,
    default_provider: String,
    // When true, list sessions from every directory, not just the cwd.
    show_all: bool,
    // Working directory to filter by when `show_all` is false.
    filter_cwd: Option<PathBuf>,
}
/// Progress of paging through the conversation history.
struct PaginationState {
    // Cursor for the next page; `None` once the history is exhausted.
    next_cursor: Option<Cursor>,
    // Total rollout files scanned across all pages so far.
    num_scanned_files: usize,
    // True when the backend stopped scanning early due to its cap.
    reached_scan_cap: bool,
    loading: LoadingState,
}
/// Whether a background page request is currently in flight.
#[derive(Clone, Copy, Debug)]
enum LoadingState {
    Idle,
    Pending(PendingLoad),
}
/// Identifies the in-flight page request so stale replies can be ignored.
#[derive(Clone, Copy, Debug)]
struct PendingLoad {
    request_token: usize,
    // Set when the load was triggered by an active search.
    search_token: Option<usize>,
}
/// Tracks whether a search is still paging through history looking for a
/// match.
#[derive(Clone, Copy, Debug)]
enum SearchState {
    Idle,
    Active { token: usize },
}
/// Why a page load was requested: user scrolling, or an active search that
/// has not found a match yet.
enum LoadTrigger {
    Scroll,
    Search { token: usize },
}
impl LoadingState {
    /// True while a page request is in flight.
    fn is_pending(&self) -> bool {
        match self {
            LoadingState::Pending(_) => true,
            LoadingState::Idle => false,
        }
    }
}
impl SearchState {
    /// Token of the in-progress search, if any.
    fn active_token(&self) -> Option<usize> {
        if let SearchState::Active { token } = self {
            Some(*token)
        } else {
            None
        }
    }

    /// Whether a background search is currently driving page loads.
    fn is_active(&self) -> bool {
        matches!(self, SearchState::Active { .. })
    }
}
/// One selectable session in the picker list.
#[derive(Clone)]
struct Row {
    // Rollout file to resume.
    path: PathBuf,
    // First user message (or a placeholder) shown as the row text.
    preview: String,
    created_at: Option<DateTime<Utc>>,
    updated_at: Option<DateTime<Utc>>,
    // Session working directory, used for cwd filtering.
    cwd: Option<PathBuf>,
    git_branch: Option<String>,
}
impl PickerState {
    /// Create an empty picker state; no page has been requested yet.
    fn new(
        codex_home: PathBuf,
        requester: FrameRequester,
        page_loader: PageLoader,
        default_provider: String,
        show_all: bool,
        filter_cwd: Option<PathBuf>,
    ) -> Self {
        Self {
            codex_home,
            requester,
            pagination: PaginationState {
                next_cursor: None,
                num_scanned_files: 0,
                reached_scan_cap: false,
                loading: LoadingState::Idle,
            },
            all_rows: Vec::new(),
            filtered_rows: Vec::new(),
            seen_paths: HashSet::new(),
            selected: 0,
            scroll_top: 0,
            query: String::new(),
            search_state: SearchState::Idle,
            next_request_token: 0,
            next_search_token: 0,
            page_loader,
            view_rows: None,
            default_provider,
            show_all,
            filter_cwd,
        }
    }

    /// Ask the TUI to schedule a redraw.
    fn request_frame(&self) {
        self.requester.schedule_frame();
    }

    /// Handle one key press. Returns `Some(selection)` when the key ends the
    /// picker (Enter/Esc/Ctrl+C); otherwise updates navigation or the search
    /// query in place.
    async fn handle_key(&mut self, key: KeyEvent) -> Result<Option<ResumeSelection>> {
        match key.code {
            KeyCode::Esc => return Ok(Some(ResumeSelection::StartFresh)),
            KeyCode::Char('c')
                if key
                    .modifiers
                    .contains(crossterm::event::KeyModifiers::CONTROL) =>
            {
                return Ok(Some(ResumeSelection::Exit));
            }
            KeyCode::Enter => {
                if let Some(row) = self.filtered_rows.get(self.selected) {
                    return Ok(Some(ResumeSelection::Resume(row.path.clone())));
                }
            }
            KeyCode::Up => {
                if self.selected > 0 {
                    self.selected -= 1;
                    self.ensure_selected_visible();
                }
                self.request_frame();
            }
            KeyCode::Down => {
                if self.selected + 1 < self.filtered_rows.len() {
                    self.selected += 1;
                    self.ensure_selected_visible();
                }
                // Moving toward the end may require prefetching the next page.
                self.maybe_load_more_for_scroll();
                self.request_frame();
            }
            KeyCode::PageUp => {
                // One page = the visible list height (default 10).
                let step = self.view_rows.unwrap_or(10).max(1);
                if self.selected > 0 {
                    self.selected = self.selected.saturating_sub(step);
                    self.ensure_selected_visible();
                    self.request_frame();
                }
            }
            KeyCode::PageDown => {
                if !self.filtered_rows.is_empty() {
                    let step = self.view_rows.unwrap_or(10).max(1);
                    let max_index = self.filtered_rows.len().saturating_sub(1);
                    self.selected = (self.selected + step).min(max_index);
                    self.ensure_selected_visible();
                    self.maybe_load_more_for_scroll();
                    self.request_frame();
                }
            }
            KeyCode::Backspace => {
                let mut new_query = self.query.clone();
                new_query.pop();
                self.set_query(new_query);
            }
            KeyCode::Char(c) => {
                // basic text input for search
                if !key
                    .modifiers
                    .contains(crossterm::event::KeyModifiers::CONTROL)
                    && !key.modifiers.contains(crossterm::event::KeyModifiers::ALT)
                {
                    let mut new_query = self.query.clone();
                    new_query.push(c);
                    self.set_query(new_query);
                }
            }
            _ => {}
        }
        Ok(None)
    }

    /// Reset all state and request the first page of conversations.
    fn start_initial_load(&mut self) {
        self.reset_pagination();
        self.all_rows.clear();
        self.filtered_rows.clear();
        self.seen_paths.clear();
        self.search_state = SearchState::Idle;
        self.selected = 0;
        let request_token = self.allocate_request_token();
        self.pagination.loading = LoadingState::Pending(PendingLoad {
            request_token,
            search_token: None,
        });
        self.request_frame();
        (self.page_loader)(PageLoadRequest {
            codex_home: self.codex_home.clone(),
            cursor: None,
            request_token,
            search_token: None,
            default_provider: self.default_provider.clone(),
        });
    }

    /// Ingest a completed page load, ignoring replies whose token no longer
    /// matches the pending request (i.e. superseded loads).
    fn handle_background_event(&mut self, event: BackgroundEvent) -> Result<()> {
        match event {
            BackgroundEvent::PageLoaded {
                request_token,
                search_token,
                page,
            } => {
                let pending = match self.pagination.loading {
                    LoadingState::Pending(pending) => pending,
                    LoadingState::Idle => return Ok(()),
                };
                if pending.request_token != request_token {
                    return Ok(());
                }
                self.pagination.loading = LoadingState::Idle;
                let page = page.map_err(color_eyre::Report::from)?;
                self.ingest_page(page);
                // If this page belonged to a search, keep the search going.
                let completed_token = pending.search_token.or(search_token);
                self.continue_search_if_token_matches(completed_token);
            }
        }
        Ok(())
    }

    /// Forget all pagination progress (used before an initial load).
    fn reset_pagination(&mut self) {
        self.pagination.next_cursor = None;
        self.pagination.num_scanned_files = 0;
        self.pagination.reached_scan_cap = false;
        self.pagination.loading = LoadingState::Idle;
    }

    /// Merge a freshly loaded page into the row set, dedup by path, and
    /// re-apply the current filter.
    fn ingest_page(&mut self, page: ConversationsPage) {
        if let Some(cursor) = page.next_cursor.clone() {
            self.pagination.next_cursor = Some(cursor);
        } else {
            self.pagination.next_cursor = None;
        }
        self.pagination.num_scanned_files = self
            .pagination
            .num_scanned_files
            .saturating_add(page.num_scanned_files);
        if page.reached_scan_cap {
            self.pagination.reached_scan_cap = true;
        }
        let rows = rows_from_items(page.items);
        for row in rows {
            if self.seen_paths.insert(row.path.clone()) {
                self.all_rows.push(row);
            }
        }
        self.apply_filter();
    }

    /// Rebuild `filtered_rows` from `all_rows` using the cwd filter and the
    /// (case-insensitive) search query, then clamp selection and scroll.
    fn apply_filter(&mut self) {
        let base_iter = self
            .all_rows
            .iter()
            .filter(|row| self.row_matches_filter(row));
        if self.query.is_empty() {
            self.filtered_rows = base_iter.cloned().collect();
        } else {
            let q = self.query.to_lowercase();
            self.filtered_rows = base_iter
                .filter(|r| r.preview.to_lowercase().contains(&q))
                .cloned()
                .collect();
        }
        if self.selected >= self.filtered_rows.len() {
            self.selected = self.filtered_rows.len().saturating_sub(1);
        }
        if self.filtered_rows.is_empty() {
            self.scroll_top = 0;
        }
        self.ensure_selected_visible();
        self.request_frame();
    }

    /// Whether a row passes the cwd filter. Rows without a recorded cwd are
    /// hidden unless `show_all` is set or no filter cwd exists.
    fn row_matches_filter(&self, row: &Row) -> bool {
        if self.show_all {
            return true;
        }
        let Some(filter_cwd) = self.filter_cwd.as_ref() else {
            return true;
        };
        let Some(row_cwd) = row.cwd.as_ref() else {
            return false;
        };
        paths_match(row_cwd, filter_cwd)
    }

    /// Update the search query. If the new query has no matches yet and more
    /// history is available, start a background search that keeps paging.
    fn set_query(&mut self, new_query: String) {
        if self.query == new_query {
            return;
        }
        self.query = new_query;
        self.selected = 0;
        self.apply_filter();
        if self.query.is_empty() {
            self.search_state = SearchState::Idle;
            return;
        }
        if !self.filtered_rows.is_empty() {
            self.search_state = SearchState::Idle;
            return;
        }
        if self.pagination.reached_scan_cap || self.pagination.next_cursor.is_none() {
            self.search_state = SearchState::Idle;
            return;
        }
        let token = self.allocate_search_token();
        self.search_state = SearchState::Active { token };
        self.load_more_if_needed(LoadTrigger::Search { token });
    }

    /// Keep an active search paging until a match appears or history runs out.
    fn continue_search_if_needed(&mut self) {
        let Some(token) = self.search_state.active_token() else {
            return;
        };
        if !self.filtered_rows.is_empty() {
            self.search_state = SearchState::Idle;
            return;
        }
        if self.pagination.reached_scan_cap || self.pagination.next_cursor.is_none() {
            self.search_state = SearchState::Idle;
            return;
        }
        self.load_more_if_needed(LoadTrigger::Search { token });
    }

    /// Continue the active search only when the completed load belonged to it
    /// (or carried no search token at all).
    fn continue_search_if_token_matches(&mut self, completed_token: Option<usize>) {
        let Some(active) = self.search_state.active_token() else {
            return;
        };
        if let Some(token) = completed_token
            && token != active
        {
            return;
        }
        self.continue_search_if_needed();
    }

    /// Adjust `scroll_top` so the selected row is within the visible window.
    fn ensure_selected_visible(&mut self) {
        if self.filtered_rows.is_empty() {
            self.scroll_top = 0;
            return;
        }
        let capacity = self.view_rows.unwrap_or(self.filtered_rows.len()).max(1);
        if self.selected < self.scroll_top {
            self.scroll_top = self.selected;
        } else {
            let last_visible = self.scroll_top.saturating_add(capacity - 1);
            if self.selected > last_visible {
                self.scroll_top = self.selected.saturating_sub(capacity - 1);
            }
        }
        // Never leave blank space below the last row.
        let max_start = self.filtered_rows.len().saturating_sub(capacity);
        if self.scroll_top > max_start {
            self.scroll_top = max_start;
        }
    }

    /// Load more pages when the visible area is taller than the current list.
    fn ensure_minimum_rows_for_view(&mut self, minimum_rows: usize) {
        if minimum_rows == 0 {
            return;
        }
        if self.filtered_rows.len() >= minimum_rows {
            return;
        }
        if self.pagination.loading.is_pending() || self.pagination.next_cursor.is_none() {
            return;
        }
        if let Some(token) = self.search_state.active_token() {
            self.load_more_if_needed(LoadTrigger::Search { token });
        } else {
            self.load_more_if_needed(LoadTrigger::Scroll);
        }
    }

    /// Record the visible list height reported by the renderer.
    fn update_view_rows(&mut self, rows: usize) {
        self.view_rows = if rows == 0 { None } else { Some(rows) };
        self.ensure_selected_visible();
    }

    /// Prefetch the next page when the selection nears the end of the list.
    fn maybe_load_more_for_scroll(&mut self) {
        if self.pagination.loading.is_pending() {
            return;
        }
        if self.pagination.next_cursor.is_none() {
            return;
        }
        if self.filtered_rows.is_empty() {
            return;
        }
        let remaining = self.filtered_rows.len().saturating_sub(self.selected + 1);
        if remaining <= LOAD_NEAR_THRESHOLD {
            self.load_more_if_needed(LoadTrigger::Scroll);
        }
    }

    /// Issue the next page request if none is pending and a cursor exists.
    fn load_more_if_needed(&mut self, trigger: LoadTrigger) {
        if self.pagination.loading.is_pending() {
            return;
        }
        let Some(cursor) = self.pagination.next_cursor.clone() else {
            return;
        };
        let request_token = self.allocate_request_token();
        let search_token = match trigger {
            LoadTrigger::Scroll => None,
            LoadTrigger::Search { token } => Some(token),
        };
        self.pagination.loading = LoadingState::Pending(PendingLoad {
            request_token,
            search_token,
        });
        self.request_frame();
        (self.page_loader)(PageLoadRequest {
            codex_home: self.codex_home.clone(),
            cursor: Some(cursor),
            request_token,
            search_token,
            default_provider: self.default_provider.clone(),
        });
    }

    /// Next unique token for a page request (wraps on overflow).
    fn allocate_request_token(&mut self) -> usize {
        let token = self.next_request_token;
        self.next_request_token = self.next_request_token.wrapping_add(1);
        token
    }

    /// Next unique token for a search (wraps on overflow).
    fn allocate_search_token(&mut self) -> usize {
        let token = self.next_search_token;
        self.next_search_token = self.next_search_token.wrapping_add(1);
        token
    }
}
fn rows_from_items(items: Vec<ConversationItem>) -> Vec<Row> {
items.into_iter().map(|item| head_to_row(&item)).collect()
}
/// Build a display row from a conversation item, deriving timestamps, the
/// session cwd/branch, and a preview string from the rollout head.
fn head_to_row(item: &ConversationItem) -> Row {
    // Prefer the recorder's created_at; fall back to the timestamp embedded
    // in the first head entry.
    let created_at = item
        .created_at
        .as_deref()
        .and_then(parse_timestamp_str)
        .or_else(|| item.head.first().and_then(extract_timestamp));
    // updated_at falls back to created_at so the column always has a value
    // when any timestamp is known.
    let updated_at = item
        .updated_at
        .as_deref()
        .and_then(parse_timestamp_str)
        .or(created_at);
    let (cwd, git_branch) = extract_session_meta_from_head(&item.head);
    // Preview is the first user message, trimmed; placeholder when absent.
    let preview = preview_from_head(&item.head)
        .map(|s| s.trim().to_string())
        .filter(|s| !s.is_empty())
        .unwrap_or_else(|| String::from("(no message yet)"));
    Row {
        path: item.path.clone(),
        preview,
        created_at,
        updated_at,
        cwd,
        git_branch,
    }
}
/// Find the first head entry that parses as a `SessionMetaLine` and extract
/// the session cwd and git branch from it; `(None, None)` when absent.
fn extract_session_meta_from_head(head: &[serde_json::Value]) -> (Option<PathBuf>, Option<String>) {
    head.iter()
        .find_map(|value| serde_json::from_value::<SessionMetaLine>(value.clone()).ok())
        .map(|meta_line| {
            (
                Some(meta_line.meta.cwd),
                meta_line.git.and_then(|git| git.branch),
            )
        })
        .unwrap_or((None, None))
}
/// Compare two paths, preferring normalized comparison and falling back to
/// literal equality when either normalization fails.
fn paths_match(a: &Path, b: &Path) -> bool {
    match (
        path_utils::normalize_for_path_comparison(a),
        path_utils::normalize_for_path_comparison(b),
    ) {
        (Ok(ca), Ok(cb)) => ca == cb,
        _ => a == b,
    }
}
/// Parse an RFC 3339 timestamp into UTC; `None` on malformed input.
fn parse_timestamp_str(ts: &str) -> Option<DateTime<Utc>> {
    let parsed = chrono::DateTime::parse_from_rfc3339(ts).ok()?;
    Some(parsed.with_timezone(&Utc))
}
/// Read a `timestamp` string field out of a JSON value and parse it as UTC.
fn extract_timestamp(value: &serde_json::Value) -> Option<DateTime<Utc>> {
    let raw = value.get("timestamp")?.as_str()?;
    let parsed = chrono::DateTime::parse_from_rfc3339(raw).ok()?;
    Some(parsed.with_timezone(&Utc))
}
/// The first user message found in the rollout head, used as the row preview.
fn preview_from_head(head: &[serde_json::Value]) -> Option<String> {
    for value in head {
        // Skip entries that don't deserialize as response items.
        let Ok(item) = serde_json::from_value::<ResponseItem>(value.clone()) else {
            continue;
        };
        if let Some(TurnItem::UserMessage(user)) = codex_core::parse_turn_item(&item) {
            return Some(user.message());
        }
    }
    None
}
/// Render one frame of the picker: header, search line, column headers, the
/// session list, and the key-hint footer.
fn draw_picker(tui: &mut Tui, state: &PickerState) -> std::io::Result<()> {
    // Render full-screen overlay
    let height = tui.terminal.size()?.height;
    tui.draw(height, |frame| {
        let area = frame.area();
        // Three fixed single-row bands on top, one at the bottom, and the
        // list takes everything in between.
        let [header, search, columns, list, hint] = Layout::vertical([
            Constraint::Length(1),
            Constraint::Length(1),
            Constraint::Length(1),
            Constraint::Min(area.height.saturating_sub(4)),
            Constraint::Length(1),
        ])
        .areas(area);
        // Header
        frame.render_widget_ref(
            Line::from(vec!["Resume a previous session".bold().cyan()]),
            header,
        );
        // Search line
        let q = if state.query.is_empty() {
            "Type to search".dim().to_string()
        } else {
            format!("Search: {}", state.query)
        };
        frame.render_widget_ref(Line::from(q), search);
        let metrics = calculate_column_metrics(&state.filtered_rows, state.show_all);
        // Column headers and list
        render_column_headers(frame, columns, &metrics);
        render_list(frame, list, state, &metrics);
        // Hint line
        let hint_line: Line = vec![
            key_hint::plain(KeyCode::Enter).into(),
            " to resume ".dim(),
            " ".dim(),
            key_hint::plain(KeyCode::Esc).into(),
            " to start new ".dim(),
            " ".dim(),
            key_hint::ctrl(KeyCode::Char('c')).into(),
            " to quit ".dim(),
            " ".dim(),
            key_hint::plain(KeyCode::Up).into(),
            "/".dim(),
            key_hint::plain(KeyCode::Down).into(),
            " to browse".dim(),
        ]
        .into();
        frame.render_widget_ref(hint_line, hint);
    })
}
/// Render the visible window of session rows into `area`.
///
/// Each row is `marker | updated | branch | cwd | preview`; columns with
/// zero width are hidden, and the preview gets whatever width remains.
fn render_list(
    frame: &mut crate::custom_terminal::Frame,
    area: Rect,
    state: &PickerState,
    metrics: &ColumnMetrics,
) {
    if area.height == 0 {
        return;
    }
    let rows = &state.filtered_rows;
    // No rows: show a context-sensitive empty/loading message instead.
    if rows.is_empty() {
        let message = render_empty_state_line(state);
        frame.render_widget_ref(message, area);
        return;
    }
    // Visible window: `capacity` rows from the (clamped) scroll offset.
    let capacity = area.height as usize;
    let start = state.scroll_top.min(rows.len().saturating_sub(1));
    let end = rows.len().min(start + capacity);
    let labels = &metrics.labels;
    let mut y = area.y;
    let max_updated_width = metrics.max_updated_width;
    let max_branch_width = metrics.max_branch_width;
    let max_cwd_width = metrics.max_cwd_width;
    // `labels` is built from the same rows slice by calculate_column_metrics,
    // so the identical bounds apply to both.
    for (idx, (row, (updated_label, branch_label, cwd_label))) in rows[start..end]
        .iter()
        .zip(labels[start..end].iter())
        .enumerate()
    {
        let is_sel = start + idx == state.selected;
        let marker = if is_sel { "> ".bold() } else { " ".into() };
        let marker_width = 2usize;
        // A zero max width hides the column; empty labels render as dim "-".
        let updated_span = if max_updated_width == 0 {
            None
        } else {
            Some(Span::from(format!("{updated_label:<max_updated_width$}")).dim())
        };
        let branch_span = if max_branch_width == 0 {
            None
        } else if branch_label.is_empty() {
            Some(
                Span::from(format!(
                    "{empty:<width$}",
                    empty = "-",
                    width = max_branch_width
                ))
                .dim(),
            )
        } else {
            Some(Span::from(format!("{branch_label:<max_branch_width$}")).cyan())
        };
        let cwd_span = if max_cwd_width == 0 {
            None
        } else if cwd_label.is_empty() {
            Some(
                Span::from(format!(
                    "{empty:<width$}",
                    empty = "-",
                    width = max_cwd_width
                ))
                .dim(),
            )
        } else {
            Some(Span::from(format!("{cwd_label:<max_cwd_width$}")).dim())
        };
        // Preview width = total width minus the marker and each visible
        // column (plus its 2-cell gap).
        let mut preview_width = area.width as usize;
        preview_width = preview_width.saturating_sub(marker_width);
        if max_updated_width > 0 {
            preview_width = preview_width.saturating_sub(max_updated_width + 2);
        }
        if max_branch_width > 0 {
            preview_width = preview_width.saturating_sub(max_branch_width + 2);
        }
        if max_cwd_width > 0 {
            preview_width = preview_width.saturating_sub(max_cwd_width + 2);
        }
        // With every column hidden, still offset the preview from the marker.
        let add_leading_gap = max_updated_width == 0 && max_branch_width == 0 && max_cwd_width == 0;
        if add_leading_gap {
            preview_width = preview_width.saturating_sub(2);
        }
        let preview = truncate_text(&row.preview, preview_width);
        let mut spans: Vec<Span> = vec![marker];
        if let Some(updated) = updated_span {
            spans.push(updated);
            spans.push(" ".into());
        }
        if let Some(branch) = branch_span {
            spans.push(branch);
            spans.push(" ".into());
        }
        if let Some(cwd) = cwd_span {
            spans.push(cwd);
            spans.push(" ".into());
        }
        if add_leading_gap {
            spans.push(" ".into());
        }
        spans.push(preview.into());
        let line: Line = spans.into();
        let rect = Rect::new(area.x, y, area.width, 1);
        frame.render_widget_ref(line, rect);
        y = y.saturating_add(1);
    }
    // If an older page is still being fetched, append a trailing loading row.
    if state.pagination.loading.is_pending() && y < area.y.saturating_add(area.height) {
        let loading_line: Line = vec![" ".into(), "Loading older sessions…".italic().dim()].into();
        let rect = Rect::new(area.x, y, area.width, 1);
        frame.render_widget_ref(loading_line, rect);
    }
}
/// Message shown in place of the list when there are no rows to display.
///
/// Distinguishes: search still running, search that hit the scan cap, search
/// with no matches, background page loading, and a truly empty history.
fn render_empty_state_line(state: &PickerState) -> Line<'static> {
    if !state.query.is_empty() {
        // A search is in flight, or more unscanned pages could still match.
        if state.search_state.is_active()
            || (state.pagination.loading.is_pending() && state.pagination.next_cursor.is_some())
        {
            return vec!["Searching…".italic().dim()].into();
        }
        if state.pagination.reached_scan_cap {
            let msg = format!(
                "Search scanned first {} sessions; more may exist",
                state.pagination.num_scanned_files
            );
            return vec![Span::from(msg).italic().dim()].into();
        }
        return vec!["No results for your search".italic().dim()].into();
    }
    // No query: nothing scanned yet means a genuinely empty history.
    if state.all_rows.is_empty() && state.pagination.num_scanned_files == 0 {
        return vec!["No sessions yet".italic().dim()].into();
    }
    if state.pagination.loading.is_pending() {
        return vec!["Loading older sessions…".italic().dim()].into();
    }
    vec!["No sessions yet".italic().dim()].into()
}
/// Render a coarse "N unit(s) ago" label for `ts` relative to now.
///
/// Buckets: seconds (< 1 min), minutes (< 1 h), hours (< 1 day), else days.
fn human_time_ago(ts: DateTime<Utc>) -> String {
    // Pluralize: "1 second ago" vs "5 seconds ago". Deduplicates the four
    // identical singular/plural branches of the original implementation.
    fn unit_ago(n: i64, unit: &str) -> String {
        if n == 1 {
            format!("{n} {unit} ago")
        } else {
            format!("{n} {unit}s ago")
        }
    }
    let now = Utc::now();
    let secs = (now - ts).num_seconds();
    if secs < 60 {
        // Future timestamps (clock skew) clamp to "0 seconds ago".
        unit_ago(secs.max(0), "second")
    } else if secs < 60 * 60 {
        unit_ago(secs / 60, "minute")
    } else if secs < 60 * 60 * 24 {
        unit_ago(secs / 3600, "hour")
    } else {
        unit_ago(secs / (60 * 60 * 24), "day")
    }
}
fn format_updated_label(row: &Row) -> String {
match (row.updated_at, row.created_at) {
(Some(updated), _) => human_time_ago(updated),
(None, Some(created)) => human_time_ago(created),
(None, None) => "-".to_string(),
}
}
/// Render the bold column header row ("Updated / Branch / CWD / Conversation")
/// above the session list; columns with zero width are omitted.
fn render_column_headers(
    frame: &mut crate::custom_terminal::Frame,
    area: Rect,
    metrics: &ColumnMetrics,
) {
    // Nothing to draw when the layout gave us no vertical space.
    if area.height == 0 {
        return;
    }
    // Leading spacer aligns headers with the selection marker in list rows.
    let mut spans: Vec<Span> = vec![" ".into()];
    // Push one bold, left-padded header plus the column gap. Replaces three
    // identical copy-pasted format-and-push blocks.
    let mut push_header = |text: &str, width: usize| {
        spans.push(Span::from(format!("{text:<width$}")).bold());
        spans.push(" ".into());
    };
    if metrics.max_updated_width > 0 {
        push_header("Updated", metrics.max_updated_width);
    }
    if metrics.max_branch_width > 0 {
        push_header("Branch", metrics.max_branch_width);
    }
    if metrics.max_cwd_width > 0 {
        push_header("CWD", metrics.max_cwd_width);
    }
    spans.push("Conversation".bold());
    frame.render_widget_ref(Line::from(spans), area);
}
/// Precomputed column widths and per-row labels for the picker list.
struct ColumnMetrics {
    /// Display width of the widest "Updated" label; 0 hides the column.
    max_updated_width: usize,
    /// Display width of the widest "Branch" label; 0 hides the column.
    max_branch_width: usize,
    /// Display width of the widest "CWD" label; 0 hides the column.
    max_cwd_width: usize,
    /// `(updated, branch, cwd)` labels, parallel to the rows they were
    /// computed from.
    labels: Vec<(String, String, String)>,
}
/// Compute column widths and per-row labels for the given rows.
///
/// Widths start at the header-title widths so columns are never narrower
/// than their titles; branch and cwd labels are left-elided to 24 chars.
fn calculate_column_metrics(rows: &[Row], include_cwd: bool) -> ColumnMetrics {
    // Keep the *end* of the string (the most significant part of a path or
    // branch name), prefixing "…" when eliding. NOTE(review): this counts
    // chars while the column widths below use display width — could drift
    // for wide glyphs; confirm whether that matters in practice.
    fn right_elide(s: &str, max: usize) -> String {
        if s.chars().count() <= max {
            return s.to_string();
        }
        if max <= 1 {
            return "…".to_string();
        }
        let tail_len = max - 1;
        // Take the last `tail_len` chars: reverse, truncate, reverse back.
        let tail: String = s
            .chars()
            .rev()
            .take(tail_len)
            .collect::<String>()
            .chars()
            .rev()
            .collect();
        format!("…{tail}")
    }
    let mut labels: Vec<(String, String, String)> = Vec::with_capacity(rows.len());
    let mut max_updated_width = UnicodeWidthStr::width("Updated");
    let mut max_branch_width = UnicodeWidthStr::width("Branch");
    // The CWD column only participates when requested by the caller.
    let mut max_cwd_width = if include_cwd {
        UnicodeWidthStr::width("CWD")
    } else {
        0
    };
    for row in rows {
        let updated = format_updated_label(row);
        let branch_raw = row.git_branch.clone().unwrap_or_default();
        let branch = right_elide(&branch_raw, 24);
        let cwd = if include_cwd {
            let cwd_raw = row
                .cwd
                .as_ref()
                .map(|p| display_path_for(p, std::path::Path::new("/")))
                .unwrap_or_default();
            right_elide(&cwd_raw, 24)
        } else {
            String::new()
        };
        max_updated_width = max_updated_width.max(UnicodeWidthStr::width(updated.as_str()));
        max_branch_width = max_branch_width.max(UnicodeWidthStr::width(branch.as_str()));
        max_cwd_width = max_cwd_width.max(UnicodeWidthStr::width(cwd.as_str()));
        labels.push((updated, branch, cwd));
    }
    ColumnMetrics {
        max_updated_width,
        max_branch_width,
        max_cwd_width,
        labels,
    }
}
#[cfg(test)]
mod tests {
use super::*;
use chrono::Duration;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyModifiers;
use insta::assert_snapshot;
use serde_json::json;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
fn head_with_ts_and_user_text(ts: &str, texts: &[&str]) -> Vec<serde_json::Value> {
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/history_cell.rs | codex-rs/tui2/src/history_cell.rs | use crate::diff_render::create_diff_summary;
use crate::diff_render::display_path_for;
use crate::exec_cell::CommandOutput;
use crate::exec_cell::OutputLinesParams;
use crate::exec_cell::TOOL_CALL_MAX_LINES;
use crate::exec_cell::output_lines;
use crate::exec_cell::spinner;
use crate::exec_command::relativize_to_home;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::markdown::append_markdown;
use crate::render::line_utils::line_to_static;
use crate::render::line_utils::prefix_lines;
use crate::render::renderable::Renderable;
use crate::style::user_message_style;
use crate::text_formatting::format_and_truncate_tool_result;
use crate::text_formatting::truncate_text;
use crate::tooltips;
use crate::ui_consts::LIVE_PREFIX_COLS;
use crate::update_action::UpdateAction;
use crate::version::CODEX_CLI_VERSION;
use crate::wrapping::RtOptions;
use crate::wrapping::word_wrap_line;
use base64::Engine;
use codex_common::format_env_display::format_env_display;
use codex_core::config::Config;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpAuthStatus;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::SessionConfiguredEvent;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;
use image::DynamicImage;
use image::ImageReader;
use mcp_types::EmbeddedResourceResource;
use mcp_types::Resource;
use mcp_types::ResourceLink;
use mcp_types::ResourceTemplate;
use ratatui::prelude::*;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::style::Styled;
use ratatui::style::Stylize;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Wrap;
use std::any::Any;
use std::collections::HashMap;
use std::io::Cursor;
use std::path::Path;
use std::path::PathBuf;
use std::time::Duration;
use std::time::Instant;
use tracing::error;
use unicode_width::UnicodeWidthStr;
/// Visual transcript lines plus soft-wrap joiners.
///
/// A history cell can produce multiple "visual lines" once prefixes/indents and wrapping are
/// applied. Clipboard reconstruction needs more information than just those lines: users expect
/// soft-wrapped prose to copy as a single logical line, while explicit newlines and spacer rows
/// should remain hard breaks.
///
/// `joiner_before` records, for each output line, whether it is a continuation created by the
/// wrapping algorithm and what string should be inserted at the wrap boundary when joining lines.
/// This avoids heuristics like always inserting a space, and instead preserves the exact whitespace
/// that was skipped at the boundary.
///
/// ## Note for `codex-tui` vs `codex-tui2`
///
/// In `codex-tui`, `HistoryCell` only exposes `transcript_lines(...)` and the UI generally doesn't
/// need to reconstruct clipboard text across off-screen history or soft-wrap boundaries.
///
/// In `codex-tui2`, transcript selection and copy are app-driven (not terminal-driven) and may span
/// content that isn't currently visible. That means we need additional metadata to distinguish hard
/// breaks from soft wraps and to preserve the exact whitespace at wrap boundaries.
///
/// Invariants:
/// - `joiner_before.len() == lines.len()`
/// - `joiner_before[0]` is always `None`
/// - `None` represents a hard break
/// - `Some(joiner)` represents a soft wrap continuation
///
/// Consumers:
/// - `transcript_render` threads joiners through transcript flattening/wrapping.
/// - `transcript_copy` uses them to join wrapped prose while preserving hard breaks.
#[derive(Debug, Clone)]
pub(crate) struct TranscriptLinesWithJoiners {
    /// Visual transcript lines for a history cell, including any indent/prefix spans.
    ///
    /// This is the same shape used for on-screen transcript rendering: a single cell may expand
    /// to multiple `Line`s after wrapping and prefixing.
    pub(crate) lines: Vec<Line<'static>>,
    /// For each output line, whether and how to join it to the previous line when copying.
    ///
    /// E.g. `Some(" ".into())` means the wrap boundary swallowed a single space that must be
    /// re-inserted when the two visual lines are joined back into one logical line.
    pub(crate) joiner_before: Vec<Option<String>>,
}
/// Represents an event to display in the conversation history. Returns its
/// `Vec<Line<'static>>` representation to make it easier to display in a
/// scrollable list.
pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync + Any {
    /// Lines to render in the main conversation view at the given width.
    fn display_lines(&self, width: u16) -> Vec<Line<'static>>;

    /// Number of terminal rows `display_lines` occupies once wrapped.
    fn desired_height(&self, width: u16) -> u16 {
        Paragraph::new(Text::from(self.display_lines(width)))
            .wrap(Wrap { trim: false })
            .line_count(width)
            .try_into()
            .unwrap_or(0)
    }

    /// Lines to render in the transcript view; defaults to `display_lines`.
    fn transcript_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.display_lines(width)
    }

    /// Transcript lines plus soft-wrap joiners used for copy/paste fidelity.
    ///
    /// Most cells can use the default implementation (no joiners), but cells that apply wrapping
    /// should override this and return joiners derived from the same wrapping operation so
    /// clipboard reconstruction can distinguish hard breaks from soft wraps.
    fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        let lines = self.transcript_lines(width);
        TranscriptLinesWithJoiners {
            joiner_before: vec![None; lines.len()],
            lines,
        }
    }

    /// Number of terminal rows `transcript_lines` occupies once wrapped.
    fn desired_transcript_height(&self, width: u16) -> u16 {
        let lines = self.transcript_lines(width);
        // Workaround for ratatui bug: if there's only one line and it's whitespace-only, ratatui gives 2 lines.
        if let [line] = &lines[..]
            && line
                .spans
                .iter()
                .all(|s| s.content.chars().all(char::is_whitespace))
        {
            return 1;
        }
        Paragraph::new(Text::from(lines))
            .wrap(Wrap { trim: false })
            .line_count(width)
            .try_into()
            .unwrap_or(0)
    }

    /// True when this cell continues output started by a previous cell
    /// (e.g. a later chunk of the same streamed agent message).
    fn is_stream_continuation(&self) -> bool {
        false
    }
}
impl Renderable for Box<dyn HistoryCell> {
    fn render(&self, area: Rect, buf: &mut Buffer) {
        let lines = self.display_lines(area.width);
        // Bottom-anchor: when there are more lines than rows, scroll past the
        // overflow so the most recent lines stay visible.
        let y = if area.height == 0 {
            0
        } else {
            let overflow = lines.len().saturating_sub(usize::from(area.height));
            u16::try_from(overflow).unwrap_or(u16::MAX)
        };
        Paragraph::new(Text::from(lines))
            .scroll((y, 0))
            .render(area, buf);
    }

    fn desired_height(&self, width: u16) -> u16 {
        // Qualified call: both `Renderable` and `HistoryCell` declare a
        // `desired_height`; we want the cell's own computation.
        HistoryCell::desired_height(self.as_ref(), width)
    }
}
impl dyn HistoryCell {
    /// Upcast to `&dyn Any` so callers can downcast to a concrete cell type.
    pub(crate) fn as_any(&self) -> &dyn Any {
        self
    }

    /// Mutable variant of [`Self::as_any`].
    pub(crate) fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}
/// History cell for a message typed by the user.
#[derive(Debug)]
pub(crate) struct UserHistoryCell {
    // Raw message text; wrapped and "› "-prefixed at render time.
    pub message: String,
}
impl HistoryCell for UserHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.transcript_lines_with_joiners(width).lines
    }

    fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        let wrap_width = width
            .saturating_sub(
                LIVE_PREFIX_COLS + 1, /* keep a one-column right margin for wrapping */
            )
            .max(1);
        let style = user_message_style();
        let (wrapped, joiner_before) = crate::wrapping::word_wrap_lines_with_joiners(
            self.message.lines().map(|l| Line::from(l).style(style)),
            // Wrap algorithm matches textarea.rs.
            RtOptions::new(usize::from(wrap_width))
                .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit),
        );
        // Surround the prefixed message with styled blank lines; both are
        // hard breaks (joiner = None) so copies keep the separation.
        let mut lines: Vec<Line<'static>> = Vec::new();
        let mut joins: Vec<Option<String>> = Vec::new();
        lines.push(Line::from("").style(style));
        joins.push(None);
        let prefixed = prefix_lines(wrapped, "› ".bold().dim(), " ".into());
        for (line, joiner) in prefixed.into_iter().zip(joiner_before) {
            lines.push(line);
            joins.push(joiner);
        }
        lines.push(Line::from("").style(style));
        joins.push(None);
        TranscriptLinesWithJoiners {
            lines,
            joiner_before: joins,
        }
    }
}
/// Model-reasoning summary rendered as a dim, italic bulleted block.
#[derive(Debug)]
pub(crate) struct ReasoningSummaryCell {
    // Header text; not used in rendering (kept for construction parity).
    _header: String,
    // Markdown body of the reasoning summary.
    content: String,
    // When set, render only in the transcript view, not the main view.
    transcript_only: bool,
}
impl ReasoningSummaryCell {
    pub(crate) fn new(header: String, content: String, transcript_only: bool) -> Self {
        Self {
            _header: header,
            content,
            transcript_only,
        }
    }

    fn lines(&self, width: u16) -> Vec<Line<'static>> {
        self.lines_with_joiners(width).lines
    }

    /// Render the markdown content dim+italic, word-wrapped under a "• " bullet.
    fn lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        let mut lines: Vec<Line<'static>> = Vec::new();
        // Reserve two columns for the bullet indent applied below.
        append_markdown(
            &self.content,
            Some((width as usize).saturating_sub(2)),
            &mut lines,
        );
        // Re-style every span so the whole summary reads as de-emphasized.
        let summary_style = Style::default().dim().italic();
        let summary_lines = lines
            .into_iter()
            .map(|mut line| {
                line.spans = line
                    .spans
                    .into_iter()
                    .map(|span| span.patch_style(summary_style))
                    .collect();
                line
            })
            .collect::<Vec<_>>();
        let (lines, joiner_before) = crate::wrapping::word_wrap_lines_with_joiners(
            &summary_lines,
            RtOptions::new(width as usize)
                .initial_indent("• ".dim().into())
                .subsequent_indent(" ".into()),
        );
        TranscriptLinesWithJoiners {
            lines,
            joiner_before,
        }
    }
}
impl HistoryCell for ReasoningSummaryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        // Transcript-only summaries are hidden from the main view.
        if self.transcript_only {
            Vec::new()
        } else {
            self.lines(width)
        }
    }

    fn desired_height(&self, width: u16) -> u16 {
        if self.transcript_only {
            0
        } else {
            self.lines(width).len() as u16
        }
    }

    fn transcript_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.lines(width)
    }

    fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        self.lines_with_joiners(width)
    }

    fn desired_transcript_height(&self, width: u16) -> u16 {
        // `lines` already wraps to `width`, so the line count is the height.
        self.lines(width).len() as u16
    }
}
/// History cell for one chunk of a streamed agent (assistant) message.
#[derive(Debug)]
pub(crate) struct AgentMessageCell {
    // Pre-rendered markdown lines for this chunk.
    lines: Vec<Line<'static>>,
    // True when this chunk starts the message (gets the "• " bullet).
    is_first_line: bool,
}

impl AgentMessageCell {
    pub(crate) fn new(lines: Vec<Line<'static>>, is_first_line: bool) -> Self {
        Self {
            lines,
            is_first_line,
        }
    }
}
impl HistoryCell for AgentMessageCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.transcript_lines_with_joiners(width).lines
    }

    fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        use ratatui::style::Color;
        let mut out_lines: Vec<Line<'static>> = Vec::new();
        let mut joiner_before: Vec<Option<String>> = Vec::new();
        let mut is_first_output_line = true;
        for line in &self.lines {
            // NOTE(review): a cyan line foreground is treated as the marker
            // for code-block lines — presumably set by the markdown renderer;
            // confirm against append_markdown.
            let is_code_block_line = line.style.fg == Some(Color::Cyan);
            // Only the very first visual line of the message gets the bullet.
            let initial_indent: Line<'static> = if is_first_output_line && self.is_first_line {
                "• ".dim().into()
            } else {
                " ".into()
            };
            let subsequent_indent: Line<'static> = " ".into();
            if is_code_block_line {
                // Code lines get the indent prepended but are never wrapped.
                let mut spans = initial_indent.spans;
                spans.extend(line.spans.iter().cloned());
                out_lines.push(Line::from(spans).style(line.style));
                joiner_before.push(None);
                is_first_output_line = false;
                continue;
            }
            let opts = RtOptions::new(width as usize)
                .initial_indent(initial_indent)
                .subsequent_indent(subsequent_indent.clone());
            let (wrapped, wrapped_joiners) =
                crate::wrapping::word_wrap_line_with_joiners(line, opts);
            for (l, j) in wrapped.into_iter().zip(wrapped_joiners) {
                out_lines.push(line_to_static(&l));
                joiner_before.push(j);
                is_first_output_line = false;
            }
        }
        TranscriptLinesWithJoiners {
            lines: out_lines,
            joiner_before,
        }
    }

    fn is_stream_continuation(&self) -> bool {
        // Every chunk after the first continues the same streamed message.
        !self.is_first_line
    }
}
/// History cell that renders fixed, pre-built lines with no wrapping logic.
#[derive(Debug)]
pub(crate) struct PlainHistoryCell {
    lines: Vec<Line<'static>>,
}

impl PlainHistoryCell {
    pub(crate) fn new(lines: Vec<Line<'static>>) -> Self {
        Self { lines }
    }
}

impl HistoryCell for PlainHistoryCell {
    fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
        self.lines.clone()
    }
}
/// Banner cell announcing that a newer CLI version is available.
#[cfg_attr(debug_assertions, allow(dead_code))]
#[derive(Debug)]
pub(crate) struct UpdateAvailableHistoryCell {
    latest_version: String,
    // How the user can self-update; `None` falls back to a docs link.
    update_action: Option<UpdateAction>,
}

#[cfg_attr(debug_assertions, allow(dead_code))]
impl UpdateAvailableHistoryCell {
    pub(crate) fn new(latest_version: String, update_action: Option<UpdateAction>) -> Self {
        Self {
            latest_version,
            update_action,
        }
    }
}
impl HistoryCell for UpdateAvailableHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        use ratatui_macros::line;
        use ratatui_macros::text;
        // Prefer a concrete update command; otherwise point at the repo.
        let update_instruction = if let Some(update_action) = self.update_action {
            line!["Run ", update_action.command_str().cyan(), " to update."]
        } else {
            line![
                "See ",
                "https://github.com/openai/codex".cyan().underlined(),
                " for installation options."
            ]
        };
        let content = text![
            line![
                padded_emoji("✨").bold().cyan(),
                "Update available!".bold().cyan(),
                " ",
                format!("{CODEX_CLI_VERSION} -> {}", self.latest_version).bold(),
            ],
            update_instruction,
            "",
            "See full release notes:",
            "https://github.com/openai/codex/releases/latest"
                .cyan()
                .underlined(),
        ];
        // Fit the border to the content without exceeding the cell width
        // (the border itself consumes four columns).
        let inner_width = content
            .width()
            .min(usize::from(width.saturating_sub(4)))
            .max(1);
        with_border_with_inner_width(content.lines, inner_width)
    }
}
/// History cell that word-wraps arbitrary text under a pair of prefixes:
/// one for the first visual line, one for wrapped continuation lines.
#[derive(Debug)]
pub(crate) struct PrefixedWrappedHistoryCell {
    text: Text<'static>,
    initial_prefix: Line<'static>,
    subsequent_prefix: Line<'static>,
}

impl PrefixedWrappedHistoryCell {
    pub(crate) fn new(
        text: impl Into<Text<'static>>,
        initial_prefix: impl Into<Line<'static>>,
        subsequent_prefix: impl Into<Line<'static>>,
    ) -> Self {
        Self {
            text: text.into(),
            initial_prefix: initial_prefix.into(),
            subsequent_prefix: subsequent_prefix.into(),
        }
    }
}
impl HistoryCell for PrefixedWrappedHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.transcript_lines_with_joiners(width).lines
    }

    fn desired_height(&self, width: u16) -> u16 {
        // display_lines already wraps to `width`; the count is the height.
        self.display_lines(width).len() as u16
    }

    fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners {
        // Zero width cannot hold any content; bail out before wrapping.
        if width == 0 {
            return TranscriptLinesWithJoiners {
                lines: Vec::new(),
                joiner_before: Vec::new(),
            };
        }
        let opts = RtOptions::new(width.max(1) as usize)
            .initial_indent(self.initial_prefix.clone())
            .subsequent_indent(self.subsequent_prefix.clone());
        let (lines, joiner_before) =
            crate::wrapping::word_wrap_lines_with_joiners(&self.text, opts);
        TranscriptLinesWithJoiners {
            lines,
            joiner_before,
        }
    }
}
/// Collapse a possibly multi-line command into a short one-line snippet:
/// keep only the first line (marking elision with " ...") and cap at 80.
fn truncate_exec_snippet(full_cmd: &str) -> String {
    let first_line = match full_cmd.split_once('\n') {
        Some((first, _)) => format!("{first} ..."),
        None => full_cmd.to_string(),
    };
    truncate_text(&first_line, 80)
}
/// Shell-escaped, truncated one-line rendering of `command` for summaries.
fn exec_snippet(command: &[String]) -> String {
    truncate_exec_snippet(&strip_bash_lc_and_escape(command))
}
/// Build a one-line history cell summarizing the user's approval decision
/// for a command, e.g. `✔ You approved codex to run <cmd> this time`.
pub fn new_approval_decision_cell(
    command: Vec<String>,
    decision: codex_core::protocol::ReviewDecision,
) -> Box<dyn HistoryCell> {
    use codex_core::protocol::ReviewDecision::*;
    // Every decision arm shows the same dimmed command snippet; build it
    // once instead of duplicating the construction in all five arms.
    let snippet = Span::from(exec_snippet(&command)).dim();
    let (symbol, summary): (Span<'static>, Vec<Span<'static>>) = match decision {
        Approved => (
            "✔ ".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " codex to run ".into(),
                snippet,
                " this time".bold(),
            ],
        ),
        ApprovedExecpolicyAmendment { .. } => (
            "✔ ".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " codex to run ".into(),
                snippet,
                " and applied the execpolicy amendment".bold(),
            ],
        ),
        ApprovedForSession => (
            "✔ ".green(),
            vec![
                "You ".into(),
                "approved".bold(),
                " codex to run ".into(),
                snippet,
                " every time this session".bold(),
            ],
        ),
        Denied => (
            "✗ ".red(),
            vec![
                "You ".into(),
                "did not approve".bold(),
                " codex to run ".into(),
                snippet,
            ],
        ),
        Abort => (
            "✗ ".red(),
            vec![
                "You ".into(),
                "canceled".bold(),
                " the request to run ".into(),
                snippet,
            ],
        ),
    };
    // Wrap continuations under the two-column symbol prefix.
    Box::new(PrefixedWrappedHistoryCell::new(
        Line::from(summary),
        symbol,
        " ",
    ))
}
/// Cyan history cell line showing the current review status.
pub(crate) fn new_review_status_line(message: String) -> PlainHistoryCell {
PlainHistoryCell {
lines: vec![Line::from(message.cyan())],
}
}
/// History cell summarizing a set of proposed file changes (a diff),
/// rendered relative to `cwd`.
#[derive(Debug)]
pub(crate) struct PatchHistoryCell {
    changes: HashMap<PathBuf, FileChange>,
    cwd: PathBuf,
}

impl HistoryCell for PatchHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        create_diff_summary(&self.changes, &self.cwd, width as usize)
    }
}
/// Cell for an MCP tool call that returned an image; only a marker line is
/// shown here while the decoded image is held for rendering elsewhere.
#[derive(Debug)]
struct CompletedMcpToolCallWithImageOutput {
    _image: DynamicImage,
}

impl HistoryCell for CompletedMcpToolCallWithImageOutput {
    fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
        vec!["tool result (image output)".into()]
    }
}
pub(crate) const SESSION_HEADER_MAX_INNER_WIDTH: usize = 56; // Just an eyeballed value

/// Width available for content inside a bordered card: the total width minus
/// the four border/padding columns, clamped to `max_inner_width`.
/// Returns `None` when the card cannot fit at all (width < 4).
pub(crate) fn card_inner_width(width: u16, max_inner_width: usize) -> Option<usize> {
    if width < 4 {
        return None;
    }
    Some(usize::from(width.saturating_sub(4)).min(max_inner_width))
}
/// Render `lines` inside a border sized to the widest span in the content.
pub(crate) fn with_border(lines: Vec<Line<'static>>) -> Vec<Line<'static>> {
    with_border_internal(lines, None)
}

/// Render `lines` inside a border whose inner width is at least `inner_width`.
///
/// This is useful when callers have already clamped their content to a
/// specific width and want the border math centralized here instead of
/// duplicating padding logic in the TUI widgets themselves.
pub(crate) fn with_border_with_inner_width(
    lines: Vec<Line<'static>>,
    inner_width: usize,
) -> Vec<Line<'static>> {
    with_border_internal(lines, Some(inner_width))
}

/// Shared implementation: wrap `lines` in `╭─╮ │ … │ ╰─╯` box-drawing rows.
fn with_border_internal(
    lines: Vec<Line<'static>>,
    forced_inner_width: Option<usize>,
) -> Vec<Line<'static>> {
    // Widest visual line, measured in display columns (not bytes/chars).
    let max_line_width = lines
        .iter()
        .map(|line| {
            line.iter()
                .map(|span| UnicodeWidthStr::width(span.content.as_ref()))
                .sum::<usize>()
        })
        .max()
        .unwrap_or(0);
    // A forced width acts as a minimum: never cut off the widest line.
    let content_width = forced_inner_width
        .unwrap_or(max_line_width)
        .max(max_line_width);
    let mut out = Vec::with_capacity(lines.len() + 2);
    // +2 accounts for one column of padding on each side of the content.
    let border_inner_width = content_width + 2;
    out.push(vec![format!("╭{}╮", "─".repeat(border_inner_width)).dim()].into());
    for line in lines.into_iter() {
        let used_width: usize = line
            .iter()
            .map(|span| UnicodeWidthStr::width(span.content.as_ref()))
            .sum();
        let span_count = line.spans.len();
        let mut spans: Vec<Span<'static>> = Vec::with_capacity(span_count + 4);
        spans.push(Span::from("│ ").dim());
        spans.extend(line.into_iter());
        // Right-pad so the right border column lines up on every row.
        if used_width < content_width {
            spans.push(Span::from(" ".repeat(content_width - used_width)).dim());
        }
        spans.push(Span::from(" │").dim());
        out.push(Line::from(spans));
    }
    out.push(vec![format!("╰{}╯", "─".repeat(border_inner_width)).dim()].into());
    out
}
/// Return `emoji` with a trailing hair space (U+200A).
///
/// The hair space alone gives a small, consistent visual gap after the emoji
/// across terminals without the excessive padding of a full space.
pub(crate) fn padded_emoji(emoji: &str) -> String {
    let mut padded = String::with_capacity(emoji.len() + '\u{200A}'.len_utf8());
    padded.push_str(emoji);
    padded.push('\u{200A}');
    padded
}
/// History cell that renders a startup tip as indented **Tip:** markdown.
#[derive(Debug)]
struct TooltipHistoryCell {
    tip: &'static str,
}

impl TooltipHistoryCell {
    fn new(tip: &'static str) -> Self {
        Self { tip }
    }
}

impl HistoryCell for TooltipHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        let indent = " ";
        let indent_width = UnicodeWidthStr::width(indent);
        // Wrap the markdown to the width remaining after the indent.
        let wrap_width = usize::from(width.max(1))
            .saturating_sub(indent_width)
            .max(1);
        let mut lines: Vec<Line<'static>> = Vec::new();
        append_markdown(
            &format!("**Tip:** {}", self.tip),
            Some(wrap_width),
            &mut lines,
        );
        prefix_lines(lines, indent.into(), indent.into())
    }
}
/// Composite cell shown when a session is (re)configured: the header card
/// plus optional help text, tips, and model-change notices. Delegates all
/// rendering to the inner [`CompositeHistoryCell`].
#[derive(Debug)]
pub struct SessionInfoCell(CompositeHistoryCell);

impl HistoryCell for SessionInfoCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.0.display_lines(width)
    }

    fn desired_height(&self, width: u16) -> u16 {
        self.0.desired_height(width)
    }

    fn transcript_lines(&self, width: u16) -> Vec<Line<'static>> {
        self.0.transcript_lines(width)
    }
}
/// Build the session-info cell for a `SessionConfigured` event.
///
/// On the first event this includes the getting-started command list; on
/// later events it may add a random tooltip and a notice when the backend
/// selected a different model than the one requested.
pub(crate) fn new_session_info(
    config: &Config,
    requested_model: &str,
    event: SessionConfiguredEvent,
    is_first_event: bool,
) -> SessionInfoCell {
    let SessionConfiguredEvent {
        model,
        reasoning_effort,
        ..
    } = event;
    // Header box rendered as history (so it appears at the very top)
    let header = SessionHeaderHistoryCell::new(
        model.clone(),
        reasoning_effort,
        config.cwd.clone(),
        CODEX_CLI_VERSION,
    );
    let mut parts: Vec<Box<dyn HistoryCell>> = vec![Box::new(header)];
    if is_first_event {
        // Help lines below the header (new copy and list)
        let help_lines: Vec<Line<'static>> = vec![
            " To get started, describe a task or try one of these commands:"
                .dim()
                .into(),
            Line::from(""),
            Line::from(vec![
                " ".into(),
                "/init".into(),
                " - create an AGENTS.md file with instructions for Codex".dim(),
            ]),
            Line::from(vec![
                " ".into(),
                "/status".into(),
                " - show current session configuration".dim(),
            ]),
            Line::from(vec![
                " ".into(),
                "/approvals".into(),
                " - choose what Codex can do without approval".dim(),
            ]),
            Line::from(vec![
                " ".into(),
                "/model".into(),
                " - choose what model and reasoning effort to use".dim(),
            ]),
            Line::from(vec![
                " ".into(),
                "/review".into(),
                " - review any changes and find issues".dim(),
            ]),
        ];
        parts.push(Box::new(PlainHistoryCell { lines: help_lines }));
    } else {
        // Surface a random tip on subsequent configurations, if enabled.
        if config.show_tooltips
            && let Some(tooltips) = tooltips::random_tooltip().map(TooltipHistoryCell::new)
        {
            parts.push(Box::new(tooltips));
        }
        // Tell the user when the served model differs from the request.
        if requested_model != model {
            let lines = vec![
                "model changed:".magenta().bold().into(),
                format!("requested: {requested_model}").into(),
                format!("used: {model}").into(),
            ];
            parts.push(Box::new(PlainHistoryCell { lines }));
        }
    }
    SessionInfoCell(CompositeHistoryCell { parts })
}
/// Wrap a raw user message in its history cell.
pub(crate) fn new_user_prompt(message: String) -> UserHistoryCell {
    UserHistoryCell { message }
}
/// Bordered ">_ OpenAI Codex" header card showing version, model,
/// reasoning effort, and the working directory.
#[derive(Debug)]
struct SessionHeaderHistoryCell {
    version: &'static str,
    model: String,
    reasoning_effort: Option<ReasoningEffortConfig>,
    directory: PathBuf,
}
impl SessionHeaderHistoryCell {
    fn new(
        model: String,
        reasoning_effort: Option<ReasoningEffortConfig>,
        directory: PathBuf,
        version: &'static str,
    ) -> Self {
        Self {
            version,
            model,
            reasoning_effort,
            directory,
        }
    }

    fn format_directory(&self, max_width: Option<usize>) -> String {
        Self::format_directory_inner(&self.directory, max_width)
    }

    /// Render the directory with `~` substituted for the home directory,
    /// center-truncating when it exceeds `max_width` display columns.
    fn format_directory_inner(directory: &Path, max_width: Option<usize>) -> String {
        let formatted = if let Some(rel) = relativize_to_home(directory) {
            if rel.as_os_str().is_empty() {
                // The directory *is* the home directory.
                "~".to_string()
            } else {
                format!("~{}{}", std::path::MAIN_SEPARATOR, rel.display())
            }
        } else {
            // Outside the home directory: show the path untouched.
            directory.display().to_string()
        };
        if let Some(max_width) = max_width {
            if max_width == 0 {
                return String::new();
            }
            if UnicodeWidthStr::width(formatted.as_str()) > max_width {
                return crate::text_formatting::center_truncate_path(&formatted, max_width);
            }
        }
        formatted
    }

    /// Human-readable reasoning effort, or `None` when not configured.
    fn reasoning_label(&self) -> Option<&'static str> {
        self.reasoning_effort.map(|effort| match effort {
            ReasoningEffortConfig::Minimal => "minimal",
            ReasoningEffortConfig::Low => "low",
            ReasoningEffortConfig::Medium => "medium",
            ReasoningEffortConfig::High => "high",
            ReasoningEffortConfig::XHigh => "xhigh",
            ReasoningEffortConfig::None => "none",
        })
    }
}
impl HistoryCell for SessionHeaderHistoryCell {
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        // Too narrow for a bordered card: render nothing.
        let Some(inner_width) = card_inner_width(width, SESSION_HEADER_MAX_INNER_WIDTH) else {
            return Vec::new();
        };
        let make_row = |spans: Vec<Span<'static>>| Line::from(spans);
        // Title line rendered inside the box: ">_ OpenAI Codex (vX)"
        let title_spans: Vec<Span<'static>> = vec![
            Span::from(">_ ").dim(),
            Span::from("OpenAI Codex").bold(),
            Span::from(" ").dim(),
            Span::from(format!("(v{})", self.version)).dim(),
        ];
        const CHANGE_MODEL_HINT_COMMAND: &str = "/model";
        const CHANGE_MODEL_HINT_EXPLANATION: &str = " to change";
        const DIR_LABEL: &str = "directory:";
        // Pad both row labels to the same width so their values line up.
        let label_width = DIR_LABEL.len();
        let model_label = format!(
            "{model_label:<label_width$}",
            model_label = "model:",
            label_width = label_width
        );
        let reasoning_label = self.reasoning_label();
        let mut model_spans: Vec<Span<'static>> = vec![
            Span::from(format!("{model_label} ")).dim(),
            Span::from(self.model.clone()),
        ];
        if let Some(reasoning) = reasoning_label {
            model_spans.push(Span::from(" "));
            model_spans.push(Span::from(reasoning));
        }
        model_spans.push(" ".dim());
        model_spans.push(CHANGE_MODEL_HINT_COMMAND.cyan());
        model_spans.push(CHANGE_MODEL_HINT_EXPLANATION.dim());
        let dir_label = format!("{DIR_LABEL:<label_width$}");
        let dir_prefix = format!("{dir_label} ");
        // Truncate the directory to the space the prefix leaves inside the card.
        let dir_prefix_width = UnicodeWidthStr::width(dir_prefix.as_str());
        let dir_max_width = inner_width.saturating_sub(dir_prefix_width);
        let dir = self.format_directory(Some(dir_max_width));
        let dir_spans = vec![Span::from(dir_prefix).dim(), Span::from(dir)];
        let lines = vec![
            make_row(title_spans),
            make_row(Vec::new()),
            make_row(model_spans),
            make_row(dir_spans),
        ];
        with_border(lines)
    }
}
/// Cell that concatenates child cells, separating non-empty ones with a
/// blank line.
#[derive(Debug)]
pub(crate) struct CompositeHistoryCell {
    parts: Vec<Box<dyn HistoryCell>>,
}

impl CompositeHistoryCell {
    pub(crate) fn new(parts: Vec<Box<dyn HistoryCell>>) -> Self {
        Self { parts }
    }
}
impl HistoryCell for CompositeHistoryCell {
    /// Concatenate the parts' lines, inserting one blank line between
    /// consecutive non-empty parts; empty parts contribute nothing.
    fn display_lines(&self, width: u16) -> Vec<Line<'static>> {
        let mut rendered: Vec<Line<'static>> = Vec::new();
        for part_lines in self
            .parts
            .iter()
            .map(|part| part.display_lines(width))
            .filter(|lines| !lines.is_empty())
        {
            // `rendered` is non-empty exactly when an earlier non-empty
            // part has already been emitted.
            if !rendered.is_empty() {
                rendered.push(Line::from(""));
            }
            rendered.extend(part_lines);
        }
        rendered
    }
}
/// History cell for an MCP tool call, created when the call starts and
/// updated in place once it completes or is interrupted.
#[derive(Debug)]
pub(crate) struct McpToolCallCell {
    // Correlates completion events with this cell.
    call_id: String,
    // The tool invocation being displayed.
    invocation: McpInvocation,
    // Start of the call; used to compute elapsed time when marking failure.
    start_time: Instant,
    // Set once the call finishes (or is interrupted).
    duration: Option<Duration>,
    // `None` while the call is still running.
    result: Option<Result<mcp_types::CallToolResult, String>>,
    animations_enabled: bool,
}
impl McpToolCallCell {
    /// Creates a cell for a tool call that has just started; the elapsed
    /// timer starts at construction and the result is initially pending.
    pub(crate) fn new(
        call_id: String,
        invocation: McpInvocation,
        animations_enabled: bool,
    ) -> Self {
        Self {
            call_id,
            invocation,
            start_time: Instant::now(),
            duration: None,
            result: None,
            animations_enabled,
        }
    }
    /// The call id used to match completion events to this cell.
    pub(crate) fn call_id(&self) -> &str {
        &self.call_id
    }
    /// Records the call's outcome and duration. If the result contains image
    /// output, returns an additional cell for the caller to append to the
    /// history. The image cell is built from a borrow of `result` before the
    /// result is moved into `self`.
    pub(crate) fn complete(
        &mut self,
        duration: Duration,
        result: Result<mcp_types::CallToolResult, String>,
    ) -> Option<Box<dyn HistoryCell>> {
        let image_cell = try_new_completed_mcp_tool_call_with_image_output(&result)
            .map(|cell| Box::new(cell) as Box<dyn HistoryCell>);
        self.duration = Some(duration);
        self.result = Some(result);
        image_cell
    }
fn success(&self) -> Option<bool> {
match self.result.as_ref() {
Some(Ok(result)) => Some(!result.is_error.unwrap_or(false)),
Some(Err(_)) => Some(false),
None => None,
}
}
pub(crate) fn mark_failed(&mut self) {
let elapsed = self.start_time.elapsed();
self.duration = Some(elapsed);
self.result = Some(Err("interrupted".to_string()));
}
fn render_content_block(block: &mcp_types::ContentBlock, width: usize) -> String {
match block {
mcp_types::ContentBlock::TextContent(text) => {
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/status_indicator_widget.rs | codex-rs/tui2/src/status_indicator_widget.rs | //! A live status indicator that shows the *latest* log line emitted by the
//! application while the agent is processing a long‑running task.
use std::time::Duration;
use std::time::Instant;
use codex_core::protocol::Op;
use crossterm::event::KeyCode;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use unicode_width::UnicodeWidthStr;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
use crate::exec_cell::spinner;
use crate::key_hint;
use crate::render::renderable::Renderable;
use crate::shimmer::shimmer_spans;
use crate::text_formatting::capitalize_first;
use crate::tui::FrameRequester;
use crate::wrapping::RtOptions;
use crate::wrapping::word_wrap_lines;
// Maximum number of wrapped detail lines rendered under the header.
const DETAILS_MAX_LINES: usize = 3;
// Gutter drawn before the first detail line; continuation lines are padded to
// the same display width so wrapped text stays aligned.
const DETAILS_PREFIX: &str = " └ ";
pub(crate) struct StatusIndicatorWidget {
    /// Animated header text (defaults to "Working").
    header: String,
    // Optional detail text rendered (wrapped, dimmed) below the header.
    details: Option<String>,
    // Whether the "(Esc to interrupt)" hint is appended after the timer.
    show_interrupt_hint: bool,
    // Accumulated run time from earlier non-paused periods.
    elapsed_running: Duration,
    // When the timer last (re)started; only meaningful while not paused.
    last_resume_at: Instant,
    // True while the elapsed timer is paused.
    is_paused: bool,
    app_event_tx: AppEventSender,
    frame_requester: FrameRequester,
    animations_enabled: bool,
}
// Render an elapsed-seconds count in the compact form used by the status
// line. Units below the largest non-zero one are zero-padded to two digits:
// 0s, 59s, 1m 00s, 59m 59s, 1h 00m 00s, 2h 03m 09s.
pub fn fmt_elapsed_compact(elapsed_secs: u64) -> String {
    let (hours, rem) = (elapsed_secs / 3600, elapsed_secs % 3600);
    let (minutes, seconds) = (rem / 60, rem % 60);
    match (hours, minutes) {
        (0, 0) => format!("{seconds}s"),
        (0, _) => format!("{minutes}m {seconds:02}s"),
        _ => format!("{hours}h {minutes:02}m {seconds:02}s"),
    }
}
impl StatusIndicatorWidget {
    /// Creates a widget with the default "Working" header, the interrupt hint
    /// visible, and the elapsed timer running from now.
    pub(crate) fn new(
        app_event_tx: AppEventSender,
        frame_requester: FrameRequester,
        animations_enabled: bool,
    ) -> Self {
        Self {
            header: String::from("Working"),
            details: None,
            show_interrupt_hint: true,
            elapsed_running: Duration::ZERO,
            last_resume_at: Instant::now(),
            is_paused: false,
            app_event_tx,
            frame_requester,
            animations_enabled,
        }
    }
    /// Asks the core to interrupt the in-flight task.
    pub(crate) fn interrupt(&self) {
        self.app_event_tx.send(AppEvent::CodexOp(Op::Interrupt));
    }
    /// Update the animated header label (left of the brackets).
    pub(crate) fn update_header(&mut self, header: String) {
        self.header = header;
    }
    /// Update the details text shown below the header. Empty input clears the
    /// details; otherwise leading whitespace is trimmed and the first letter
    /// capitalized.
    pub(crate) fn update_details(&mut self, details: Option<String>) {
        self.details = details
            .filter(|details| !details.is_empty())
            .map(|details| capitalize_first(details.trim_start()));
    }
    #[cfg(test)]
    pub(crate) fn header(&self) -> &str {
        &self.header
    }
    #[cfg(test)]
    pub(crate) fn details(&self) -> Option<&str> {
        self.details.as_deref()
    }
    /// Shows or hides the "(Esc to interrupt)" portion of the status line.
    pub(crate) fn set_interrupt_hint_visible(&mut self, visible: bool) {
        self.show_interrupt_hint = visible;
    }
    #[cfg(test)]
    pub(crate) fn interrupt_hint_visible(&self) -> bool {
        self.show_interrupt_hint
    }
    /// Pauses the elapsed timer as of now (no-op when already paused).
    pub(crate) fn pause_timer(&mut self) {
        self.pause_timer_at(Instant::now());
    }
    /// Resumes the elapsed timer as of now (no-op when already running).
    pub(crate) fn resume_timer(&mut self) {
        self.resume_timer_at(Instant::now());
    }
    /// Pauses the timer at an explicit instant: banks the run time since the
    /// last resume into `elapsed_running`.
    pub(crate) fn pause_timer_at(&mut self, now: Instant) {
        if self.is_paused {
            return;
        }
        self.elapsed_running += now.saturating_duration_since(self.last_resume_at);
        self.is_paused = true;
    }
    /// Resumes the timer at an explicit instant and requests a redraw so the
    /// animation restarts promptly.
    pub(crate) fn resume_timer_at(&mut self, now: Instant) {
        if !self.is_paused {
            return;
        }
        self.last_resume_at = now;
        self.is_paused = false;
        self.frame_requester.schedule_frame();
    }
    // Total run time: banked time plus the current in-progress interval when
    // the timer is not paused.
    fn elapsed_duration_at(&self, now: Instant) -> Duration {
        let mut elapsed = self.elapsed_running;
        if !self.is_paused {
            elapsed += now.saturating_duration_since(self.last_resume_at);
        }
        elapsed
    }
    fn elapsed_seconds_at(&self, now: Instant) -> u64 {
        self.elapsed_duration_at(now).as_secs()
    }
    /// Whole seconds the task has been running, excluding paused periods.
    pub fn elapsed_seconds(&self) -> u64 {
        self.elapsed_seconds_at(Instant::now())
    }
    /// Wrap the details text into a fixed width and return the lines, truncating if necessary.
    fn wrapped_details_lines(&self, width: u16) -> Vec<Line<'static>> {
        let Some(details) = self.details.as_deref() else {
            return Vec::new();
        };
        if width == 0 {
            return Vec::new();
        }
        let prefix_width = UnicodeWidthStr::width(DETAILS_PREFIX);
        let opts = RtOptions::new(usize::from(width))
            .initial_indent(Line::from(DETAILS_PREFIX.dim()))
            .subsequent_indent(Line::from(Span::from(" ".repeat(prefix_width)).dim()))
            .break_words(true);
        let mut out = word_wrap_lines(details.lines().map(|line| vec![line.dim()]), opts);
        if out.len() > DETAILS_MAX_LINES {
            out.truncate(DETAILS_MAX_LINES);
            // Trim the final line and append an ellipsis to signal truncation.
            // NOTE(review): the trim counts chars, not display columns — wide
            // glyphs could still overflow slightly; confirm acceptable.
            let content_width = usize::from(width).saturating_sub(prefix_width).max(1);
            let max_base_len = content_width.saturating_sub(1);
            if let Some(last) = out.last_mut()
                && let Some(span) = last.spans.last_mut()
            {
                let trimmed: String = span.content.as_ref().chars().take(max_base_len).collect();
                *span = format!("{trimmed}…").dim();
            }
        }
        out
    }
}
impl Renderable for StatusIndicatorWidget {
    /// One header row plus however many wrapped detail lines fit the width.
    fn desired_height(&self, width: u16) -> u16 {
        1 + u16::try_from(self.wrapped_details_lines(width).len()).unwrap_or(0)
    }
    /// Draws the spinner, header, elapsed timer (and optional interrupt hint)
    /// on the first row, with wrapped details on the remaining rows.
    fn render(&self, area: Rect, buf: &mut Buffer) {
        if area.is_empty() {
            return;
        }
        // Schedule next animation frame.
        self.frame_requester
            .schedule_frame_in(Duration::from_millis(32));
        let now = Instant::now();
        let elapsed_duration = self.elapsed_duration_at(now);
        let pretty_elapsed = fmt_elapsed_compact(elapsed_duration.as_secs());
        let mut spans = Vec::with_capacity(5);
        spans.push(spinner(Some(self.last_resume_at), self.animations_enabled));
        spans.push(" ".into());
        // Shimmer animation when enabled; otherwise render the header as
        // plain text (and skip it entirely when empty).
        if self.animations_enabled {
            spans.extend(shimmer_spans(&self.header));
        } else if !self.header.is_empty() {
            spans.push(self.header.clone().into());
        }
        spans.push(" ".into());
        if self.show_interrupt_hint {
            spans.extend(vec![
                format!("({pretty_elapsed} • ").dim(),
                key_hint::plain(KeyCode::Esc).into(),
                " to interrupt)".dim(),
            ]);
        } else {
            spans.push(format!("({pretty_elapsed})").dim());
        }
        let mut lines = Vec::new();
        lines.push(Line::from(spans));
        // If there is enough space, add the details lines below the header.
        if area.height > 1 {
            let details = self.wrapped_details_lines(area.width);
            let max_details = usize::from(area.height.saturating_sub(1));
            lines.extend(details.into_iter().take(max_details));
        }
        Paragraph::new(Text::from(lines)).render_ref(area, buf);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::app_event::AppEvent;
    use crate::app_event_sender::AppEventSender;
    use ratatui::Terminal;
    use ratatui::backend::TestBackend;
    use std::time::Duration;
    use std::time::Instant;
    use tokio::sync::mpsc::unbounded_channel;
    use pretty_assertions::assert_eq;
    // Boundary cases for each unit: seconds only, minutes, and hours.
    #[test]
    fn fmt_elapsed_compact_formats_seconds_minutes_hours() {
        assert_eq!(fmt_elapsed_compact(0), "0s");
        assert_eq!(fmt_elapsed_compact(1), "1s");
        assert_eq!(fmt_elapsed_compact(59), "59s");
        assert_eq!(fmt_elapsed_compact(60), "1m 00s");
        assert_eq!(fmt_elapsed_compact(61), "1m 01s");
        assert_eq!(fmt_elapsed_compact(3 * 60 + 5), "3m 05s");
        assert_eq!(fmt_elapsed_compact(59 * 60 + 59), "59m 59s");
        assert_eq!(fmt_elapsed_compact(3600), "1h 00m 00s");
        assert_eq!(fmt_elapsed_compact(3600 + 60 + 1), "1h 01m 01s");
        assert_eq!(fmt_elapsed_compact(25 * 3600 + 2 * 60 + 3), "25h 02m 03s");
    }
    // Snapshot of the default widget on a wide terminal.
    #[test]
    fn renders_with_working_header() {
        let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
        let tx = AppEventSender::new(tx_raw);
        let w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true);
        // Render into a fixed-size test terminal and snapshot the backend.
        let mut terminal = Terminal::new(TestBackend::new(80, 2)).expect("terminal");
        terminal
            .draw(|f| w.render(f.area(), f.buffer_mut()))
            .expect("draw");
        insta::assert_snapshot!(terminal.backend());
    }
    // Snapshot on a narrow terminal where the status line must truncate.
    #[test]
    fn renders_truncated() {
        let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
        let tx = AppEventSender::new(tx_raw);
        let w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true);
        // Render into a fixed-size test terminal and snapshot the backend.
        let mut terminal = Terminal::new(TestBackend::new(20, 2)).expect("terminal");
        terminal
            .draw(|f| w.render(f.area(), f.buffer_mut()))
            .expect("draw");
        insta::assert_snapshot!(terminal.backend());
    }
    // Details that exceed the content width wrap onto a second line.
    #[test]
    fn renders_wrapped_details_panama_two_lines() {
        let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
        let tx = AppEventSender::new(tx_raw);
        let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), false);
        w.update_details(Some("A man a plan a canal panama".to_string()));
        w.set_interrupt_hint_visible(false);
        // Freeze time-dependent rendering (elapsed + spinner) to keep the snapshot stable.
        w.is_paused = true;
        w.elapsed_running = Duration::ZERO;
        // Prefix is 4 columns, so a width of 30 yields a content width of 26: one column
        // short of fitting the whole phrase (27 cols), forcing exactly one wrap without ellipsis.
        let mut terminal = Terminal::new(TestBackend::new(30, 3)).expect("terminal");
        terminal
            .draw(|f| w.render(f.area(), f.buffer_mut()))
            .expect("draw");
        insta::assert_snapshot!(terminal.backend());
    }
    // Pausing banks elapsed time; resuming continues from the paused total.
    #[test]
    fn timer_pauses_when_requested() {
        let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
        let tx = AppEventSender::new(tx_raw);
        let mut widget =
            StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true);
        let baseline = Instant::now();
        widget.last_resume_at = baseline;
        let before_pause = widget.elapsed_seconds_at(baseline + Duration::from_secs(5));
        assert_eq!(before_pause, 5);
        widget.pause_timer_at(baseline + Duration::from_secs(5));
        let paused_elapsed = widget.elapsed_seconds_at(baseline + Duration::from_secs(10));
        assert_eq!(paused_elapsed, before_pause);
        widget.resume_timer_at(baseline + Duration::from_secs(10));
        let after_resume = widget.elapsed_seconds_at(baseline + Duration::from_secs(13));
        assert_eq!(after_resume, before_pause + 3);
    }
    // More detail lines than DETAILS_MAX_LINES get truncated with an ellipsis.
    #[test]
    fn details_overflow_adds_ellipsis() {
        let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
        let tx = AppEventSender::new(tx_raw);
        let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true);
        w.update_details(Some("abcd abcd abcd abcd".to_string()));
        let lines = w.wrapped_details_lines(6);
        assert_eq!(lines.len(), DETAILS_MAX_LINES);
        let last = lines.last().expect("expected last details line");
        assert!(
            last.spans[1].content.as_ref().ends_with("…"),
            "expected ellipsis in last line: {last:?}"
        );
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/transcript_copy.rs | codex-rs/tui2/src/transcript_copy.rs | //! Converting a transcript selection to clipboard text.
//!
//! Copy is driven by a content-relative selection (`TranscriptSelectionPoint`),
//! but the transcript is rendered with styling and wrapping for the TUI. This
//! module reconstructs clipboard text from the rendered transcript lines while
//! preserving user expectations:
//!
//! - Soft-wrapped prose is treated as a single logical line when copying.
//! - Code blocks preserve meaningful indentation.
//! - Markdown “source markers” are emitted when copying (backticks for inline
//! code, triple-backtick fences for code blocks) even if the on-screen
//! rendering is styled differently.
//!
//! ## Inputs and invariants
//!
//! Clipboard reconstruction is performed over the same *visual lines* that are
//! rendered in the transcript viewport:
//!
//! - `lines`: wrapped transcript `Line`s, including the gutter spans.
//! - `joiner_before`: a parallel vector describing which wrapped lines are
//! *soft wrap* continuations (and what to insert at the wrap boundary).
//! - `(line_index, column)` selection points in *content space* (columns exclude
//! the gutter).
//!
//! Callers must keep `lines` and `joiner_before` aligned. In practice, `App`
//! obtains both from `transcript_render`, which itself builds from each cell's
//! `HistoryCell::transcript_lines_with_joiners` implementation.
//!
//! ## Style-derived Markdown cues
//!
//! For fidelity, we copy Markdown source markers even though the viewport may
//! render content using styles instead of literal characters. Today, the copy
//! logic derives "inline code" and "code block" boundaries from the styling we
//! apply during rendering (currently cyan spans/lines).
//!
//! If transcript styling changes (for example, if code blocks stop using cyan),
//! update `is_code_block_line` and [`span_is_inline_code`] so clipboard output
//! continues to match user expectations.
//!
//! The caller can choose whether copy covers only the visible viewport range
//! (by passing `visible_start..visible_end`) or spans the entire transcript
//! (by passing `0..lines.len()`).
//!
//! UI affordances (keybinding detection and the on-screen "copy" pill) live in
//! `transcript_copy_ui`.
use ratatui::text::Line;
use ratatui::text::Span;
use crate::history_cell::HistoryCell;
use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS;
use crate::transcript_selection::TranscriptSelection;
use crate::transcript_selection::TranscriptSelectionPoint;
use std::sync::Arc;
/// Render the current transcript selection into clipboard text.
///
/// `App`-level entry point: rebuilds the wrapped transcript lines with the
/// same rules as the on-screen viewport, then delegates to
/// [`selection_to_copy_text`] over the full transcript range (off-screen
/// lines included).
///
/// Returns `None` when the selection is missing an endpoint or the transcript
/// renders to zero lines.
pub(crate) fn selection_to_copy_text_for_cells(
    cells: &[Arc<dyn HistoryCell>],
    selection: TranscriptSelection,
    width: u16,
) -> Option<String> {
    // Both endpoints must be present for a selection to exist.
    let anchor = selection.anchor?;
    let head = selection.head?;
    let transcript = crate::transcript_render::build_wrapped_transcript_lines(cells, width);
    let line_count = transcript.lines.len();
    if line_count == 0 {
        return None;
    }
    selection_to_copy_text(
        &transcript.lines,
        &transcript.joiner_before,
        anchor,
        head,
        0,
        line_count,
        width,
    )
}
/// Render the selected region into clipboard text.
///
/// `lines` must be the wrapped transcript lines as rendered by the TUI,
/// including the leading gutter spans. `start`/`end` columns are expressed in
/// content-space (excluding the gutter), and will be ordered internally if the
/// endpoints are reversed.
///
/// `joiner_before[i]` is the exact string to insert *before* `lines[i]` when
/// it is a continuation of a soft-wrapped prose line. This enables copy to
/// treat soft-wrapped prose as a single logical line.
///
/// Notes:
///
/// - For code/preformatted runs, copy is permitted to extend beyond the
/// viewport width when the user selects “to the right edge”, so we avoid
/// producing truncated logical lines in narrow terminals.
/// - Markdown markers are derived from render-time styles (see module docs).
/// - Column math is display-width-aware (wide glyphs count as multiple columns).
///
/// Returns `None` if the inputs imply an empty selection or if `width` is too
/// small to contain the gutter plus at least one content column.
pub(crate) fn selection_to_copy_text(
    lines: &[Line<'static>],
    joiner_before: &[Option<String>],
    start: TranscriptSelectionPoint,
    end: TranscriptSelectionPoint,
    visible_start: usize,
    visible_end: usize,
    width: u16,
) -> Option<String> {
    use ratatui::style::Color;
    // The gutter must fit with at least one content column to the right.
    if width <= TRANSCRIPT_GUTTER_COLS {
        return None;
    }
    // Selection points are expressed in content-relative coordinates and may be provided in either
    // direction (dragging "backwards"). Normalize to a forward `(start, end)` pair so the rest of
    // the logic can assume `start <= end`.
    let (start, end) = order_points(start, end);
    if start == end {
        return None;
    }
    // Transcript `Line`s include a left gutter (bullet/prefix space). Selection columns exclude the
    // gutter, so we translate selection columns to absolute columns by adding `base_x`.
    let base_x = TRANSCRIPT_GUTTER_COLS;
    let max_x = width.saturating_sub(1);
    let mut out = String::new();
    let mut prev_selected_line: Option<usize> = None;
    // We emit Markdown fences around runs of code/preformatted visual lines so:
    // - the clipboard captures source-style markers (` ``` `) even if the viewport is stylized
    // - indentation is preserved and paste is stable in editors
    let mut in_code_run = false;
    // `wrote_any` lets us handle separators (newline or soft-wrap joiner) without special-casing
    // "first output line" at every decision point.
    let mut wrote_any = false;
    for line_index in visible_start..visible_end {
        // Only consider lines that intersect the selection's line range. (Selection endpoints are
        // clamped elsewhere; if the indices don't exist, `lines.get(...)` returns `None`.)
        if line_index < start.line_index || line_index > end.line_index {
            continue;
        }
        let line = lines.get(line_index)?;
        // Code blocks (and other preformatted content) are detected via styling and copied as
        // "verbatim lines" (no inline Markdown re-encoding). This also enables special handling for
        // narrow terminals: selecting "to the right edge" should copy the full logical line, not a
        // viewport-truncated slice.
        let is_code_block_line = line.style.fg == Some(Color::Cyan);
        // Flatten the line to compute the rightmost non-space column. We use that to:
        // - avoid copying trailing right-margin padding
        // - clamp prose selection to the viewport width
        let flat = line_to_flat(line);
        let text_end = if is_code_block_line {
            last_non_space_col(flat.as_str())
        } else {
            last_non_space_col(flat.as_str()).map(|c| c.min(max_x))
        };
        // Convert selection endpoints into a selection range for this specific visual line:
        // - first line clamps the start column
        // - last line clamps the end column
        // - intermediate lines select the full line.
        let line_start_col = if line_index == start.line_index {
            start.column
        } else {
            0
        };
        let line_end_col = if line_index == end.line_index {
            end.column
        } else {
            max_x.saturating_sub(base_x)
        };
        let row_sel_start = base_x.saturating_add(line_start_col).min(max_x);
        // For code/preformatted lines, treat "selection ends at the viewport edge" as a special
        // "copy to end of logical line" case. This prevents narrow terminals from producing
        // truncated clipboard content when the user drags to the right edge.
        let row_sel_end = if is_code_block_line && line_end_col >= max_x.saturating_sub(base_x) {
            u16::MAX
        } else {
            base_x.saturating_add(line_end_col).min(max_x)
        };
        if row_sel_start > row_sel_end {
            continue;
        }
        // An all-space line (text_end == None) contributes an empty line but
        // still participates in separators / code runs below.
        let selected_line = if let Some(text_end) = text_end {
            let from_col = row_sel_start.max(base_x);
            let to_col = row_sel_end.min(text_end);
            if from_col > to_col {
                Line::default().style(line.style)
            } else {
                slice_line_by_cols(line, from_col, to_col)
            }
        } else {
            Line::default().style(line.style)
        };
        // Convert the selected `Line` into Markdown source:
        // - For prose: wrap inline-code spans in backticks.
        // - For code blocks: return the raw flat text so we preserve indentation/spacing.
        let line_text = line_to_markdown(&selected_line, is_code_block_line);
        // Track transitions into/out of code/preformatted runs and emit triple-backtick fences.
        // We always separate a code run from prior prose with a newline.
        if is_code_block_line && !in_code_run {
            if wrote_any {
                out.push('\n');
            }
            out.push_str("```");
            out.push('\n');
            in_code_run = true;
            prev_selected_line = None;
            wrote_any = true;
        } else if !is_code_block_line && in_code_run {
            out.push('\n');
            out.push_str("```");
            out.push('\n');
            in_code_run = false;
            prev_selected_line = None;
            wrote_any = true;
        }
        // When copying inside a code run, every selected visual line becomes a literal line inside
        // the fence (no soft-wrap joining). We preserve explicit blank lines by writing empty
        // strings as a line.
        if in_code_run {
            if wrote_any && (!out.ends_with('\n') || prev_selected_line.is_some()) {
                out.push('\n');
            }
            out.push_str(line_text.as_str());
            prev_selected_line = Some(line_index);
            wrote_any = true;
            continue;
        }
        // Prose path:
        // - If this line is a soft-wrap continuation of the previous selected line, insert the
        //   recorded joiner (often spaces) instead of a newline.
        // - Otherwise, insert a newline to preserve hard breaks.
        if wrote_any {
            let joiner = joiner_before.get(line_index).cloned().unwrap_or(None);
            if prev_selected_line == Some(line_index.saturating_sub(1))
                && let Some(joiner) = joiner
            {
                out.push_str(joiner.as_str());
            } else {
                out.push('\n');
            }
        }
        out.push_str(line_text.as_str());
        prev_selected_line = Some(line_index);
        wrote_any = true;
    }
    // Close a fence left open when the selection ended inside a code run.
    if in_code_run {
        out.push('\n');
        out.push_str("```");
    }
    (!out.is_empty()).then_some(out)
}
/// Normalize two selection endpoints into `(start, end)` in transcript order.
///
/// Dragging can yield reversed endpoints; ordering is lexicographic on
/// `(line_index, column)`, and equal points come back unchanged as `(a, b)`.
fn order_points(
    a: TranscriptSelectionPoint,
    b: TranscriptSelectionPoint,
) -> (TranscriptSelectionPoint, TranscriptSelectionPoint) {
    let key = |p: &TranscriptSelectionPoint| (p.line_index, p.column);
    if key(&b) < key(&a) { (b, a) } else { (a, b) }
}
/// Flatten a styled `Line` into its plain text by concatenating the content
/// of every span, dropping all styling.
///
/// Used for cursor/column arithmetic and for emitting plain-text code lines.
fn line_to_flat(line: &Line<'_>) -> String {
    let mut flat = String::new();
    for span in &line.spans {
        flat.push_str(span.content.as_ref());
    }
    flat
}
/// Return the last non-space *display column* in `flat` (inclusive), or
/// `None` when the string is empty or all spaces.
///
/// Display-width-aware: wide glyphs (e.g. CJK) advance by more than one
/// column, and the reported column is the glyph's rightmost cell.
///
/// Rationale: transcript rendering often pads out to the viewport width; copy
/// should avoid including that right-margin whitespace.
fn last_non_space_col(flat: &str) -> Option<u16> {
    use unicode_width::UnicodeWidthChar;
    let mut cursor: u16 = 0;
    let mut rightmost: Option<u16> = None;
    for glyph in flat.chars() {
        let glyph_width = UnicodeWidthChar::width(glyph).unwrap_or(0) as u16;
        if glyph != ' ' {
            // Record the glyph's last occupied column (start + width - 1).
            rightmost = Some(cursor.saturating_add(glyph_width.saturating_sub(1)));
        }
        cursor = cursor.saturating_add(glyph_width);
    }
    rightmost
}
/// Map a display-column range to a UTF-8 byte range within `flat`.
///
/// The returned range is suitable for slicing `flat` and for slicing the original `Span` strings
/// (once translated into span-local offsets).
///
/// This walks Unicode scalar values and advances by display width so callers can slice based on the
/// same column semantics the selection model uses.
fn byte_range_for_cols(flat: &str, start_col: u16, end_col: u16) -> Option<std::ops::Range<usize>> {
    use unicode_width::UnicodeWidthChar;
    // We translate selection columns (display columns, not bytes) into a UTF-8 byte range. This is
    // intentionally Unicode-width aware: wide glyphs cover multiple columns but occupy one `char`
    // and several bytes.
    //
    // Strategy:
    // - Walk `flat` by `char_indices()` while tracking the current display column.
    // - The start byte is the first char whose rendered columns intersect `start_col`.
    // - The end byte is the end of the last char whose rendered columns intersect `end_col`.
    let mut col: u16 = 0;
    let mut start_byte: Option<usize> = None;
    let mut end_byte: Option<usize> = None;
    for (idx, ch) in flat.char_indices() {
        let w = UnicodeWidthChar::width(ch).unwrap_or(0) as u16;
        let end = col.saturating_add(w.saturating_sub(1));
        // Start is inclusive: select the first glyph whose right edge reaches the start column.
        if start_byte.is_none() && end >= start_col {
            start_byte = Some(idx);
        }
        // End is inclusive in column space; keep extending end byte while we're still at/before
        // `end_col`. This includes a wide glyph even if it starts before `end_col` but ends after.
        if col <= end_col {
            end_byte = Some(idx + ch.len_utf8());
        }
        col = col.saturating_add(w);
        if col > end_col && start_byte.is_some() {
            break;
        }
    }
    // `None` when the string is empty, `start_col` lies past the text, or the
    // computed bounds are inverted (empty column range).
    match (start_byte, end_byte) {
        (Some(s), Some(e)) if e >= s => Some(s..e),
        _ => None,
    }
}
/// Slice a styled `Line` by display columns, preserving per-span style.
///
/// This is the core "selection → styled substring" helper used before Markdown re-encoding. It
/// avoids mixing styles across spans by slicing each contributing span independently, then
/// reassembling them into a new `Line` with the original line-level style.
fn slice_line_by_cols(line: &Line<'static>, start_col: u16, end_col: u16) -> Line<'static> {
    // `Line` spans store independent string slices with their own styles. To slice by columns while
    // preserving styling, we:
    // 1) Flatten the line and compute the desired UTF-8 byte range in the flattened string.
    // 2) Compute each span's byte range within the flattened string.
    // 3) Intersect the selection range with each span range and slice per-span, preserving styles.
    let flat = line_to_flat(line);
    let mut span_bounds: Vec<(std::ops::Range<usize>, ratatui::style::Style)> = Vec::new();
    let mut acc = 0usize;
    for s in &line.spans {
        let start = acc;
        let text = s.content.as_ref();
        acc += text.len();
        span_bounds.push((start..acc, s.style));
    }
    // An empty/unresolvable column range yields an empty line with the same
    // line-level style (so code-block detection downstream still works).
    let Some(range) = byte_range_for_cols(flat.as_str(), start_col, end_col) else {
        return Line::default().style(line.style);
    };
    // Translate the flattened byte range back into (span-local) slices.
    let start_byte = range.start;
    let end_byte = range.end;
    let mut spans: Vec<ratatui::text::Span<'static>> = Vec::new();
    for (i, (r, style)) in span_bounds.iter().enumerate() {
        let s = r.start;
        let e = r.end;
        // Skip spans entirely before the range; stop once past it.
        if e <= start_byte {
            continue;
        }
        if s >= end_byte {
            break;
        }
        let seg_start = start_byte.max(s);
        let seg_end = end_byte.min(e);
        if seg_end > seg_start {
            let local_start = seg_start - s;
            let local_end = seg_end - s;
            let content = line.spans[i].content.as_ref();
            spans.push(ratatui::text::Span {
                style: *style,
                content: content[local_start..local_end].to_string().into(),
            });
        }
        if e >= end_byte {
            break;
        }
    }
    Line::from(spans).style(line.style)
}
/// Whether a span should be treated as "inline code" when reconstructing
/// Markdown.
///
/// TUI2 renders inline code with a cyan foreground. Links are also cyan but
/// underlined, so underlined cyan spans are excluded to avoid wrapping links
/// in backticks.
fn span_is_inline_code(span: &Span<'_>) -> bool {
    use ratatui::style::Color;
    use ratatui::style::Modifier;
    let is_cyan = matches!(span.style.fg, Some(Color::Cyan));
    let is_underlined = span.style.add_modifier.contains(Modifier::UNDERLINED);
    is_cyan && !is_underlined
}
/// Convert a selected, styled `Line` back into Markdown-ish source text.
///
/// - Code-block lines: return the raw flattened text unchanged; the caller is
///   responsible for wrapping the whole run in triple-backtick fences.
/// - Prose lines: emit a backtick at every transition into or out of an
///   inline-code run of spans, so inline code regains its source markers.
fn line_to_markdown(line: &Line<'static>, is_code_block: bool) -> String {
    if is_code_block {
        return line_to_flat(line);
    }
    let mut out = String::new();
    let mut inside_code = false;
    for span in &line.spans {
        let code_here = span_is_inline_code(span);
        // Toggle the backtick marker whenever the code/prose state flips.
        if code_here != inside_code {
            out.push('`');
            inside_code = code_here;
        }
        out.push_str(span.content.as_ref());
    }
    // Close a trailing inline-code run.
    if inside_code {
        out.push('`');
    }
    out
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use ratatui::style::Color;
use ratatui::style::Style;
use ratatui::style::Stylize;
#[test]
fn selection_to_copy_text_returns_none_for_zero_content_width() {
let lines = vec![Line::from("• Hello")];
let joiner_before = vec![None];
let start = TranscriptSelectionPoint {
line_index: 0,
column: 0,
};
let end = TranscriptSelectionPoint {
line_index: 0,
column: 1,
};
assert_eq!(
selection_to_copy_text(
&lines,
&joiner_before,
start,
end,
0,
lines.len(),
TRANSCRIPT_GUTTER_COLS,
),
None
);
}
#[test]
fn selection_to_copy_text_returns_none_for_empty_selection_point() {
let lines = vec![Line::from("• Hello")];
let joiner_before = vec![None];
let pt = TranscriptSelectionPoint {
line_index: 0,
column: 0,
};
assert_eq!(
selection_to_copy_text(&lines, &joiner_before, pt, pt, 0, lines.len(), 20),
None
);
}
#[test]
fn selection_to_copy_text_orders_reversed_endpoints() {
let lines = vec![Line::from("• Hello world")];
let joiner_before = vec![None];
let start = TranscriptSelectionPoint {
line_index: 0,
column: 10,
};
let end = TranscriptSelectionPoint {
line_index: 0,
column: 6,
};
let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, 80)
.expect("expected text");
assert_eq!(out, "world");
}
#[test]
fn copy_selection_soft_wrap_joins_without_newline() {
let lines = vec![Line::from("• Hello"), Line::from(" world")];
let joiner_before = vec![None, Some(" ".to_string())];
let start = TranscriptSelectionPoint {
line_index: 0,
column: 0,
};
let end = TranscriptSelectionPoint {
line_index: 1,
column: 100,
};
let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, lines.len(), 20)
.expect("expected text");
assert_eq!(out, "Hello world");
}
#[test]
fn copy_selection_wraps_inline_code_in_backticks() {
let lines = vec![Line::from(vec![
"• ".into(),
"Use ".into(),
ratatui::text::Span::from("foo()").style(Style::new().fg(Color::Cyan)),
" now".into(),
])];
let joiner_before = vec![None];
let start = TranscriptSelectionPoint {
line_index: 0,
column: 0,
};
let end = TranscriptSelectionPoint {
line_index: 0,
column: 100,
};
let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, 80)
.expect("expected text");
assert_eq!(out, "Use `foo()` now");
}
#[test]
fn selection_to_copy_text_for_cells_reconstructs_full_code_line_beyond_viewport() {
// Minimal HistoryCell stub returning canned lines/joiners so the test can
// drive selection_to_copy_text_for_cells without real transcript state.
#[derive(Debug)]
struct FakeCell {
lines: Vec<Line<'static>>,
joiner_before: Vec<Option<String>>,
}
impl HistoryCell for FakeCell {
fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
self.lines.clone()
}
fn transcript_lines_with_joiners(
&self,
_width: u16,
) -> crate::history_cell::TranscriptLinesWithJoiners {
crate::history_cell::TranscriptLinesWithJoiners {
lines: self.lines.clone(),
joiner_before: self.joiner_before.clone(),
}
}
}
// Cyan styling marks the line as code; its content is wider than the
// 12-column viewport used below.
let style = Style::new().fg(Color::Cyan);
let cell = FakeCell {
lines: vec![Line::from("• 0123456789ABCDEFGHIJ").style(style)],
joiner_before: vec![None],
};
let cells: Vec<std::sync::Arc<dyn HistoryCell>> = vec![std::sync::Arc::new(cell)];
let width: u16 = 12;
let max_x = width.saturating_sub(1);
let viewport_edge_col = max_x.saturating_sub(TRANSCRIPT_GUTTER_COLS);
// Selecting up to the rightmost visible column should copy the entire
// logical line (re-fetched from the cell), not just the clipped portion.
let selection = TranscriptSelection {
anchor: Some(TranscriptSelectionPoint::new(0, 0)),
head: Some(TranscriptSelectionPoint::new(0, viewport_edge_col)),
};
let out =
selection_to_copy_text_for_cells(&cells, selection, width).expect("expected text");
assert_eq!(out, "```\n 0123456789ABCDEFGHIJ\n```");
}
#[test]
fn order_points_orders_by_line_then_column() {
    // A point on an earlier line sorts first regardless of column...
    let p = TranscriptSelectionPoint::new(2, 5);
    let q = TranscriptSelectionPoint::new(1, 10);
    assert_eq!(order_points(p, q), (q, p));
    // ...and on the same line, the smaller column sorts first.
    let p = TranscriptSelectionPoint::new(1, 5);
    let q = TranscriptSelectionPoint::new(1, 10);
    assert_eq!(order_points(p, q), (p, q));
}
#[test]
fn line_to_flat_concatenates_spans() {
// Flattening a multi-span line concatenates span contents in order.
let line = Line::from(vec!["a".into(), "b".into(), "c".into()]);
assert_eq!(line_to_flat(&line), "abc");
}
#[test]
fn last_non_space_col_counts_display_width() {
// "コ" is width 2, so "コX" occupies columns 0..=2.
assert_eq!(last_non_space_col("コX"), Some(2));
// Trailing spaces are ignored; an all-space string has no content column.
assert_eq!(last_non_space_col("a "), Some(0));
assert_eq!(last_non_space_col(" "), None);
}
#[test]
fn byte_range_for_cols_maps_columns_to_utf8_bytes() {
    // ASCII: display columns map 1:1 onto UTF-8 bytes.
    let text = "abcd";
    let bytes = byte_range_for_cols(text, 1, 2).expect("range");
    assert_eq!(&text[bytes], "bc");
    // Wide CJK: "コ" covers two columns but three UTF-8 bytes.
    let text = "コX";
    let bytes = byte_range_for_cols(text, 0, 2).expect("range");
    assert_eq!(&text[bytes], "コX");
}
#[test]
fn slice_line_by_cols_preserves_span_styles() {
    // Build a line whose spans carry distinct colors so we can verify the
    // slice keeps each fragment's original style.
    let source = Line::from(vec![
        "• ".into(),
        "Hello".red(),
        " ".into(),
        "world".green(),
    ]);
    // Slice "llo wo" (crosses span boundaries).
    let out = slice_line_by_cols(&source, 4, 9);
    assert_eq!(line_to_flat(&out), "llo wo");
    assert_eq!(out.spans.len(), 3);
    assert_eq!(out.spans[0].content.as_ref(), "llo");
    assert_eq!(out.spans[0].style.fg, Some(Color::Red));
    assert_eq!(out.spans[1].content.as_ref(), " ");
    assert_eq!(out.spans[2].content.as_ref(), "wo");
    assert_eq!(out.spans[2].style.fg, Some(Color::Green));
}
#[test]
fn span_is_inline_code_excludes_underlined_cyan() {
    // Plain cyan reads as inline code...
    let code = Span::from("x").style(Style::new().fg(Color::Cyan));
    assert!(span_is_inline_code(&code));
    // ...but underlined cyan is link styling, and other colors never match.
    let link = Span::from("x").style(Style::new().fg(Color::Cyan).underlined());
    assert!(!span_is_inline_code(&link));
    let plain = Span::from("x").style(Style::new().fg(Color::Green));
    assert!(!span_is_inline_code(&plain));
}
#[test]
fn line_to_markdown_wraps_contiguous_inline_code_spans() {
    // Two adjacent cyan spans form one inline-code run and must share a
    // single pair of backticks in the markdown output.
    let input = Line::from(vec![
        "Use ".into(),
        Span::from("foo").style(Style::new().fg(Color::Cyan)),
        Span::from("()").style(Style::new().fg(Color::Cyan)),
        " now".into(),
    ]);
    assert_eq!(line_to_markdown(&input, false), "Use `foo()` now");
}
#[test]
fn copy_selection_preserves_wide_glyphs() {
    // A selection ending on the second column of a double-width glyph must
    // keep the whole character.
    let rows = vec![Line::from("• コX")];
    let joiners = vec![None];
    let begin = TranscriptSelectionPoint::new(0, 0);
    let finish = TranscriptSelectionPoint::new(0, 2);
    let copied = selection_to_copy_text(&rows, &joiners, begin, finish, 0, 1, 80)
        .expect("expected text");
    assert_eq!(copied, "コX");
}
#[test]
fn copy_selection_wraps_code_block_in_fences_and_preserves_indent() {
    // Cyan-styled lines are code; copying both should produce one fenced
    // block with each line's indentation intact.
    let code_style = Style::new().fg(Color::Cyan);
    let rows = vec![
        Line::from("• fn main() {}").style(code_style),
        Line::from(" println!(\"hi\");").style(code_style),
    ];
    let joiners = vec![None, None];
    let begin = TranscriptSelectionPoint::new(0, 0);
    let finish = TranscriptSelectionPoint::new(1, 100);
    let copied = selection_to_copy_text(&rows, &joiners, begin, finish, 0, rows.len(), 80)
        .expect("expected text");
    assert_eq!(copied, "```\n fn main() {}\n println!(\"hi\");\n```");
}
#[test]
fn copy_selection_code_block_end_col_at_viewport_edge_copies_full_line() {
    // When the end column sits exactly on the viewport's right edge, the
    // full logical code line is copied rather than the clipped view.
    let code_style = Style::new().fg(Color::Cyan);
    let rows = vec![Line::from("• 0123456789ABCDEFGHIJ").style(code_style)];
    let joiners = vec![None];
    let width: u16 = 12;
    let max_x = width.saturating_sub(1);
    let edge_col = max_x.saturating_sub(TRANSCRIPT_GUTTER_COLS);
    let begin = TranscriptSelectionPoint::new(0, 0);
    let finish = TranscriptSelectionPoint::new(0, edge_col);
    let copied = selection_to_copy_text(&rows, &joiners, begin, finish, 0, 1, width)
        .expect("expected text");
    assert_eq!(copied, "```\n 0123456789ABCDEFGHIJ\n```");
}
#[test]
fn copy_selection_code_block_end_col_before_viewport_edge_copies_partial_line() {
    // An end column strictly inside the viewport is a deliberate partial
    // selection, so only the selected columns are copied.
    let code_style = Style::new().fg(Color::Cyan);
    let rows = vec![Line::from("• 0123456789ABCDEFGHIJ").style(code_style)];
    let joiners = vec![None];
    let width: u16 = 12;
    let begin = TranscriptSelectionPoint::new(0, 0);
    let finish = TranscriptSelectionPoint::new(0, 7);
    let copied = selection_to_copy_text(&rows, &joiners, begin, finish, 0, 1, width)
        .expect("expected text");
    assert_eq!(copied, "```\n 0123\n```");
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/terminal_palette.rs | codex-rs/tui2/src/terminal_palette.rs | use crate::color::perceptual_distance;
use ratatui::style::Color;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
// Monotonic counter bumped whenever default colors are re-queried; callers
// cache palette-derived values keyed on palette_version().
static DEFAULT_PALETTE_VERSION: AtomicU64 = AtomicU64::new(0);
/// Advances the global palette version, invalidating version-keyed caches.
fn bump_palette_version() {
DEFAULT_PALETTE_VERSION.fetch_add(1, Ordering::Relaxed);
}
/// Returns the closest color to the target color that the terminal can display.
///
/// Truecolor terminals get the exact RGB value; 256-color terminals get the
/// perceptually nearest fixed xterm entry; otherwise the terminal default.
pub fn best_color(target: (u8, u8, u8)) -> Color {
    let Some(color_level) = supports_color::on_cached(supports_color::Stream::Stdout) else {
        return Color::default();
    };
    if color_level.has_16m {
        let (r, g, b) = target;
        #[allow(clippy::disallowed_methods)]
        return Color::Rgb(r, g, b);
    }
    if color_level.has_256 {
        // Compare only the stable xterm entries (indices >= 16) by
        // perceptual distance to the target.
        let nearest = xterm_fixed_colors().min_by(|(_, a), (_, b)| {
            perceptual_distance(*a, target)
                .partial_cmp(&perceptual_distance(*b, target))
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        if let Some((index, _)) = nearest {
            #[allow(clippy::disallowed_methods)]
            return Color::Indexed(index as u8);
        }
    }
    #[allow(clippy::disallowed_methods)]
    Color::default()
}
/// Re-queries the terminal's default colors, then bumps the palette version
/// so consumers keyed on palette_version() pick up the refreshed values.
pub fn requery_default_colors() {
imp::requery_default_colors();
bump_palette_version();
}
/// Terminal-reported default foreground/background colors as RGB tuples.
#[derive(Clone, Copy)]
pub struct DefaultColors {
// Default foreground color.
fg: (u8, u8, u8),
// Default background color.
bg: (u8, u8, u8),
}
/// Returns the (cached) default terminal colors; `None` when the platform
/// implementation cannot or did not determine them.
pub fn default_colors() -> Option<DefaultColors> {
imp::default_colors()
}
/// Convenience accessor for just the default foreground color.
pub fn default_fg() -> Option<(u8, u8, u8)> {
default_colors().map(|c| c.fg)
}
/// Convenience accessor for just the default background color.
pub fn default_bg() -> Option<(u8, u8, u8)> {
default_colors().map(|c| c.bg)
}
/// Current palette version; changes whenever requery_default_colors() runs.
pub fn palette_version() -> u64 {
DEFAULT_PALETTE_VERSION.load(Ordering::Relaxed)
}
// Real implementation: queries the terminal on Unix. Compiled out under
// `cfg(test)` so tests never talk to a live terminal.
#[cfg(all(unix, not(test)))]
mod imp {
use super::DefaultColors;
use crossterm::style::Color as CrosstermColor;
use crossterm::style::query_background_color;
use crossterm::style::query_foreground_color;
use std::sync::Mutex;
use std::sync::OnceLock;
// Memoizes a single query attempt so a failed probe is not retried on
// every lookup.
struct Cache<T> {
// True once an initialization attempt was made, even if it failed.
attempted: bool,
// The cached value, if an attempt produced one.
value: Option<T>,
}
impl<T> Default for Cache<T> {
fn default() -> Self {
Self {
attempted: false,
value: None,
}
}
}
impl<T: Copy> Cache<T> {
// Runs `init` only on the first call; later calls return the cached value.
fn get_or_init_with(&mut self, mut init: impl FnMut() -> Option<T>) -> Option<T> {
if !self.attempted {
self.value = init();
self.attempted = true;
}
self.value
}
// Unconditionally re-runs `init`, replacing the cached value.
fn refresh_with(&mut self, mut init: impl FnMut() -> Option<T>) -> Option<T> {
self.value = init();
self.attempted = true;
self.value
}
}
// Process-wide cache of the terminal's default colors.
fn default_colors_cache() -> &'static Mutex<Cache<DefaultColors>> {
static CACHE: OnceLock<Mutex<Cache<DefaultColors>>> = OnceLock::new();
CACHE.get_or_init(|| Mutex::new(Cache::default()))
}
// Returns cached defaults, querying the terminal on first use. A poisoned
// lock yields None via the `ok()?`.
pub(super) fn default_colors() -> Option<DefaultColors> {
let cache = default_colors_cache();
let mut cache = cache.lock().ok()?;
cache.get_or_init_with(|| query_default_colors().unwrap_or_default())
}
pub(super) fn requery_default_colors() {
if let Ok(mut cache) = default_colors_cache().lock() {
// Don't try to refresh if the cache is already attempted and failed.
if cache.attempted && cache.value.is_none() {
return;
}
cache.refresh_with(|| query_default_colors().unwrap_or_default());
}
}
// Asks the terminal for its default fg and bg. Ok(None) when either query
// succeeded but produced a non-RGB answer.
fn query_default_colors() -> std::io::Result<Option<DefaultColors>> {
let fg = query_foreground_color()?.and_then(color_to_tuple);
let bg = query_background_color()?.and_then(color_to_tuple);
Ok(fg.zip(bg).map(|(fg, bg)| DefaultColors { fg, bg }))
}
// Only concrete RGB answers are usable; named/indexed answers are dropped.
fn color_to_tuple(color: CrosstermColor) -> Option<(u8, u8, u8)> {
match color {
CrosstermColor::Rgb { r, g, b } => Some((r, g, b)),
_ => None,
}
}
}
// Stub for non-Unix targets and test builds: never queries the terminal.
#[cfg(not(all(unix, not(test))))]
mod imp {
use super::DefaultColors;
pub(super) fn default_colors() -> Option<DefaultColors> {
None
}
pub(super) fn requery_default_colors() {}
}
/// The subset of Xterm colors that are usually consistent across terminals.
/// Skips indices 0-15, which terminals commonly remap per theme.
fn xterm_fixed_colors() -> impl Iterator<Item = (usize, (u8, u8, u8))> {
XTERM_COLORS.into_iter().enumerate().skip(16)
}
// Xterm colors; derived from https://ss64.com/bash/syntax-colors.html
pub const XTERM_COLORS: [(u8, u8, u8); 256] = [
// The first 16 colors vary based on terminal theme, so these are likely not the actual colors
// that are displayed when using these indices.
(0, 0, 0), // 0 Black (SYSTEM)
(128, 0, 0), // 1 Maroon (SYSTEM)
(0, 128, 0), // 2 Green (SYSTEM)
(128, 128, 0), // 3 Olive (SYSTEM)
(0, 0, 128), // 4 Navy (SYSTEM)
(128, 0, 128), // 5 Purple (SYSTEM)
(0, 128, 128), // 6 Teal (SYSTEM)
(192, 192, 192), // 7 Silver (SYSTEM)
(128, 128, 128), // 8 Grey (SYSTEM)
(255, 0, 0), // 9 Red (SYSTEM)
(0, 255, 0), // 10 Lime (SYSTEM)
(255, 255, 0), // 11 Yellow (SYSTEM)
(0, 0, 255), // 12 Blue (SYSTEM)
(255, 0, 255), // 13 Fuchsia (SYSTEM)
(0, 255, 255), // 14 Aqua (SYSTEM)
(255, 255, 255), // 15 White (SYSTEM)
// The rest of the colors are consistent in most terminals.
(0, 0, 0), // 16 Grey0
(0, 0, 95), // 17 NavyBlue
(0, 0, 135), // 18 DarkBlue
(0, 0, 175), // 19 Blue3
(0, 0, 215), // 20 Blue3
(0, 0, 255), // 21 Blue1
(0, 95, 0), // 22 DarkGreen
(0, 95, 95), // 23 DeepSkyBlue4
(0, 95, 135), // 24 DeepSkyBlue4
(0, 95, 175), // 25 DeepSkyBlue4
(0, 95, 215), // 26 DodgerBlue3
(0, 95, 255), // 27 DodgerBlue2
(0, 135, 0), // 28 Green4
(0, 135, 95), // 29 SpringGreen4
(0, 135, 135), // 30 Turquoise4
(0, 135, 175), // 31 DeepSkyBlue3
(0, 135, 215), // 32 DeepSkyBlue3
(0, 135, 255), // 33 DodgerBlue1
(0, 175, 0), // 34 Green3
(0, 175, 95), // 35 SpringGreen3
(0, 175, 135), // 36 DarkCyan
(0, 175, 175), // 37 LightSeaGreen
(0, 175, 215), // 38 DeepSkyBlue2
(0, 175, 255), // 39 DeepSkyBlue1
(0, 215, 0), // 40 Green3
(0, 215, 95), // 41 SpringGreen3
(0, 215, 135), // 42 SpringGreen2
(0, 215, 175), // 43 Cyan3
(0, 215, 215), // 44 DarkTurquoise
(0, 215, 255), // 45 Turquoise2
(0, 255, 0), // 46 Green1
(0, 255, 95), // 47 SpringGreen2
(0, 255, 135), // 48 SpringGreen1
(0, 255, 175), // 49 MediumSpringGreen
(0, 255, 215), // 50 Cyan2
(0, 255, 255), // 51 Cyan1
(95, 0, 0), // 52 DarkRed
(95, 0, 95), // 53 DeepPink4
(95, 0, 135), // 54 Purple4
(95, 0, 175), // 55 Purple4
(95, 0, 215), // 56 Purple3
(95, 0, 255), // 57 BlueViolet
(95, 95, 0), // 58 Orange4
(95, 95, 95), // 59 Grey37
(95, 95, 135), // 60 MediumPurple4
(95, 95, 175), // 61 SlateBlue3
(95, 95, 215), // 62 SlateBlue3
(95, 95, 255), // 63 RoyalBlue1
(95, 135, 0), // 64 Chartreuse4
(95, 135, 95), // 65 DarkSeaGreen4
(95, 135, 135), // 66 PaleTurquoise4
(95, 135, 175), // 67 SteelBlue
(95, 135, 215), // 68 SteelBlue3
(95, 135, 255), // 69 CornflowerBlue
(95, 175, 0), // 70 Chartreuse3
(95, 175, 95), // 71 DarkSeaGreen4
(95, 175, 135), // 72 CadetBlue
(95, 175, 175), // 73 CadetBlue
(95, 175, 215), // 74 SkyBlue3
(95, 175, 255), // 75 SteelBlue1
(95, 215, 0), // 76 Chartreuse3
(95, 215, 95), // 77 PaleGreen3
(95, 215, 135), // 78 SeaGreen3
(95, 215, 175), // 79 Aquamarine3
(95, 215, 215), // 80 MediumTurquoise
(95, 215, 255), // 81 SteelBlue1
(95, 255, 0), // 82 Chartreuse2
(95, 255, 95), // 83 SeaGreen2
(95, 255, 135), // 84 SeaGreen1
(95, 255, 175), // 85 SeaGreen1
(95, 255, 215), // 86 Aquamarine1
(95, 255, 255), // 87 DarkSlateGray2
(135, 0, 0), // 88 DarkRed
(135, 0, 95), // 89 DeepPink4
(135, 0, 135), // 90 DarkMagenta
(135, 0, 175), // 91 DarkMagenta
(135, 0, 215), // 92 DarkViolet
(135, 0, 255), // 93 Purple
(135, 95, 0), // 94 Orange4
(135, 95, 95), // 95 LightPink4
(135, 95, 135), // 96 Plum4
(135, 95, 175), // 97 MediumPurple3
(135, 95, 215), // 98 MediumPurple3
(135, 95, 255), // 99 SlateBlue1
(135, 135, 0), // 100 Yellow4
(135, 135, 95), // 101 Wheat4
(135, 135, 135), // 102 Grey53
(135, 135, 175), // 103 LightSlateGrey
(135, 135, 215), // 104 MediumPurple
(135, 135, 255), // 105 LightSlateBlue
(135, 175, 0), // 106 Yellow4
(135, 175, 95), // 107 DarkOliveGreen3
(135, 175, 135), // 108 DarkSeaGreen
(135, 175, 175), // 109 LightSkyBlue3
(135, 175, 215), // 110 LightSkyBlue3
(135, 175, 255), // 111 SkyBlue2
(135, 215, 0), // 112 Chartreuse2
(135, 215, 95), // 113 DarkOliveGreen3
(135, 215, 135), // 114 PaleGreen3
(135, 215, 175), // 115 DarkSeaGreen3
(135, 215, 215), // 116 DarkSlateGray3
(135, 215, 255), // 117 SkyBlue1
(135, 255, 0), // 118 Chartreuse1
(135, 255, 95), // 119 LightGreen
(135, 255, 135), // 120 LightGreen
(135, 255, 175), // 121 PaleGreen1
(135, 255, 215), // 122 Aquamarine1
(135, 255, 255), // 123 DarkSlateGray1
(175, 0, 0), // 124 Red3
(175, 0, 95), // 125 DeepPink4
(175, 0, 135), // 126 MediumVioletRed
(175, 0, 175), // 127 Magenta3
(175, 0, 215), // 128 DarkViolet
(175, 0, 255), // 129 Purple
(175, 95, 0), // 130 DarkOrange3
(175, 95, 95), // 131 IndianRed
(175, 95, 135), // 132 HotPink3
(175, 95, 175), // 133 MediumOrchid3
(175, 95, 215), // 134 MediumOrchid
(175, 95, 255), // 135 MediumPurple2
(175, 135, 0), // 136 DarkGoldenrod
(175, 135, 95), // 137 LightSalmon3
(175, 135, 135), // 138 RosyBrown
(175, 135, 175), // 139 Grey63
(175, 135, 215), // 140 MediumPurple2
(175, 135, 255), // 141 MediumPurple1
(175, 175, 0), // 142 Gold3
(175, 175, 95), // 143 DarkKhaki
(175, 175, 135), // 144 NavajoWhite3
(175, 175, 175), // 145 Grey69
(175, 175, 215), // 146 LightSteelBlue3
(175, 175, 255), // 147 LightSteelBlue
(175, 215, 0), // 148 Yellow3
(175, 215, 95), // 149 DarkOliveGreen3
(175, 215, 135), // 150 DarkSeaGreen3
(175, 215, 175), // 151 DarkSeaGreen2
(175, 215, 215), // 152 LightCyan3
(175, 215, 255), // 153 LightSkyBlue1
(175, 255, 0), // 154 GreenYellow
(175, 255, 95), // 155 DarkOliveGreen2
(175, 255, 135), // 156 PaleGreen1
(175, 255, 175), // 157 DarkSeaGreen2
(175, 255, 215), // 158 DarkSeaGreen1
(175, 255, 255), // 159 PaleTurquoise1
(215, 0, 0), // 160 Red3
(215, 0, 95), // 161 DeepPink3
(215, 0, 135), // 162 DeepPink3
(215, 0, 175), // 163 Magenta3
(215, 0, 215), // 164 Magenta3
(215, 0, 255), // 165 Magenta2
(215, 95, 0), // 166 DarkOrange3
(215, 95, 95), // 167 IndianRed
(215, 95, 135), // 168 HotPink3
(215, 95, 175), // 169 HotPink2
(215, 95, 215), // 170 Orchid
(215, 95, 255), // 171 MediumOrchid1
(215, 135, 0), // 172 Orange3
(215, 135, 95), // 173 LightSalmon3
(215, 135, 135), // 174 LightPink3
(215, 135, 175), // 175 Pink3
(215, 135, 215), // 176 Plum3
(215, 135, 255), // 177 Violet
(215, 175, 0), // 178 Gold3
(215, 175, 95), // 179 LightGoldenrod3
(215, 175, 135), // 180 Tan
(215, 175, 175), // 181 MistyRose3
(215, 175, 215), // 182 Thistle3
(215, 175, 255), // 183 Plum2
(215, 215, 0), // 184 Yellow3
(215, 215, 95), // 185 Khaki3
(215, 215, 135), // 186 LightGoldenrod2
(215, 215, 175), // 187 LightYellow3
(215, 215, 215), // 188 Grey84
(215, 215, 255), // 189 LightSteelBlue1
(215, 255, 0), // 190 Yellow2
(215, 255, 95), // 191 DarkOliveGreen1
(215, 255, 135), // 192 DarkOliveGreen1
(215, 255, 175), // 193 DarkSeaGreen1
(215, 255, 215), // 194 Honeydew2
(215, 255, 255), // 195 LightCyan1
(255, 0, 0), // 196 Red1
(255, 0, 95), // 197 DeepPink2
(255, 0, 135), // 198 DeepPink1
(255, 0, 175), // 199 DeepPink1
(255, 0, 215), // 200 Magenta2
(255, 0, 255), // 201 Magenta1
(255, 95, 0), // 202 OrangeRed1
(255, 95, 95), // 203 IndianRed1
(255, 95, 135), // 204 IndianRed1
(255, 95, 175), // 205 HotPink
(255, 95, 215), // 206 HotPink
(255, 95, 255), // 207 MediumOrchid1
(255, 135, 0), // 208 DarkOrange
(255, 135, 95), // 209 Salmon1
(255, 135, 135), // 210 LightCoral
(255, 135, 175), // 211 PaleVioletRed1
(255, 135, 215), // 212 Orchid2
(255, 135, 255), // 213 Orchid1
(255, 175, 0), // 214 Orange1
(255, 175, 95), // 215 SandyBrown
(255, 175, 135), // 216 LightSalmon1
(255, 175, 175), // 217 LightPink1
(255, 175, 215), // 218 Pink1
(255, 175, 255), // 219 Plum1
(255, 215, 0), // 220 Gold1
(255, 215, 95), // 221 LightGoldenrod2
(255, 215, 135), // 222 LightGoldenrod2
(255, 215, 175), // 223 NavajoWhite1
(255, 215, 215), // 224 MistyRose1
(255, 215, 255), // 225 Thistle1
(255, 255, 0), // 226 Yellow1
(255, 255, 95), // 227 LightGoldenrod1
(255, 255, 135), // 228 Khaki1
(255, 255, 175), // 229 Wheat1
(255, 255, 215), // 230 Cornsilk1
(255, 255, 255), // 231 Grey100
(8, 8, 8), // 232 Grey3
(18, 18, 18), // 233 Grey7
(28, 28, 28), // 234 Grey11
(38, 38, 38), // 235 Grey15
(48, 48, 48), // 236 Grey19
(58, 58, 58), // 237 Grey23
(68, 68, 68), // 238 Grey27
(78, 78, 78), // 239 Grey30
(88, 88, 88), // 240 Grey35
(98, 98, 98), // 241 Grey39
(108, 108, 108), // 242 Grey42
(118, 118, 118), // 243 Grey46
(128, 128, 128), // 244 Grey50
(138, 138, 138), // 245 Grey54
(148, 148, 148), // 246 Grey58
(158, 158, 158), // 247 Grey62
(168, 168, 168), // 248 Grey66
(178, 178, 178), // 249 Grey70
(188, 188, 188), // 250 Grey74
(198, 198, 198), // 251 Grey78
(208, 208, 208), // 252 Grey82
(218, 218, 218), // 253 Grey85
(228, 228, 228), // 254 Grey89
(238, 238, 238), // 255 Grey93
];
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/live_wrap.rs | codex-rs/tui2/src/live_wrap.rs | use unicode_width::UnicodeWidthChar;
use unicode_width::UnicodeWidthStr;
/// A single visual row produced by RowBuilder.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Row {
// Row text content; never contains a newline.
pub text: String,
/// True if this row ends with an explicit line break (as opposed to a hard wrap).
pub explicit_break: bool,
}
impl Row {
/// Display width of this row in terminal cells.
pub fn width(&self) -> usize {
self.text.width()
}
}
/// Incrementally wraps input text into visual rows of at most `width` cells.
///
/// Step 1: plain-text only. ANSI-carry and styled spans will be added later.
pub struct RowBuilder {
// Maximum row width in display cells; clamped to at least 1.
target_width: usize,
/// Buffer for the current logical line (until a '\n' is seen).
current_line: String,
/// Output rows built so far for the current logical line and previous ones.
rows: Vec<Row>,
}
impl RowBuilder {
    /// Creates a builder producing rows of at most `target_width` cells.
    /// A width of 0 is clamped to 1 so wrapping always makes progress.
    pub fn new(target_width: usize) -> Self {
        Self {
            target_width: target_width.max(1),
            current_line: String::new(),
            rows: Vec::new(),
        }
    }

    /// Current wrap width in display cells.
    pub fn width(&self) -> usize {
        self.target_width
    }

    /// Changes the wrap width and rewraps everything accumulated so far.
    pub fn set_width(&mut self, width: usize) {
        self.target_width = width.max(1);
        // Rewrap everything we have (simple approach for Step 1): rebuild the
        // logical text, reinserting '\n' only at explicit breaks, then feed
        // it back through the wrapper at the new width.
        let mut all = String::new();
        for row in self.rows.drain(..) {
            all.push_str(&row.text);
            if row.explicit_break {
                all.push('\n');
            }
        }
        all.push_str(&self.current_line);
        self.current_line.clear();
        self.push_fragment(&all);
    }

    /// Push an input fragment. May contain newlines.
    pub fn push_fragment(&mut self, fragment: &str) {
        if fragment.is_empty() {
            return;
        }
        let mut start = 0usize;
        for (i, ch) in fragment.char_indices() {
            if ch == '\n' {
                // Flush anything pending before the newline.
                if start < i {
                    self.current_line.push_str(&fragment[start..i]);
                }
                self.flush_current_line(true);
                start = i + ch.len_utf8();
            }
        }
        // Buffer the trailing piece (no newline yet) and wrap any overflow.
        if start < fragment.len() {
            self.current_line.push_str(&fragment[start..]);
            self.wrap_current_line();
        }
    }

    /// Mark the end of the current logical line (equivalent to pushing a '\n').
    pub fn end_line(&mut self) {
        self.flush_current_line(true);
    }

    /// Drain and return all produced rows.
    pub fn drain_rows(&mut self) -> Vec<Row> {
        std::mem::take(&mut self.rows)
    }

    /// Return a snapshot of produced rows (non-draining).
    pub fn rows(&self) -> &[Row] {
        &self.rows
    }

    /// Rows suitable for display, including the current partial line if any.
    pub fn display_rows(&self) -> Vec<Row> {
        let mut out = self.rows.clone();
        if !self.current_line.is_empty() {
            out.push(Row {
                text: self.current_line.clone(),
                explicit_break: false,
            });
        }
        out
    }

    /// Drain the oldest rows that exceed `max_keep` display rows (including the
    /// current partial line, if any). Returns the drained rows in order.
    pub fn drain_commit_ready(&mut self, max_keep: usize) -> Vec<Row> {
        let display_count = self.rows.len() + usize::from(!self.current_line.is_empty());
        if display_count <= max_keep {
            return Vec::new();
        }
        let commit_count = (display_count - max_keep).min(self.rows.len());
        // Drain from the front in one pass; the previous per-row `remove(0)`
        // loop was O(n^2) in the number of committed rows.
        self.rows.drain(..commit_count).collect()
    }

    /// Finalizes the buffered logical line. With `explicit_break`, emits a row
    /// carrying the explicit-break flag — an empty one when the line ended
    /// exactly on a width boundary, so fragmentation invariance holds.
    fn flush_current_line(&mut self, explicit_break: bool) {
        // Wrap any overflow first so at most a sub-width tail remains.
        self.wrap_current_line();
        if explicit_break {
            // `take` moves out the leftover tail (possibly empty) and leaves
            // the buffer empty, covering both the boundary and leftover cases.
            let text = std::mem::take(&mut self.current_line);
            self.rows.push(Row {
                text,
                explicit_break: true,
            });
        }
        // Reset current line buffer for next logical line.
        self.current_line.clear();
    }

    /// Repeatedly cuts width-limited prefixes off `current_line`, emitting
    /// each as a hard-wrapped (non-explicit) row. A remainder that fits
    /// entirely stays buffered so later fragments can append to it.
    fn wrap_current_line(&mut self) {
        loop {
            if self.current_line.is_empty() {
                break;
            }
            let (prefix, suffix, taken) =
                take_prefix_by_width(&self.current_line, self.target_width);
            if taken == 0 {
                // Avoid infinite loop on pathological inputs (e.g. a glyph
                // wider than the target width); take one scalar and continue.
                if let Some((i, ch)) = self.current_line.char_indices().next() {
                    let len = i + ch.len_utf8();
                    let p = self.current_line[..len].to_string();
                    self.rows.push(Row {
                        text: p,
                        explicit_break: false,
                    });
                    self.current_line = self.current_line[len..].to_string();
                    continue;
                }
                break;
            }
            if suffix.is_empty() {
                // Fits entirely; keep in buffer (do not push yet) so we can
                // append more later.
                break;
            } else {
                // Emit wrapped prefix as a non-explicit row and continue with
                // the remainder.
                self.rows.push(Row {
                    text: prefix,
                    explicit_break: false,
                });
                self.current_line = suffix.to_string();
            }
        }
    }
}
/// Take a prefix of `text` whose visible width is at most `max_cols`.
/// Returns (prefix, suffix, prefix_width).
pub fn take_prefix_by_width(text: &str, max_cols: usize) -> (String, &str, usize) {
    if max_cols == 0 || text.is_empty() {
        return (String::new(), text, 0);
    }
    let mut used = 0usize;
    let mut split = 0usize;
    for (idx, ch) in text.char_indices() {
        let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0);
        // Stop before the character that would push us past the budget.
        match used.checked_add(ch_width) {
            Some(next) if next <= max_cols => {
                used = next;
                split = idx + ch.len_utf8();
                if used == max_cols {
                    break;
                }
            }
            _ => break,
        }
    }
    (text[..split].to_string(), &text[split..], used)
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
// Hard-wrapped ASCII rows must each stay within the configured width; the
// sub-width tail stays buffered and is not emitted yet.
#[test]
fn rows_do_not_exceed_width_ascii() {
let mut rb = RowBuilder::new(10);
rb.push_fragment("hello whirl this is a test");
let rows = rb.rows().to_vec();
assert_eq!(
rows,
vec![
Row {
text: "hello whir".to_string(),
explicit_break: false
},
Row {
text: "l this is ".to_string(),
explicit_break: false
}
]
);
}
// Double-width glyphs must never be split across a row boundary.
#[test]
fn rows_do_not_exceed_width_emoji_cjk() {
// 😀 is width 2; 你/好 are width 2.
let mut rb = RowBuilder::new(6);
rb.push_fragment("😀😀 你好");
let rows = rb.rows().to_vec();
// At width 6, we expect the first row to fit exactly two emojis and a space
// (2 + 2 + 1 = 5) plus one more column for the first CJK char (2 would overflow),
// so only the two emojis and the space fit; the rest remains buffered.
assert_eq!(
rows,
vec![Row {
text: "😀😀 ".to_string(),
explicit_break: false
}]
);
}
// Feeding the same text whole or in arbitrary chunks must wrap identically.
#[test]
fn fragmentation_invariance_long_token() {
let s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; // 26 chars
let mut rb_all = RowBuilder::new(7);
rb_all.push_fragment(s);
let all_rows = rb_all.rows().to_vec();
let mut rb_chunks = RowBuilder::new(7);
for i in (0..s.len()).step_by(3) {
let end = (i + 3).min(s.len());
rb_chunks.push_fragment(&s[i..end]);
}
let chunk_rows = rb_chunks.rows().to_vec();
assert_eq!(all_rows, chunk_rows);
}
// '\n' in a fragment finishes the row with an explicit break.
#[test]
fn newline_splits_rows() {
let mut rb = RowBuilder::new(10);
rb.push_fragment("hello\nworld");
let rows = rb.display_rows();
assert!(rows.iter().any(|r| r.explicit_break));
assert_eq!(rows[0].text, "hello");
// Second row should begin with 'world'
assert!(rows.iter().any(|r| r.text.starts_with("world")));
}
// Shrinking the width rewraps already-emitted rows to the new limit.
#[test]
fn rewrap_on_width_change() {
let mut rb = RowBuilder::new(10);
rb.push_fragment("abcdefghijK");
assert!(!rb.rows().is_empty());
rb.set_width(5);
for r in rb.rows() {
assert!(r.width() <= 5);
}
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/file_search.rs | codex-rs/tui2/src/file_search.rs | //! Helper that owns the debounce/cancellation logic for `@` file searches.
//!
//! `ChatComposer` publishes *every* change of the `@token` as
//! `AppEvent::StartFileSearch(query)`.
//! This struct receives those events and decides when to actually spawn the
//! expensive search (handled in the main `App` thread). It tries to ensure:
//!
//! - Even when the user types long text quickly, they will start seeing results
//! after a short delay using an early version of what they typed.
//! - At most one search is in-flight at any time.
//!
//! It works as follows:
//!
//! 1. First query starts a debounce timer.
//! 2. While the timer is pending, the latest query from the user is stored.
//! 3. When the timer fires, it is cleared, and a search is done for the most
//! recent query.
//! 4. If there is an in-flight search that is not a prefix of the latest thing
//! the user typed, it is cancelled.
use codex_file_search as file_search;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
// Caps on result count and worker threads for a single search run.
const MAX_FILE_SEARCH_RESULTS: NonZeroUsize = NonZeroUsize::new(20).unwrap();
const NUM_FILE_SEARCH_THREADS: NonZeroUsize = NonZeroUsize::new(2).unwrap();
/// How long to wait after a keystroke before firing the first search when none
/// is currently running. Keeps early queries more meaningful.
const FILE_SEARCH_DEBOUNCE: Duration = Duration::from_millis(100);
// Poll cadence while the debounce thread waits for an in-flight search to end.
const ACTIVE_SEARCH_COMPLETE_POLL_INTERVAL: Duration = Duration::from_millis(20);
/// State machine for file-search orchestration.
///
/// Debounces `@`-token edits and keeps at most one search in flight; see the
/// module docs at the top of this file for the full protocol.
pub(crate) struct FileSearchManager {
/// Unified state guarded by one mutex.
state: Arc<Mutex<SearchState>>,
// Directory every search is rooted at.
search_dir: PathBuf,
// Channel used to deliver results back to the app thread.
app_tx: AppEventSender,
}
// Mutable state shared between the UI thread, the debounce-timer thread, and
// search worker threads.
struct SearchState {
/// Latest query typed by user (updated every keystroke).
latest_query: String,
/// true if a search is currently scheduled.
is_search_scheduled: bool,
/// If there is an active search, this will be the query being searched.
active_search: Option<ActiveSearch>,
}
// A search currently running on a worker thread.
struct ActiveSearch {
// Query the search was started with.
query: String,
// Set to true to ask the worker to stop early.
cancellation_token: Arc<AtomicBool>,
}
impl FileSearchManager {
/// Creates a manager that searches under `search_dir` and reports results
/// through `tx`.
pub fn new(search_dir: PathBuf, tx: AppEventSender) -> Self {
Self {
state: Arc::new(Mutex::new(SearchState {
latest_query: String::new(),
is_search_scheduled: false,
active_search: None,
})),
search_dir,
app_tx: tx,
}
}
/// Call whenever the user edits the `@` token.
///
/// Records the query, cancels an in-flight search whose results the new
/// query can no longer reuse, and schedules at most one debounced search.
pub fn on_user_query(&self, query: String) {
{
#[expect(clippy::unwrap_used)]
let mut st = self.state.lock().unwrap();
if query == st.latest_query {
// No change, nothing to do.
return;
}
// Update latest query.
st.latest_query.clear();
st.latest_query.push_str(&query);
// If there is an in-flight search that is definitely obsolete,
// cancel it now.
if let Some(active_search) = &st.active_search
&& !query.starts_with(&active_search.query)
{
active_search
.cancellation_token
.store(true, Ordering::Relaxed);
st.active_search = None;
}
// Schedule a search to run after debounce.
if !st.is_search_scheduled {
st.is_search_scheduled = true;
} else {
return;
}
}
// If we are here, we set `st.is_search_scheduled = true` before
// dropping the lock. This means we are the only thread that can spawn a
// debounce timer.
let state = self.state.clone();
let search_dir = self.search_dir.clone();
let tx_clone = self.app_tx.clone();
thread::spawn(move || {
// Always do a minimum debounce, but then poll until the
// `active_search` is cleared.
thread::sleep(FILE_SEARCH_DEBOUNCE);
loop {
#[expect(clippy::unwrap_used)]
if state.lock().unwrap().active_search.is_none() {
break;
}
thread::sleep(ACTIVE_SEARCH_COMPLETE_POLL_INTERVAL);
}
// The debounce timer has expired, so start a search using the
// latest query.
let cancellation_token = Arc::new(AtomicBool::new(false));
let token = cancellation_token.clone();
let query = {
#[expect(clippy::unwrap_used)]
let mut st = state.lock().unwrap();
let query = st.latest_query.clone();
st.is_search_scheduled = false;
st.active_search = Some(ActiveSearch {
query: query.clone(),
cancellation_token: token,
});
query
};
FileSearchManager::spawn_file_search(
query,
search_dir,
tx_clone,
cancellation_token,
state,
);
});
}
/// Runs one search on a worker thread, posting results unless cancelled,
/// then clears the matching `active_search` entry.
fn spawn_file_search(
query: String,
search_dir: PathBuf,
tx: AppEventSender,
cancellation_token: Arc<AtomicBool>,
search_state: Arc<Mutex<SearchState>>,
) {
// Ask the search to compute match indices as well (flag name only;
// presumably used for highlighting — confirm at the consumer).
let compute_indices = true;
std::thread::spawn(move || {
let matches = file_search::run(
&query,
MAX_FILE_SEARCH_RESULTS,
&search_dir,
Vec::new(),
NUM_FILE_SEARCH_THREADS,
cancellation_token.clone(),
compute_indices,
true,
)
.map(|res| res.matches)
.unwrap_or_default();
let is_cancelled = cancellation_token.load(Ordering::Relaxed);
if !is_cancelled {
tx.send(AppEvent::FileSearchResult { query, matches });
}
// Reset the active search state. Do a pointer comparison to verify
// that we are clearing the ActiveSearch that corresponds to the
// cancellation token we were given.
{
#[expect(clippy::unwrap_used)]
let mut st = search_state.lock().unwrap();
if let Some(active_search) = &st.active_search
&& Arc::ptr_eq(&active_search.cancellation_token, &cancellation_token)
{
st.active_search = None;
}
}
});
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/markdown.rs | codex-rs/tui2/src/markdown.rs | use ratatui::text::Line;
/// Renders `markdown_source` (wrapped to `width` when given) and appends the
/// resulting owned lines onto `lines`.
pub(crate) fn append_markdown(
markdown_source: &str,
width: Option<usize>,
lines: &mut Vec<Line<'static>>,
) {
let rendered = crate::markdown_render::render_markdown_text_with_width(markdown_source, width);
crate::render::line_utils::push_owned_lines(&rendered.lines, lines);
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use ratatui::text::Line;
// Collapses each rendered line's spans into one String for comparison.
fn lines_to_strings(lines: &[Line<'static>]) -> Vec<String> {
lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect()
}
// Citation markers must pass through verbatim, not become links/markup.
#[test]
fn citations_render_as_plain_text() {
let src = "Before 【F:/x.rs†L1】\nAfter 【F:/x.rs†L3】\n";
let mut out = Vec::new();
append_markdown(src, None, &mut out);
let rendered = lines_to_strings(&out);
assert_eq!(
rendered,
vec![
"Before 【F:/x.rs†L1】".to_string(),
"After 【F:/x.rs†L3】".to_string()
]
);
}
#[test]
fn indented_code_blocks_preserve_leading_whitespace() {
// Basic sanity: indented code with surrounding blank lines should produce the indented line.
let src = "Before\n\n code 1\n\nAfter\n";
let mut out = Vec::new();
append_markdown(src, None, &mut out);
let lines = lines_to_strings(&out);
assert_eq!(lines, vec!["Before", "", " code 1", "", "After"]);
}
// With no width limit, a long plain-text paragraph stays on one line.
#[test]
fn append_markdown_preserves_full_text_line() {
let src = "Hi! How can I help with codex-rs today? Want me to explore the repo, run tests, or work on a specific change?\n";
let mut out = Vec::new();
append_markdown(src, None, &mut out);
assert_eq!(
out.len(),
1,
"expected a single rendered line for plain text"
);
let rendered: String = out
.iter()
.flat_map(|l| l.spans.iter())
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("");
assert_eq!(
rendered,
"Hi! How can I help with codex-rs today? Want me to explore the repo, run tests, or work on a specific change?"
);
}
// An ordered-list item renders with its marker on the same line.
#[test]
fn append_markdown_matches_tui_markdown_for_ordered_item() {
let mut out = Vec::new();
append_markdown("1. Tight item\n", None, &mut out);
let lines = lines_to_strings(&out);
assert_eq!(lines, vec!["1. Tight item".to_string()]);
}
#[test]
fn append_markdown_keeps_ordered_list_line_unsplit_in_context() {
let src = "Loose vs. tight list items:\n1. Tight item\n";
let mut out = Vec::new();
append_markdown(src, None, &mut out);
let lines = lines_to_strings(&out);
// Expect to find the ordered list line rendered as a single line,
// not split into a marker-only line followed by the text.
assert!(
lines.iter().any(|s| s == "1. Tight item"),
"expected '1. Tight item' rendered as a single line; got: {lines:?}"
);
assert!(
!lines
.windows(2)
.any(|w| w[0].trim_end() == "1." && w[1] == "Tight item"),
"did not expect a split into ['1.', 'Tight item']; got: {lines:?}"
);
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/ui_consts.rs | codex-rs/tui2/src/ui_consts.rs | //! Shared UI constants for layout and alignment within the TUI.
/// Width (in terminal columns) reserved for the left gutter/prefix used by
/// live cells and aligned widgets.
///
/// Semantics:
/// - Chat composer reserves this many columns for the left border + padding.
/// - Status indicator lines begin with this many spaces for alignment.
/// - User history lines account for this many columns (e.g., "▌ ") when wrapping.
pub(crate) const LIVE_PREFIX_COLS: u16 = 2;
/// Indentation (in columns) applied to footer lines so they line up with the
/// left gutter defined by [`LIVE_PREFIX_COLS`], as a `usize` for text APIs.
pub(crate) const FOOTER_INDENT_COLS: usize = LIVE_PREFIX_COLS as usize;
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/app_backtrack.rs | codex-rs/tui2/src/app_backtrack.rs | use std::any::TypeId;
use std::path::PathBuf;
use std::sync::Arc;
use crate::app::App;
use crate::history_cell::SessionInfoCell;
use crate::history_cell::UserHistoryCell;
use crate::pager_overlay::Overlay;
use crate::tui;
use crate::tui::TuiEvent;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_protocol::ConversationId;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
/// Aggregates all backtrack-related state used by the App.
#[derive(Default)]
pub(crate) struct BacktrackState {
    /// True when Esc has primed backtrack mode in the main view.
    pub(crate) primed: bool,
    /// Session id of the base conversation to fork from.
    pub(crate) base_id: Option<ConversationId>,
    /// Ordinal of the currently selected user message (0-based, counting only
    /// user messages after the most recent session header). `usize::MAX` is
    /// the sentinel meaning "no selection yet".
    pub(crate) nth_user_message: usize,
    /// True when the transcript overlay is showing a backtrack preview.
    pub(crate) overlay_preview_active: bool,
    /// Pending fork request: (base_id, nth_user_message, prefill). Set when
    /// conversation history has been requested; consumed when the matching
    /// `ConversationPathResponseEvent` arrives.
    pub(crate) pending: Option<(ConversationId, usize, String)>,
}
impl App {
    /// Route overlay events when transcript overlay is active.
    ///
    /// When a backtrack preview is active: Esc steps the selection to the
    /// next older user message and Enter confirms the fork. Otherwise, a
    /// first Esc begins the preview and all other events are forwarded to
    /// the overlay widget.
    pub(crate) async fn handle_backtrack_overlay_event(
        &mut self,
        tui: &mut tui::Tui,
        event: TuiEvent,
    ) -> Result<bool> {
        if self.backtrack.overlay_preview_active {
            match event {
                TuiEvent::Key(KeyEvent {
                    code: KeyCode::Esc,
                    kind: KeyEventKind::Press | KeyEventKind::Repeat,
                    ..
                }) => {
                    self.overlay_step_backtrack(tui, event)?;
                    Ok(true)
                }
                TuiEvent::Key(KeyEvent {
                    code: KeyCode::Enter,
                    kind: KeyEventKind::Press,
                    ..
                }) => {
                    self.overlay_confirm_backtrack(tui);
                    Ok(true)
                }
                // Catchall: forward any other events to the overlay widget.
                _ => {
                    self.overlay_forward_event(tui, event)?;
                    Ok(true)
                }
            }
        } else if let TuiEvent::Key(KeyEvent {
            code: KeyCode::Esc,
            kind: KeyEventKind::Press | KeyEventKind::Repeat,
            ..
        }) = event
        {
            // First Esc in transcript overlay: begin backtrack preview at latest user message.
            self.begin_overlay_backtrack_preview(tui);
            Ok(true)
        } else {
            // Not in backtrack mode: forward events to the overlay widget.
            self.overlay_forward_event(tui, event)?;
            Ok(true)
        }
    }
    /// Handle global Esc presses for backtracking when no overlay is present.
    ///
    /// Esc escalates through three stages: prime backtrack mode, open the
    /// transcript overlay in preview mode, then step to older user messages.
    /// Does nothing while the composer has text (Esc then means "clear").
    pub(crate) fn handle_backtrack_esc_key(&mut self, tui: &mut tui::Tui) {
        if !self.chat_widget.composer_is_empty() {
            return;
        }
        if !self.backtrack.primed {
            self.prime_backtrack();
        } else if self.overlay.is_none() {
            self.open_backtrack_preview(tui);
        } else if self.backtrack.overlay_preview_active {
            self.step_backtrack_and_highlight(tui);
        }
    }
    /// Stage a backtrack and request conversation history from the agent.
    ///
    /// The staged request is completed later by
    /// `on_conversation_history_for_backtrack` when the matching
    /// `ConversationHistory` event arrives.
    pub(crate) fn request_backtrack(
        &mut self,
        prefill: String,
        base_id: ConversationId,
        nth_user_message: usize,
    ) {
        self.backtrack.pending = Some((base_id, nth_user_message, prefill));
        if let Some(path) = self.chat_widget.rollout_path() {
            let ev = ConversationPathResponseEvent {
                conversation_id: base_id,
                path,
            };
            self.app_event_tx
                .send(crate::app_event::AppEvent::ConversationHistory(ev));
        } else {
            tracing::error!("rollout path unavailable; cannot backtrack");
        }
    }
    /// Open transcript overlay (enters alternate screen and shows full transcript).
    pub(crate) fn open_transcript_overlay(&mut self, tui: &mut tui::Tui) {
        let _ = tui.enter_alt_screen();
        self.overlay = Some(Overlay::new_transcript(self.transcript_cells.clone()));
        tui.frame_requester().schedule_frame();
    }
    /// Close transcript overlay and restore normal UI.
    ///
    /// Flushes any history lines that were deferred while the overlay was
    /// open, and fully resets backtrack state if a preview was in progress.
    pub(crate) fn close_transcript_overlay(&mut self, tui: &mut tui::Tui) {
        let _ = tui.leave_alt_screen();
        let was_backtrack = self.backtrack.overlay_preview_active;
        if !self.deferred_history_lines.is_empty() {
            let lines = std::mem::take(&mut self.deferred_history_lines);
            tui.insert_history_lines(lines);
        }
        self.overlay = None;
        self.backtrack.overlay_preview_active = false;
        if was_backtrack {
            // Ensure backtrack state is fully reset when overlay closes (e.g. via 'q').
            self.reset_backtrack_state();
        }
    }
    /// Re-render the full transcript into the terminal scrollback in one call.
    /// Useful when switching sessions to ensure prior history remains visible.
    pub(crate) fn render_transcript_once(&mut self, tui: &mut tui::Tui) {
        if !self.transcript_cells.is_empty() {
            let width = tui.terminal.last_known_screen_size.width;
            for cell in &self.transcript_cells {
                tui.insert_history_lines(cell.display_lines(width));
            }
        }
    }
    /// Initialize backtrack state and show composer hint.
    fn prime_backtrack(&mut self) {
        self.backtrack.primed = true;
        // usize::MAX = "no selection yet"; the first step selects the latest
        // user message.
        self.backtrack.nth_user_message = usize::MAX;
        self.backtrack.base_id = self.chat_widget.conversation_id();
        self.chat_widget.show_esc_backtrack_hint();
    }
    /// Open overlay and begin backtrack preview flow (first step + highlight).
    fn open_backtrack_preview(&mut self, tui: &mut tui::Tui) {
        self.open_transcript_overlay(tui);
        self.backtrack.overlay_preview_active = true;
        // Composer is hidden by overlay; clear its hint.
        self.chat_widget.clear_esc_backtrack_hint();
        self.step_backtrack_and_highlight(tui);
    }
    /// When overlay is already open, begin preview mode and select latest user message.
    fn begin_overlay_backtrack_preview(&mut self, tui: &mut tui::Tui) {
        self.backtrack.primed = true;
        self.backtrack.base_id = self.chat_widget.conversation_id();
        self.backtrack.overlay_preview_active = true;
        let count = user_count(&self.transcript_cells);
        if let Some(last) = count.checked_sub(1) {
            self.apply_backtrack_selection(last);
        }
        tui.frame_requester().schedule_frame();
    }
    /// Step selection to the next older user message and update overlay.
    fn step_backtrack_and_highlight(&mut self, tui: &mut tui::Tui) {
        let count = user_count(&self.transcript_cells);
        if count == 0 {
            return;
        }
        let last_index = count.saturating_sub(1);
        // Sentinel -> start at the newest message; 0 -> pin at the oldest;
        // otherwise step one older (clamped in case the transcript shrank).
        let next_selection = if self.backtrack.nth_user_message == usize::MAX {
            last_index
        } else if self.backtrack.nth_user_message == 0 {
            0
        } else {
            self.backtrack
                .nth_user_message
                .saturating_sub(1)
                .min(last_index)
        };
        self.apply_backtrack_selection(next_selection);
        tui.frame_requester().schedule_frame();
    }
    /// Apply a computed backtrack selection to the overlay and internal counter.
    ///
    /// Falls back to the `usize::MAX` sentinel (and clears the highlight)
    /// when the requested user message no longer exists.
    fn apply_backtrack_selection(&mut self, nth_user_message: usize) {
        if let Some(cell_idx) = nth_user_position(&self.transcript_cells, nth_user_message) {
            self.backtrack.nth_user_message = nth_user_message;
            if let Some(Overlay::Transcript(t)) = &mut self.overlay {
                t.set_highlight_cell(Some(cell_idx));
            }
        } else {
            self.backtrack.nth_user_message = usize::MAX;
            if let Some(Overlay::Transcript(t)) = &mut self.overlay {
                t.set_highlight_cell(None);
            }
        }
    }
    /// Forward any event to the overlay and close it if done.
    fn overlay_forward_event(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
        if let Some(overlay) = &mut self.overlay {
            overlay.handle_event(tui, event)?;
            if overlay.is_done() {
                self.close_transcript_overlay(tui);
                tui.frame_requester().schedule_frame();
            }
        }
        Ok(())
    }
    /// Handle Enter in overlay backtrack preview: confirm selection and reset state.
    fn overlay_confirm_backtrack(&mut self, tui: &mut tui::Tui) {
        let nth_user_message = self.backtrack.nth_user_message;
        if let Some(base_id) = self.backtrack.base_id {
            // Prefill the composer with the selected user message's text so
            // the user can edit and resend it after the fork.
            let prefill = nth_user_position(&self.transcript_cells, nth_user_message)
                .and_then(|idx| self.transcript_cells.get(idx))
                .and_then(|cell| cell.as_any().downcast_ref::<UserHistoryCell>())
                .map(|c| c.message.clone())
                .unwrap_or_default();
            self.close_transcript_overlay(tui);
            self.request_backtrack(prefill, base_id, nth_user_message);
        }
        self.reset_backtrack_state();
    }
    /// Handle Esc in overlay backtrack preview: step selection if armed, else forward.
    fn overlay_step_backtrack(&mut self, tui: &mut tui::Tui, event: TuiEvent) -> Result<()> {
        if self.backtrack.base_id.is_some() {
            self.step_backtrack_and_highlight(tui);
        } else {
            self.overlay_forward_event(tui, event)?;
        }
        Ok(())
    }
    /// Confirm a primed backtrack from the main view (no overlay visible).
    /// Computes the prefill from the selected user message and requests history.
    pub(crate) fn confirm_backtrack_from_main(&mut self) {
        if let Some(base_id) = self.backtrack.base_id {
            let prefill =
                nth_user_position(&self.transcript_cells, self.backtrack.nth_user_message)
                    .and_then(|idx| self.transcript_cells.get(idx))
                    .and_then(|cell| cell.as_any().downcast_ref::<UserHistoryCell>())
                    .map(|c| c.message.clone())
                    .unwrap_or_default();
            self.request_backtrack(prefill, base_id, self.backtrack.nth_user_message);
        }
        self.reset_backtrack_state();
    }
    /// Clear all backtrack-related state and composer hints.
    ///
    /// Note: `pending` is intentionally left alone so an in-flight history
    /// request can still complete.
    pub(crate) fn reset_backtrack_state(&mut self) {
        self.backtrack.primed = false;
        self.backtrack.base_id = None;
        self.backtrack.nth_user_message = usize::MAX;
        // In case a hint is somehow still visible (e.g., race with overlay open/close).
        self.chat_widget.clear_esc_backtrack_hint();
    }
    /// Handle a ConversationHistory response while a backtrack is pending.
    /// If it matches the primed base session, fork and switch to the new conversation.
    pub(crate) async fn on_conversation_history_for_backtrack(
        &mut self,
        tui: &mut tui::Tui,
        ev: ConversationPathResponseEvent,
    ) -> Result<()> {
        // Only consume `pending` when the event is for the staged base id;
        // unrelated history responses leave the pending request intact.
        if let Some((base_id, _, _)) = self.backtrack.pending.as_ref()
            && ev.conversation_id == *base_id
            && let Some((_, nth_user_message, prefill)) = self.backtrack.pending.take()
        {
            self.fork_and_switch_to_new_conversation(tui, ev, nth_user_message, prefill)
                .await;
        }
        Ok(())
    }
    /// Fork the conversation using provided history and switch UI/state accordingly.
    async fn fork_and_switch_to_new_conversation(
        &mut self,
        tui: &mut tui::Tui,
        ev: ConversationPathResponseEvent,
        nth_user_message: usize,
        prefill: String,
    ) {
        let cfg = self.chat_widget.config_ref().clone();
        // Perform the fork via a thin wrapper for clarity/testability.
        let result = self
            .perform_fork(ev.path.clone(), nth_user_message, cfg.clone())
            .await;
        match result {
            Ok(new_conv) => {
                self.install_forked_conversation(tui, cfg, new_conv, nth_user_message, &prefill)
            }
            Err(e) => tracing::error!("error forking conversation: {e:#}"),
        }
    }
    /// Thin wrapper around ConversationManager::fork_conversation.
    async fn perform_fork(
        &self,
        path: PathBuf,
        nth_user_message: usize,
        cfg: codex_core::config::Config,
    ) -> codex_core::error::Result<codex_core::NewConversation> {
        self.server
            .fork_conversation(nth_user_message, cfg, path)
            .await
    }
    /// Install a forked conversation into the ChatWidget and update UI to reflect selection.
    fn install_forked_conversation(
        &mut self,
        tui: &mut tui::Tui,
        cfg: codex_core::config::Config,
        new_conv: codex_core::NewConversation,
        nth_user_message: usize,
        prefill: &str,
    ) {
        let conv = new_conv.conversation;
        let session_configured = new_conv.session_configured;
        let init = crate::chatwidget::ChatWidgetInit {
            config: cfg,
            model: self.current_model.clone(),
            frame_requester: tui.frame_requester(),
            app_event_tx: self.app_event_tx.clone(),
            initial_prompt: None,
            initial_images: Vec::new(),
            enhanced_keys_supported: self.enhanced_keys_supported,
            auth_manager: self.auth_manager.clone(),
            models_manager: self.server.get_models_manager(),
            feedback: self.feedback.clone(),
            is_first_run: false,
        };
        self.chat_widget =
            crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured);
        // Trim transcript up to the selected user message and re-render it.
        self.trim_transcript_for_backtrack(nth_user_message);
        self.render_transcript_once(tui);
        if !prefill.is_empty() {
            self.chat_widget.set_composer_text(prefill.to_string());
        }
        tui.frame_requester().schedule_frame();
    }
    /// Trim transcript_cells to preserve only content up to the selected user message.
    fn trim_transcript_for_backtrack(&mut self, nth_user_message: usize) {
        trim_transcript_cells_to_nth_user(&mut self.transcript_cells, nth_user_message);
    }
}
fn trim_transcript_cells_to_nth_user(
transcript_cells: &mut Vec<Arc<dyn crate::history_cell::HistoryCell>>,
nth_user_message: usize,
) {
if nth_user_message == usize::MAX {
return;
}
if let Some(cut_idx) = nth_user_position(transcript_cells, nth_user_message) {
transcript_cells.truncate(cut_idx);
}
}
/// Number of user messages in `cells`, counting only those after the most
/// recent session header (see `user_positions_iter`).
pub(crate) fn user_count(cells: &[Arc<dyn crate::history_cell::HistoryCell>]) -> usize {
    user_positions_iter(cells).count()
}
/// Index into `cells` of the `nth` user message (0-based, counted from the
/// most recent session header), or `None` if fewer than `nth + 1` user
/// messages exist.
fn nth_user_position(
    cells: &[Arc<dyn crate::history_cell::HistoryCell>],
    nth: usize,
) -> Option<usize> {
    // `Iterator::nth` is the idiomatic form of the previous
    // `enumerate().find_map(|(i, idx)| (i == nth).then_some(idx))`.
    user_positions_iter(cells).nth(nth)
}
/// Yields the indices (into `cells`) of user-message cells, restricted to
/// cells after the most recent session header; earlier cells belong to a
/// prior (forked-from) session and are ignored.
fn user_positions_iter(
    cells: &[Arc<dyn crate::history_cell::HistoryCell>],
) -> impl Iterator<Item = usize> + '_ {
    // Scan backwards for the latest SessionInfoCell; counting starts just
    // after it (or at 0 when no session header is present).
    let first_relevant = cells
        .iter()
        .rposition(|cell| cell.as_any().type_id() == TypeId::of::<SessionInfoCell>())
        .map_or(0, |idx| idx + 1);
    cells[first_relevant..]
        .iter()
        .enumerate()
        .filter(|(_, cell)| cell.as_any().type_id() == TypeId::of::<UserHistoryCell>())
        .map(move |(offset, _)| first_relevant + offset)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::history_cell::AgentMessageCell;
    use crate::history_cell::HistoryCell;
    use ratatui::prelude::Line;
    use std::sync::Arc;
    // Selecting the very first user message removes it and all cells after it.
    #[test]
    fn trim_transcript_for_first_user_drops_user_and_newer_cells() {
        let mut cells: Vec<Arc<dyn HistoryCell>> = vec![
            Arc::new(UserHistoryCell {
                message: "first user".to_string(),
            }) as Arc<dyn HistoryCell>,
            Arc::new(AgentMessageCell::new(vec![Line::from("assistant")], true))
                as Arc<dyn HistoryCell>,
        ];
        trim_transcript_cells_to_nth_user(&mut cells, 0);
        assert!(cells.is_empty());
    }
    // Cells that precede the selected user message survive the trim.
    #[test]
    fn trim_transcript_preserves_cells_before_selected_user() {
        let mut cells: Vec<Arc<dyn HistoryCell>> = vec![
            Arc::new(AgentMessageCell::new(vec![Line::from("intro")], true))
                as Arc<dyn HistoryCell>,
            Arc::new(UserHistoryCell {
                message: "first".to_string(),
            }) as Arc<dyn HistoryCell>,
            Arc::new(AgentMessageCell::new(vec![Line::from("after")], false))
                as Arc<dyn HistoryCell>,
        ];
        trim_transcript_cells_to_nth_user(&mut cells, 0);
        assert_eq!(cells.len(), 1);
        let agent = cells[0]
            .as_any()
            .downcast_ref::<AgentMessageCell>()
            .expect("agent cell");
        let agent_lines = agent.display_lines(u16::MAX);
        assert_eq!(agent_lines.len(), 1);
        let intro_text: String = agent_lines[0]
            .spans
            .iter()
            .map(|span| span.content.as_ref())
            .collect();
        assert_eq!(intro_text, "• intro");
    }
    // Selecting a later user message keeps the full history up to (but not
    // including) that message, including the earlier user turn.
    #[test]
    fn trim_transcript_for_later_user_keeps_prior_history() {
        let mut cells: Vec<Arc<dyn HistoryCell>> = vec![
            Arc::new(AgentMessageCell::new(vec![Line::from("intro")], true))
                as Arc<dyn HistoryCell>,
            Arc::new(UserHistoryCell {
                message: "first".to_string(),
            }) as Arc<dyn HistoryCell>,
            Arc::new(AgentMessageCell::new(vec![Line::from("between")], false))
                as Arc<dyn HistoryCell>,
            Arc::new(UserHistoryCell {
                message: "second".to_string(),
            }) as Arc<dyn HistoryCell>,
            Arc::new(AgentMessageCell::new(vec![Line::from("tail")], false))
                as Arc<dyn HistoryCell>,
        ];
        trim_transcript_cells_to_nth_user(&mut cells, 1);
        assert_eq!(cells.len(), 3);
        let agent_intro = cells[0]
            .as_any()
            .downcast_ref::<AgentMessageCell>()
            .expect("intro agent");
        let intro_lines = agent_intro.display_lines(u16::MAX);
        let intro_text: String = intro_lines[0]
            .spans
            .iter()
            .map(|span| span.content.as_ref())
            .collect();
        assert_eq!(intro_text, "• intro");
        let user_first = cells[1]
            .as_any()
            .downcast_ref::<UserHistoryCell>()
            .expect("first user");
        assert_eq!(user_first.message, "first");
        let agent_between = cells[2]
            .as_any()
            .downcast_ref::<AgentMessageCell>()
            .expect("between agent");
        let between_lines = agent_between.display_lines(u16::MAX);
        let between_text: String = between_lines[0]
            .spans
            .iter()
            .map(|span| span.content.as_ref())
            .collect();
        assert_eq!(between_text, "  between");
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/insert_history.rs | codex-rs/tui2/src/insert_history.rs | //! Render `ratatui` transcript lines into terminal scrollback.
//!
//! `insert_history_lines` is responsible for inserting rendered transcript lines
//! *above* the TUI viewport by emitting ANSI control sequences through the
//! terminal backend writer.
//!
//! ## Why we use crossterm style commands
//!
//! `write_spans` is also used by non-terminal callers (e.g.
//! `transcript_render::render_lines_to_ansi`) to produce deterministic ANSI
//! output for tests and "print after exit" flows. That means the implementation
//! must work with any `impl Write` (including an in-memory `Vec<u8>`) and must
//! preserve `ratatui::style::Color` semantics, including `Rgb(...)` and
//! `Indexed(...)`.
//!
//! Crossterm's style commands implement `Command` (including ANSI emission), so
//! `write_spans` can remain backend-independent while still producing ANSI
//! output that matches the terminal-rendered transcript.
use std::fmt;
use std::io;
use std::io::Write;
use crate::wrapping::word_wrap_lines_borrowed;
use crossterm::Command;
use crossterm::cursor::MoveTo;
use crossterm::queue;
use crossterm::style::Color as CColor;
use crossterm::style::Colors;
use crossterm::style::Print;
use crossterm::style::SetAttribute;
use crossterm::style::SetColors;
use crossterm::terminal::Clear;
use crossterm::terminal::ClearType;
use ratatui::layout::Size;
use ratatui::prelude::Backend;
use ratatui::style::Modifier;
use ratatui::text::Line;
use ratatui::text::Span;
/// Insert `lines` above the viewport using the terminal's backend writer
/// (avoids direct stdout references).
///
/// Lines are word-wrapped to the viewport width first so scrollback matches
/// the TUI's own wrapping, then pushed into scrollback via DECSTBM scroll
/// regions so the visible viewport content is not disturbed. The cursor is
/// restored to its previous position afterwards, making this call
/// cursor-position-neutral.
pub fn insert_history_lines<B>(
    terminal: &mut crate::custom_terminal::Terminal<B>,
    lines: Vec<Line>,
) -> io::Result<()>
where
    B: Backend + Write,
{
    let screen_size = terminal.backend().size().unwrap_or(Size::new(0, 0));
    let mut area = terminal.viewport_area;
    let mut should_update_area = false;
    let last_cursor_pos = terminal.last_known_cursor_pos;
    let writer = terminal.backend_mut();
    // Pre-wrap lines using word-aware wrapping so terminal scrollback sees the same
    // formatting as the TUI. This avoids character-level hard wrapping by the terminal.
    let wrapped = word_wrap_lines_borrowed(&lines, area.width.max(1) as usize);
    let wrapped_lines = wrapped.len() as u16;
    let cursor_top = if area.bottom() < screen_size.height {
        // If the viewport is not at the bottom of the screen, scroll it down to make room.
        // Don't scroll it past the bottom of the screen.
        let scroll_amount = wrapped_lines.min(screen_size.height - area.bottom());
        // Emit ANSI to scroll the lower region (from the top of the viewport to the bottom
        // of the screen) downward by `scroll_amount` lines. We do this by:
        // 1) Limiting the scroll region to [area.top()+1 .. screen_height] (1-based bounds)
        // 2) Placing the cursor at the top margin of that region
        // 3) Emitting Reverse Index (RI, ESC M) `scroll_amount` times
        // 4) Resetting the scroll region back to full screen
        let top_1based = area.top() + 1; // Convert 0-based row to 1-based for DECSTBM
        queue!(writer, SetScrollRegion(top_1based..screen_size.height))?;
        queue!(writer, MoveTo(0, area.top()))?;
        for _ in 0..scroll_amount {
            // Reverse Index (RI): ESC M
            queue!(writer, Print("\x1bM"))?;
        }
        queue!(writer, ResetScrollRegion)?;
        let cursor_top = area.top().saturating_sub(1);
        // Track the viewport's new position so the caller's terminal state
        // stays consistent with what we just told the terminal to do.
        area.y += scroll_amount;
        should_update_area = true;
        cursor_top
    } else {
        area.top().saturating_sub(1)
    };
    // Limit the scroll region to the lines from the top of the screen to the
    // top of the viewport. With this in place, when we add lines inside this
    // area, only the lines in this area will be scrolled. We place the cursor
    // at the end of the scroll region, and add lines starting there.
    //
    // ┌─Screen───────────────────────┐
    // │┌╌Scroll region╌╌╌╌╌╌╌╌╌╌╌╌╌╌┐│
    // │┆                            ┆│
    // │┆                            ┆│
    // │┆                            ┆│
    // │█╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┘│
    // │╭─Viewport───────────────────╮│
    // ││                            ││
    // │╰────────────────────────────╯│
    // └──────────────────────────────┘
    queue!(writer, SetScrollRegion(1..area.top()))?;
    // NB: we are using MoveTo instead of set_cursor_position here to avoid messing with the
    // terminal's last_known_cursor_position, which hopefully will still be accurate after we
    // fetch/restore the cursor position. insert_history_lines should be cursor-position-neutral :)
    queue!(writer, MoveTo(0, cursor_top))?;
    for line in wrapped {
        // "\r\n" inside the scroll region scrolls its contents up one row,
        // opening a fresh line at the bottom for this history line.
        queue!(writer, Print("\r\n"))?;
        // Apply line-level colors first so the clear-to-EOL below paints the
        // whole row with the line's background.
        queue!(
            writer,
            SetColors(Colors::new(
                line.style.fg.map(Into::into).unwrap_or(CColor::Reset),
                line.style.bg.map(Into::into).unwrap_or(CColor::Reset),
            ))
        )?;
        queue!(writer, Clear(ClearType::UntilNewLine))?;
        // Merge line-level style into each span so that ANSI colors reflect
        // line styles (e.g., blockquotes with green fg).
        let merged_spans: Vec<Span> = line
            .spans
            .iter()
            .map(|s| Span {
                style: s.style.patch(line.style),
                content: s.content.clone(),
            })
            .collect();
        write_spans(writer, merged_spans.iter())?;
    }
    queue!(writer, ResetScrollRegion)?;
    // Restore the cursor position to where it was before we started.
    queue!(writer, MoveTo(last_cursor_pos.x, last_cursor_pos.y))?;
    // NOTE(review): this binding drop looks redundant — `writer`'s borrow of
    // `terminal` ends here regardless; confirm before removing.
    let _ = writer;
    if should_update_area {
        terminal.set_viewport_area(area);
    }
    Ok(())
}
/// Terminal command that sets the DECSTBM scroll region. The `Range` holds
/// the top and bottom rows as emitted in the escape sequence (1-based; the
/// terminal treats both margins as inclusive).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SetScrollRegion(pub std::ops::Range<u16>);
impl Command for SetScrollRegion {
    fn write_ansi(&self, f: &mut impl fmt::Write) -> fmt::Result {
        // CSI Pt ; Pb r — DECSTBM (Set Top and Bottom Margins).
        write!(f, "\x1b[{};{}r", self.0.start, self.0.end)
    }
    #[cfg(windows)]
    fn execute_winapi(&self) -> std::io::Result<()> {
        panic!("tried to execute SetScrollRegion command using WinAPI, use ANSI instead");
    }
    #[cfg(windows)]
    fn is_ansi_code_supported(&self) -> bool {
        // TODO(nornagon): is this supported on Windows?
        true
    }
}
/// Terminal command that resets the DECSTBM scroll region to the full screen.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ResetScrollRegion;
impl Command for ResetScrollRegion {
    fn write_ansi(&self, f: &mut impl fmt::Write) -> fmt::Result {
        // CSI r — DECSTBM with no parameters restores full-screen margins.
        write!(f, "\x1b[r")
    }
    #[cfg(windows)]
    fn execute_winapi(&self) -> std::io::Result<()> {
        panic!("tried to execute ResetScrollRegion command using WinAPI, use ANSI instead");
    }
    #[cfg(windows)]
    fn is_ansi_code_supported(&self) -> bool {
        // TODO(nornagon): is this supported on Windows?
        true
    }
}
/// Difference between two `Modifier` sets; `queue` emits the crossterm
/// attribute commands that take a terminal from `from` to `to`.
struct ModifierDiff {
    pub from: Modifier,
    pub to: Modifier,
}
impl ModifierDiff {
    /// Queue SGR attribute changes transforming `from` into `to`.
    ///
    /// SGR 22 (`NormalIntensity`) clears BOTH bold and dim, so whenever it is
    /// emitted to drop one of those attributes, the other must be re-applied
    /// if the target style still contains it.
    fn queue<W>(self, mut w: W) -> io::Result<()>
    where
        W: io::Write,
    {
        use crossterm::style::Attribute as CAttribute;
        let removed = self.from - self.to;
        if removed.contains(Modifier::REVERSED) {
            queue!(w, SetAttribute(CAttribute::NoReverse))?;
        }
        if removed.contains(Modifier::BOLD) {
            queue!(w, SetAttribute(CAttribute::NormalIntensity))?;
            // NormalIntensity also cleared dim; restore it if still wanted.
            if self.to.contains(Modifier::DIM) {
                queue!(w, SetAttribute(CAttribute::Dim))?;
            }
        }
        if removed.contains(Modifier::ITALIC) {
            queue!(w, SetAttribute(CAttribute::NoItalic))?;
        }
        if removed.contains(Modifier::UNDERLINED) {
            queue!(w, SetAttribute(CAttribute::NoUnderline))?;
        }
        if removed.contains(Modifier::DIM) {
            queue!(w, SetAttribute(CAttribute::NormalIntensity))?;
            // Bug fix: NormalIntensity also cleared bold. Previously a
            // BOLD|DIM -> BOLD transition lost bold because `added` (to - from)
            // does not contain BOLD; re-apply it when the target still has it.
            if self.to.contains(Modifier::BOLD) {
                queue!(w, SetAttribute(CAttribute::Bold))?;
            }
        }
        if removed.contains(Modifier::CROSSED_OUT) {
            queue!(w, SetAttribute(CAttribute::NotCrossedOut))?;
        }
        if removed.contains(Modifier::SLOW_BLINK) || removed.contains(Modifier::RAPID_BLINK) {
            queue!(w, SetAttribute(CAttribute::NoBlink))?;
        }
        let added = self.to - self.from;
        if added.contains(Modifier::REVERSED) {
            queue!(w, SetAttribute(CAttribute::Reverse))?;
        }
        if added.contains(Modifier::BOLD) {
            queue!(w, SetAttribute(CAttribute::Bold))?;
        }
        if added.contains(Modifier::ITALIC) {
            queue!(w, SetAttribute(CAttribute::Italic))?;
        }
        if added.contains(Modifier::UNDERLINED) {
            queue!(w, SetAttribute(CAttribute::Underlined))?;
        }
        if added.contains(Modifier::DIM) {
            queue!(w, SetAttribute(CAttribute::Dim))?;
        }
        if added.contains(Modifier::CROSSED_OUT) {
            queue!(w, SetAttribute(CAttribute::CrossedOut))?;
        }
        if added.contains(Modifier::SLOW_BLINK) {
            queue!(w, SetAttribute(CAttribute::SlowBlink))?;
        }
        if added.contains(Modifier::RAPID_BLINK) {
            queue!(w, SetAttribute(CAttribute::RapidBlink))?;
        }
        Ok(())
    }
}
/// Write `content` spans to `writer`, emitting SGR escape codes only when a
/// span's style differs from what is already active, and ending with a full
/// attribute reset so trailing styles never leak into subsequent output.
pub(crate) fn write_spans<'a, I>(mut writer: &mut impl Write, content: I) -> io::Result<()>
where
    I: IntoIterator<Item = &'a Span<'a>>,
{
    // Track the (fg, bg) pair and modifier set currently active in the
    // output stream so redundant escape sequences are skipped.
    let mut current_colors = (CColor::Reset, CColor::Reset);
    let mut current_modifier = Modifier::empty();
    for span in content {
        let mut wanted = Modifier::empty();
        wanted.insert(span.style.add_modifier);
        wanted.remove(span.style.sub_modifier);
        if wanted != current_modifier {
            ModifierDiff {
                from: current_modifier,
                to: wanted,
            }
            .queue(&mut writer)?;
            current_modifier = wanted;
        }
        let wanted_colors = (
            span.style.fg.map(Into::into).unwrap_or(CColor::Reset),
            span.style.bg.map(Into::into).unwrap_or(CColor::Reset),
        );
        if wanted_colors != current_colors {
            queue!(
                writer,
                SetColors(Colors::new(wanted_colors.0, wanted_colors.1))
            )?;
            current_colors = wanted_colors;
        }
        queue!(writer, Print(span.content.clone()))?;
    }
    queue!(writer, SetAttribute(crossterm::style::Attribute::Reset))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::markdown_render::render_markdown_text;
use crate::test_backend::VT100Backend;
use pretty_assertions::assert_eq;
use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Style;
#[test]
fn writes_bold_then_regular_spans() {
use ratatui::style::Stylize;
let spans = ["A".bold(), "B".into()];
let mut actual: Vec<u8> = Vec::new();
write_spans(&mut actual, spans.iter()).unwrap();
let mut expected: Vec<u8> = Vec::new();
queue!(
expected,
SetAttribute(crossterm::style::Attribute::Bold),
Print("A"),
SetAttribute(crossterm::style::Attribute::NormalIntensity),
Print("B"),
SetAttribute(crossterm::style::Attribute::Reset),
)
.unwrap();
assert_eq!(
String::from_utf8(actual).unwrap(),
String::from_utf8(expected).unwrap()
);
}
#[test]
fn write_spans_emits_truecolor_and_indexed_sgr() {
// This test asserts that `write_spans` emits the correct SGR sequences for colors that
// can't be represented with the theme-aware ANSI palette:
//
// - `ratatui::style::Color::Rgb` (truecolor; `38;2;r;g;b`)
// - `ratatui::style::Color::Indexed` (256-color index; `48;5;n`)
//
// Those constructors are intentionally disallowed in production code (see
// `codex-rs/clippy.toml`), but the test needs them so the output bytes are fully
// deterministic.
#[expect(clippy::disallowed_methods)]
let fg = Color::Rgb(1, 2, 3);
#[expect(clippy::disallowed_methods)]
let bg = Color::Indexed(42);
let spans = [Span::styled("X", Style::default().fg(fg).bg(bg))];
let mut actual: Vec<u8> = Vec::new();
write_spans(&mut actual, spans.iter()).unwrap();
let mut expected: Vec<u8> = Vec::new();
queue!(
expected,
SetColors(Colors::new(
CColor::Rgb { r: 1, g: 2, b: 3 },
CColor::AnsiValue(42)
)),
Print("X"),
SetAttribute(crossterm::style::Attribute::Reset),
)
.unwrap();
assert_eq!(
String::from_utf8(actual).unwrap(),
String::from_utf8(expected).unwrap(),
);
}
#[test]
fn vt100_blockquote_line_emits_green_fg() {
// Set up a small off-screen terminal
let width: u16 = 40;
let height: u16 = 10;
let backend = VT100Backend::new(width, height);
let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
// Place viewport on the last line so history inserts scroll upward
let viewport = Rect::new(0, height - 1, width, 1);
term.set_viewport_area(viewport);
// Build a blockquote-like line: apply line-level green style and prefix "> "
let mut line: Line<'static> = Line::from(vec!["> ".into(), "Hello world".into()]);
line = line.style(Style::default().fg(Color::Green));
insert_history_lines(&mut term, vec![line])
.expect("Failed to insert history lines in test");
let mut saw_colored = false;
'outer: for row in 0..height {
for col in 0..width {
if let Some(cell) = term.backend().vt100().screen().cell(row, col)
&& cell.has_contents()
&& cell.fgcolor() != vt100::Color::Default
{
saw_colored = true;
break 'outer;
}
}
}
assert!(
saw_colored,
"expected at least one colored cell in vt100 output"
);
}
#[test]
fn vt100_blockquote_wrap_preserves_color_on_all_wrapped_lines() {
// Force wrapping by using a narrow viewport width and a long blockquote line.
let width: u16 = 20;
let height: u16 = 8;
let backend = VT100Backend::new(width, height);
let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
// Viewport is the last line so history goes directly above it.
let viewport = Rect::new(0, height - 1, width, 1);
term.set_viewport_area(viewport);
// Create a long blockquote with a distinct prefix and enough text to wrap.
let mut line: Line<'static> = Line::from(vec![
"> ".into(),
"This is a long quoted line that should wrap".into(),
]);
line = line.style(Style::default().fg(Color::Green));
insert_history_lines(&mut term, vec![line])
.expect("Failed to insert history lines in test");
// Parse and inspect the final screen buffer.
let screen = term.backend().vt100().screen();
// Collect rows that are non-empty; these should correspond to our wrapped lines.
let mut non_empty_rows: Vec<u16> = Vec::new();
for row in 0..height {
let mut any = false;
for col in 0..width {
if let Some(cell) = screen.cell(row, col)
&& cell.has_contents()
&& cell.contents() != "\0"
&& cell.contents() != " "
{
any = true;
break;
}
}
if any {
non_empty_rows.push(row);
}
}
// Expect at least two rows due to wrapping.
assert!(
non_empty_rows.len() >= 2,
"expected wrapped output to span >=2 rows, got {non_empty_rows:?}",
);
// For each non-empty row, ensure all non-space cells are using a non-default fg color.
for row in non_empty_rows {
for col in 0..width {
if let Some(cell) = screen.cell(row, col) {
let contents = cell.contents();
if !contents.is_empty() && contents != " " {
assert!(
cell.fgcolor() != vt100::Color::Default,
"expected non-default fg on row {row} col {col}, got {:?}",
cell.fgcolor()
);
}
}
}
}
}
    /// A colored prefix span followed by an unstyled span must not leak its
    /// color into the plain text that follows it on the same line.
    #[test]
    fn vt100_colored_prefix_then_plain_text_resets_color() {
        let width: u16 = 40;
        let height: u16 = 6;
        let backend = VT100Backend::new(width, height);
        let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
        let viewport = Rect::new(0, height - 1, width, 1);
        term.set_viewport_area(viewport);
        // First span colored, rest plain.
        let line: Line<'static> = Line::from(vec![
            Span::styled("1. ", ratatui::style::Style::default().fg(Color::LightBlue)),
            Span::raw("Hello world"),
        ]);
        insert_history_lines(&mut term, vec![line])
            .expect("Failed to insert history lines in test");
        let screen = term.backend().vt100().screen();
        // Find the first non-empty row; verify first three cells are colored, following cells default.
        'rows: for row in 0..height {
            let mut has_text = false;
            for col in 0..width {
                if let Some(cell) = screen.cell(row, col)
                    && cell.has_contents()
                    && cell.contents() != " "
                {
                    has_text = true;
                    break;
                }
            }
            if !has_text {
                continue;
            }
            // Expect "1. Hello world" starting at col 0.
            for col in 0..3 {
                let cell = screen.cell(row, col).unwrap();
                assert!(
                    cell.fgcolor() != vt100::Color::Default,
                    "expected colored prefix at col {col}, got {:?}",
                    cell.fgcolor()
                );
            }
            for col in 3..(3 + "Hello world".len() as u16) {
                let cell = screen.cell(row, col).unwrap();
                assert_eq!(
                    cell.fgcolor(),
                    vt100::Color::Default,
                    "expected default color for plain text at col {col}, got {:?}",
                    cell.fgcolor()
                );
            }
            break 'rows;
        }
    }
#[test]
fn vt100_deep_nested_mixed_list_third_level_marker_is_colored() {
// Markdown with five levels (ordered → unordered → ordered → unordered → unordered).
let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
let text = render_markdown_text(md);
let lines: Vec<Line<'static>> = text.lines.clone();
let width: u16 = 60;
let height: u16 = 12;
let backend = VT100Backend::new(width, height);
let mut term = crate::custom_terminal::Terminal::with_options(backend).expect("terminal");
let viewport = ratatui::layout::Rect::new(0, height - 1, width, 1);
term.set_viewport_area(viewport);
insert_history_lines(&mut term, lines).expect("Failed to insert history lines in test");
let screen = term.backend().vt100().screen();
// Reconstruct screen rows as strings to locate the 3rd level line.
let rows: Vec<String> = screen.rows(0, width).collect();
let needle = "1. Third level (ordered)";
let row_idx = rows
.iter()
.position(|r| r.contains(needle))
.unwrap_or_else(|| {
panic!("expected to find row containing {needle:?}, have rows: {rows:?}")
});
let col_start = rows[row_idx].find(needle).unwrap() as u16; // column where '1' starts
// Verify that the numeric marker ("1.") at the third level is colored
// (non-default fg) and the content after the following space resets to default.
for c in [col_start, col_start + 1] {
let cell = screen.cell(row_idx as u16, c).unwrap();
assert!(
cell.fgcolor() != vt100::Color::Default,
"expected colored 3rd-level marker at row {row_idx} col {c}, got {:?}",
cell.fgcolor()
);
}
let content_col = col_start + 3; // skip '1', '.', and the space
if let Some(cell) = screen.cell(row_idx as u16, content_col) {
assert_eq!(
cell.fgcolor(),
vt100::Color::Default,
"expected default color for 3rd-level content at row {row_idx} col {content_col}, got {:?}",
cell.fgcolor()
);
}
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/transcript_render.rs | codex-rs/tui2/src/transcript_render.rs | //! Transcript rendering helpers (flattening, wrapping, and metadata).
//!
//! `App` treats the transcript (history cells) as the source of truth and
//! renders a *flattened* list of visual lines into the viewport. A single
//! history cell may render multiple visual lines, and the viewport may include
//! synthetic spacer rows between cells.
//!
//! This module centralizes the logic for:
//! - Flattening history cells into visual `ratatui::text::Line`s.
//! - Producing parallel metadata (`TranscriptLineMeta`) used for scroll
//! anchoring and "user row" styling.
//! - Computing *soft-wrap joiners* so copy can treat wrapped prose as one
//! logical line instead of inserting hard newlines.
use crate::history_cell::HistoryCell;
use crate::tui::scrolling::TranscriptLineMeta;
use ratatui::text::Line;
use std::sync::Arc;
/// Flattened transcript lines plus the metadata required to interpret them.
///
/// Invariant: `lines`, `meta`, and `joiner_before` always have equal length.
#[derive(Debug)]
pub(crate) struct TranscriptLines {
    /// Flattened visual transcript lines, in the same order they are rendered.
    pub(crate) lines: Vec<Line<'static>>,
    /// Parallel metadata for each line (same length as `lines`).
    ///
    /// This maps a visual line back to `(cell_index, line_in_cell)` so scroll
    /// anchoring and "user row" styling remain stable across reflow.
    pub(crate) meta: Vec<TranscriptLineMeta>,
    /// Soft-wrap joiners (same length as `lines`).
    ///
    /// `joiner_before[i]` is `Some(joiner)` when line `i` is a soft-wrap
    /// continuation of line `i - 1`, and `None` when the break is a hard break
    /// (between input lines/cells, or spacer rows).
    ///
    /// Copy uses this to join wrapped prose without inserting hard newlines,
    /// while still preserving hard line breaks and explicit blank lines.
    pub(crate) joiner_before: Vec<Option<String>>,
}
/// Build flattened transcript lines without applying additional viewport wrapping.
///
/// This is useful for:
/// - Exit transcript rendering (ANSI) where we want the "cell as rendered"
///   output.
/// - Any consumer that wants a stable cell → line mapping without re-wrapping.
pub(crate) fn build_transcript_lines(
    cells: &[Arc<dyn HistoryCell>],
    width: u16,
) -> TranscriptLines {
    // Lossless flattening: each cell contributes its own transcript lines
    // (per-cell prefixes/indents already applied), explicit spacer rows
    // separate non-continuation cells, and parallel metadata maps every
    // visual line back to its originating cell.
    let mut out = TranscriptLines {
        lines: Vec::new(),
        meta: Vec::new(),
        joiner_before: Vec::new(),
    };
    let mut emitted_any = false;
    for (cell_index, cell) in cells.iter().enumerate() {
        // Cells report joiners next to their lines so copy logic can tell
        // hard breaks apart from soft wraps (preserving exact whitespace at
        // wrap boundaries).
        let rendered = cell.transcript_lines_with_joiners(width);
        if rendered.lines.is_empty() {
            continue;
        }
        // Non-continuation cells are separated by a blank spacer row so the
        // flattened transcript matches the viewport layout and intentional
        // blank lines survive copy.
        if !cell.is_stream_continuation() {
            if emitted_any {
                out.lines.push(Line::from(""));
                out.meta.push(TranscriptLineMeta::Spacer);
                out.joiner_before.push(None);
            } else {
                emitted_any = true;
            }
        }
        let joiners = rendered.joiner_before;
        for (line_in_cell, line) in rendered.lines.into_iter().enumerate() {
            out.lines.push(line);
            // `line_in_cell` is the visual line index within the cell; scroll
            // anchoring uses it to keep a row stable across reflows.
            out.meta.push(TranscriptLineMeta::CellLine {
                cell_index,
                line_in_cell,
            });
            // Invariant: exactly one joiner entry per emitted line.
            out.joiner_before
                .push(joiners.get(line_in_cell).cloned().unwrap_or(None));
        }
    }
    out
}
/// Build flattened transcript lines as they appear in the transcript viewport.
///
/// Prose lines are word-wrapped to `width`; preformatted content (currently
/// detected via the code-block line style) is deliberately left unwrapped so
/// indentation remains meaningful for copy/paste.
pub(crate) fn build_wrapped_transcript_lines(
    cells: &[Arc<dyn HistoryCell>],
    width: u16,
) -> TranscriptLines {
    let mut out = TranscriptLines {
        lines: Vec::new(),
        meta: Vec::new(),
        joiner_before: Vec::new(),
    };
    // A zero-width viewport renders nothing.
    if width == 0 {
        return out;
    }
    let opts: crate::wrapping::RtOptions<'_> =
        crate::wrapping::RtOptions::new(width.max(1) as usize);
    let mut emitted_any = false;
    for (cell_index, cell) in cells.iter().enumerate() {
        append_wrapped_transcript_cell(&mut out, &mut emitted_any, cell_index, cell, width, &opts);
    }
    out
}
/// Append a single history cell to an existing wrapped transcript.
///
/// This is the incremental building block used by transcript caching: it applies the same
/// flattening and viewport-wrapping rules as [`build_wrapped_transcript_lines`], but for one cell
/// at a time.
///
/// `has_emitted_lines` tracks whether the output already contains any non-spacer lines and is used
/// to decide when to insert an inter-cell spacer row.
pub(crate) fn append_wrapped_transcript_cell(
    out: &mut TranscriptLines,
    has_emitted_lines: &mut bool,
    cell_index: usize,
    cell: &Arc<dyn HistoryCell>,
    width: u16,
    base_opts: &crate::wrapping::RtOptions<'_>,
) {
    use crate::render::line_utils::line_to_static;
    use ratatui::style::Color;
    // A zero-width viewport renders nothing; bail out before asking the cell to render.
    if width == 0 {
        return;
    }
    // Start from each cell's transcript view (prefixes/indents already applied), then apply
    // viewport wrapping to prose while keeping preformatted content intact.
    let rendered = cell.transcript_lines_with_joiners(width);
    if rendered.lines.is_empty() {
        return;
    }
    if !cell.is_stream_continuation() {
        if *has_emitted_lines {
            out.lines.push(Line::from(""));
            out.meta.push(TranscriptLineMeta::Spacer);
            out.joiner_before.push(None);
        } else {
            *has_emitted_lines = true;
        }
    }
    // `visual_line_in_cell` counts the output visual lines produced from this cell *after* any
    // viewport wrapping. This is distinct from `base_idx` (the index into the cell's input
    // lines), since a single input line may wrap into multiple visual lines.
    let mut visual_line_in_cell: usize = 0;
    // `first` marks the cell's first input line, which keeps the base initial indent;
    // later input lines switch to the subsequent indent (see the `opts` selection below).
    let mut first = true;
    for (base_idx, base_line) in rendered.lines.iter().enumerate() {
        // Preserve code blocks (and other preformatted text) by not applying
        // viewport wrapping, so indentation remains meaningful for copy/paste.
        // NOTE(review): a cyan line-level fg is the "preformatted" marker here — this matches
        // the code style applied by the markdown renderer; confirm if that styling changes.
        if base_line.style.fg == Some(Color::Cyan) {
            out.lines.push(base_line.clone());
            out.meta.push(TranscriptLineMeta::CellLine {
                cell_index,
                line_in_cell: visual_line_in_cell,
            });
            visual_line_in_cell = visual_line_in_cell.saturating_add(1);
            // Preformatted lines are treated as hard breaks; we keep the cell-provided joiner
            // (which is typically `None`).
            out.joiner_before.push(
                rendered
                    .joiner_before
                    .get(base_idx)
                    .cloned()
                    .unwrap_or(None),
            );
            first = false;
            continue;
        }
        let opts = if first {
            base_opts.clone()
        } else {
            // For subsequent input lines within a cell, treat the "initial" indent as the cell's
            // subsequent indent (matches textarea wrapping expectations).
            base_opts
                .clone()
                .initial_indent(base_opts.subsequent_indent.clone())
        };
        // `word_wrap_line_with_joiners` returns both the wrapped visual lines and, for each
        // continuation segment, the exact joiner substring that should be inserted instead of a
        // newline when copying as a logical line.
        let (wrapped, wrapped_joiners) =
            crate::wrapping::word_wrap_line_with_joiners(base_line, opts);
        for (seg_idx, (wrapped_line, seg_joiner)) in
            wrapped.into_iter().zip(wrapped_joiners).enumerate()
        {
            out.lines.push(line_to_static(&wrapped_line));
            out.meta.push(TranscriptLineMeta::CellLine {
                cell_index,
                line_in_cell: visual_line_in_cell,
            });
            visual_line_in_cell = visual_line_in_cell.saturating_add(1);
            if seg_idx == 0 {
                // The first wrapped segment corresponds to the original input line, so we use the
                // cell-provided joiner (hard break vs soft break *between input lines*).
                out.joiner_before.push(
                    rendered
                        .joiner_before
                        .get(base_idx)
                        .cloned()
                        .unwrap_or(None),
                );
            } else {
                // Subsequent wrapped segments are soft-wrap continuations produced by viewport
                // wrapping, so we use the wrap-derived joiner.
                out.joiner_before.push(seg_joiner);
            }
        }
        first = false;
    }
}
/// Render flattened transcript lines into ANSI strings suitable for printing after the TUI exits.
///
/// This helper mirrors the transcript viewport behavior:
/// - Merges line-level style into each span so ANSI output matches on-screen styling.
/// - For user-authored rows, pads the background style out to the full terminal width so prompts
///   appear as solid blocks in scrollback.
/// - Streams spans through the shared vt100 writer so downstream tests and tools see consistent
///   escape sequences.
pub(crate) fn render_lines_to_ansi(
    lines: &[Line<'static>],
    line_meta: &[TranscriptLineMeta],
    is_user_cell: &[bool],
    width: u16,
) -> Vec<String> {
    use unicode_width::UnicodeWidthStr;
    lines
        .iter()
        .enumerate()
        .map(|(idx, line)| {
            // Determine whether this visual line belongs to a user-authored cell. We use this to
            // pad the background to the full terminal width so prompts appear as solid blocks in
            // scrollback.
            let is_user_row = line_meta
                .get(idx)
                .and_then(TranscriptLineMeta::cell_index)
                .map(|cell_index| is_user_cell.get(cell_index).copied().unwrap_or(false))
                .unwrap_or(false);
            // Line-level styles in ratatui apply to the entire line, but spans can also have their
            // own styles. ANSI output is span-based, so we "bake" the line style into every span by
            // patching span style with the line style.
            let mut merged_spans: Vec<ratatui::text::Span<'static>> = line
                .spans
                .iter()
                .map(|span| ratatui::text::Span {
                    style: span.style.patch(line.style),
                    content: span.content.clone(),
                })
                .collect();
            if is_user_row && width > 0 {
                // For user rows, pad out to the full width so the background color extends across
                // the line in terminal scrollback (mirrors the on-screen viewport behavior).
                let text: String = merged_spans
                    .iter()
                    .map(|span| span.content.as_ref())
                    .collect();
                // Pad by display width (not byte length) so wide glyphs are accounted for.
                let text_width = UnicodeWidthStr::width(text.as_str());
                let total_width = usize::from(width);
                if text_width < total_width {
                    let pad_len = total_width.saturating_sub(text_width);
                    if pad_len > 0 {
                        let pad_style = crate::style::user_message_style();
                        merged_spans.push(ratatui::text::Span {
                            style: pad_style,
                            content: " ".repeat(pad_len).into(),
                        });
                    }
                }
            }
            let mut buf: Vec<u8> = Vec::new();
            // Write errors are ignored: writing into a `Vec<u8>` does not fail, and any
            // degenerate case below degrades to an empty string via `unwrap_or_default`.
            let _ = crate::insert_history::write_spans(&mut buf, merged_spans.iter());
            String::from_utf8(buf).unwrap_or_default()
        })
        .collect()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::history_cell::TranscriptLinesWithJoiners;
use pretty_assertions::assert_eq;
use std::sync::Arc;
    /// Minimal `HistoryCell` test double that returns canned transcript data.
    #[derive(Debug)]
    struct FakeCell {
        // Lines returned verbatim from `transcript_lines_with_joiners`.
        lines: Vec<Line<'static>>,
        // One joiner entry per line, also returned verbatim.
        joiner_before: Vec<Option<String>>,
        // Controls whether an inter-cell spacer row is inserted before this cell.
        is_stream_continuation: bool,
    }
    // The trait impl just echoes the canned fields; `display_lines` mirrors
    // the transcript lines since these tests only exercise transcript output.
    impl HistoryCell for FakeCell {
        fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
            self.lines.clone()
        }
        fn transcript_lines_with_joiners(&self, _width: u16) -> TranscriptLinesWithJoiners {
            TranscriptLinesWithJoiners {
                lines: self.lines.clone(),
                joiner_before: self.joiner_before.clone(),
            }
        }
        fn is_stream_continuation(&self) -> bool {
            self.is_stream_continuation
        }
    }
fn concat_line(line: &Line<'_>) -> String {
line.spans
.iter()
.map(|s| s.content.as_ref())
.collect::<String>()
}
#[test]
fn build_wrapped_transcript_lines_threads_joiners_and_spacers() {
let cells: Vec<Arc<dyn HistoryCell>> = vec![
Arc::new(FakeCell {
lines: vec![Line::from("• hello world")],
joiner_before: vec![None],
is_stream_continuation: false,
}),
Arc::new(FakeCell {
lines: vec![Line::from("• foo bar")],
joiner_before: vec![None],
is_stream_continuation: false,
}),
];
// Force wrapping so we get soft-wrap joiners for the second segment of each cell's line.
let transcript = build_wrapped_transcript_lines(&cells, 8);
assert_eq!(transcript.lines.len(), transcript.meta.len());
assert_eq!(transcript.lines.len(), transcript.joiner_before.len());
let rendered: Vec<String> = transcript.lines.iter().map(concat_line).collect();
assert_eq!(rendered, vec!["• hello", "world", "", "• foo", "bar"]);
assert_eq!(
transcript.meta,
vec![
TranscriptLineMeta::CellLine {
cell_index: 0,
line_in_cell: 0
},
TranscriptLineMeta::CellLine {
cell_index: 0,
line_in_cell: 1
},
TranscriptLineMeta::Spacer,
TranscriptLineMeta::CellLine {
cell_index: 1,
line_in_cell: 0
},
TranscriptLineMeta::CellLine {
cell_index: 1,
line_in_cell: 1
},
]
);
assert_eq!(
transcript.joiner_before,
vec![
None,
Some(" ".to_string()),
None,
None,
Some(" ".to_string()),
]
);
}
#[test]
fn append_wrapped_transcript_cell_matches_full_build() {
use ratatui::style::Color;
use ratatui::style::Style;
let cells: Vec<Arc<dyn HistoryCell>> = vec![
Arc::new(FakeCell {
lines: vec![Line::from("• hello world")],
joiner_before: vec![None],
is_stream_continuation: false,
}),
// A preformatted line should not be viewport-wrapped.
Arc::new(FakeCell {
lines: vec![Line::from("• 1234567890").style(Style::default().fg(Color::Cyan))],
joiner_before: vec![None],
is_stream_continuation: false,
}),
// A stream continuation should not get an inter-cell spacer row.
Arc::new(FakeCell {
lines: vec![Line::from("• wrap me please")],
joiner_before: vec![None],
is_stream_continuation: true,
}),
];
let width = 7;
let full = build_wrapped_transcript_lines(&cells, width);
let mut out = TranscriptLines {
lines: Vec::new(),
meta: Vec::new(),
joiner_before: Vec::new(),
};
let mut has_emitted_lines = false;
let base_opts: crate::wrapping::RtOptions<'_> =
crate::wrapping::RtOptions::new(width.max(1) as usize);
for (cell_index, cell) in cells.iter().enumerate() {
append_wrapped_transcript_cell(
&mut out,
&mut has_emitted_lines,
cell_index,
cell,
width,
&base_opts,
);
}
assert_eq!(out.lines, full.lines);
assert_eq!(out.meta, full.meta);
assert_eq!(out.joiner_before, full.joiner_before);
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/style.rs | codex-rs/tui2/src/style.rs | use crate::color::blend;
use crate::color::is_light;
use crate::terminal_palette::best_color;
use crate::terminal_palette::default_bg;
use ratatui::style::Color;
use ratatui::style::Style;
/// Returns the style for a user-authored message, using the terminal's
/// detected default background color.
pub fn user_message_style() -> Style {
    user_message_style_for(default_bg())
}
/// Returns the style for a user-authored message using the provided terminal background.
///
/// When the terminal background is unknown we cannot compute a readable tint,
/// so the default (unstyled) appearance is returned.
pub fn user_message_style_for(terminal_bg: Option<(u8, u8, u8)>) -> Style {
    terminal_bg
        .map(|bg| Style::default().bg(user_message_bg(bg)))
        .unwrap_or_default()
}
/// Computes the background tint for user messages by blending 10% of a
/// contrasting shade (black on light terminals, white on dark ones) into the
/// terminal's background color.
#[allow(clippy::disallowed_methods)]
pub fn user_message_bg(terminal_bg: (u8, u8, u8)) -> Color {
    let light = is_light(terminal_bg);
    let overlay = if light { (0, 0, 0) } else { (255, 255, 255) };
    best_color(blend(overlay, terminal_bg, 0.1))
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/markdown_render.rs | codex-rs/tui2/src/markdown_render.rs | use crate::render::line_utils::line_to_static;
use crate::wrapping::RtOptions;
use crate::wrapping::word_wrap_line;
use pulldown_cmark::CodeBlockKind;
use pulldown_cmark::CowStr;
use pulldown_cmark::Event;
use pulldown_cmark::HeadingLevel;
use pulldown_cmark::Options;
use pulldown_cmark::Parser;
use pulldown_cmark::Tag;
use pulldown_cmark::TagEnd;
use ratatui::style::Style;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
/// Style palette applied while converting markdown events into styled spans.
struct MarkdownStyles {
    // Heading styles, one per level.
    h1: Style,
    h2: Style,
    h3: Style,
    h4: Style,
    h5: Style,
    h6: Style,
    // Inline code spans and code-block lines.
    code: Style,
    emphasis: Style,
    strong: Style,
    strikethrough: Style,
    // List markers: "N. " and "- " respectively.
    ordered_list_marker: Style,
    unordered_list_marker: Style,
    // Link URLs appended after link text.
    link: Style,
    // Line-level style for quoted lines.
    blockquote: Style,
}
// Default palette: headings are bold/italic/underline variants, inline code
// and links are cyan, blockquotes are green, ordered-list markers light blue.
impl Default for MarkdownStyles {
    fn default() -> Self {
        use ratatui::style::Stylize;
        Self {
            h1: Style::new().bold().underlined(),
            h2: Style::new().bold(),
            h3: Style::new().bold().italic(),
            h4: Style::new().italic(),
            h5: Style::new().italic(),
            h6: Style::new().italic(),
            code: Style::new().cyan(),
            emphasis: Style::new().italic(),
            strong: Style::new().bold(),
            strikethrough: Style::new().crossed_out(),
            ordered_list_marker: Style::new().light_blue(),
            unordered_list_marker: Style::new(),
            link: Style::new().cyan().underlined(),
            blockquote: Style::new().green(),
        }
    }
}
/// One level of block indentation (blockquote or list item), plus the marker
/// (bullet/number) to emit on a list item's first line.
#[derive(Clone, Debug)]
struct IndentContext {
    // Spans prepended to lines rendered at this level.
    prefix: Vec<Span<'static>>,
    // Marker spans (e.g. "- " or "1. ") emitted only on the first line of a list item.
    marker: Option<Vec<Span<'static>>>,
    // Whether this level comes from a list item (affects prefix selection).
    is_list: bool,
}
impl IndentContext {
    fn new(prefix: Vec<Span<'static>>, marker: Option<Vec<Span<'static>>>, is_list: bool) -> Self {
        Self {
            prefix,
            marker,
            is_list,
        }
    }
}
/// Renders markdown to styled `Text` without any width-based wrapping.
pub fn render_markdown_text(input: &str) -> Text<'static> {
    render_markdown_text_with_width(input, None)
}
/// Renders markdown to styled `Text`, word-wrapping prose to `width` when one
/// is provided (code blocks are never wrapped).
pub(crate) fn render_markdown_text_with_width(input: &str, width: Option<usize>) -> Text<'static> {
    // Strikethrough is the only non-default markdown extension enabled here.
    let mut opts = Options::empty();
    opts.insert(Options::ENABLE_STRIKETHROUGH);
    let mut writer = Writer::new(Parser::new_ext(input, opts), width);
    writer.run();
    writer.text
}
/// Streaming converter from pulldown-cmark events into styled ratatui lines.
struct Writer<'a, I>
where
    I: Iterator<Item = Event<'a>>,
{
    // Source markdown event stream.
    iter: I,
    // Accumulated output lines.
    text: Text<'static>,
    styles: MarkdownStyles,
    // Stack of merged inline styles (emphasis/strong/heading/...); top is current.
    inline_styles: Vec<Style>,
    // Active block indentation contexts (blockquotes / list items), outermost first.
    indent_stack: Vec<IndentContext>,
    // Per-list state: `Some(counter)` for ordered lists, `None` for unordered.
    list_indices: Vec<Option<u64>>,
    // Destination URL of the link currently being rendered, if any.
    link: Option<String>,
    // Whether the next block-level content should start on a fresh line.
    needs_newline: bool,
    // Set while a list item's marker is prepared but no content has been emitted yet.
    pending_marker_line: bool,
    in_paragraph: bool,
    in_code_block: bool,
    // Wrap width for prose; `None` disables wrapping entirely.
    wrap_width: Option<usize>,
    // Line currently being assembled; flushed on the next line break.
    current_line_content: Option<Line<'static>>,
    // Indent spans captured for the current line (first row vs. wrapped rows).
    current_initial_indent: Vec<Span<'static>>,
    current_subsequent_indent: Vec<Span<'static>>,
    current_line_style: Style,
    current_line_in_code_block: bool,
}
impl<'a, I> Writer<'a, I>
where
I: Iterator<Item = Event<'a>>,
{
    /// Creates a writer over `iter` with empty output; `wrap_width = None`
    /// disables prose wrapping.
    fn new(iter: I, wrap_width: Option<usize>) -> Self {
        Self {
            iter,
            text: Text::default(),
            styles: MarkdownStyles::default(),
            inline_styles: Vec::new(),
            indent_stack: Vec::new(),
            list_indices: Vec::new(),
            link: None,
            needs_newline: false,
            pending_marker_line: false,
            in_paragraph: false,
            in_code_block: false,
            wrap_width,
            current_line_content: None,
            current_initial_indent: Vec::new(),
            current_subsequent_indent: Vec::new(),
            current_line_style: Style::default(),
            current_line_in_code_block: false,
        }
    }
    /// Drains the event stream, then flushes any partially-built final line.
    fn run(&mut self) {
        // `while let` (rather than `for`) because the body also needs `&mut self`.
        while let Some(ev) = self.iter.next() {
            self.handle_event(ev);
        }
        self.flush_current_line();
    }
    /// Dispatches one markdown event to the matching handler.
    fn handle_event(&mut self, event: Event<'a>) {
        match event {
            Event::Start(tag) => self.start_tag(tag),
            Event::End(tag) => self.end_tag(tag),
            Event::Text(text) => self.text(text),
            Event::Code(code) => self.code(code),
            Event::SoftBreak => self.soft_break(),
            Event::HardBreak => self.hard_break(),
            Event::Rule => {
                self.flush_current_line();
                if !self.text.lines.is_empty() {
                    self.push_blank_line();
                }
                // A horizontal rule is rendered as a short em-dash run.
                self.push_line(Line::from("———"));
                self.needs_newline = true;
            }
            Event::Html(html) => self.html(html, false),
            Event::InlineHtml(html) => self.html(html, true),
            // Footnote references and task-list markers are intentionally ignored.
            Event::FootnoteReference(_) => {}
            Event::TaskListMarker(_) => {}
        }
    }
    /// Handles a block/inline open tag.
    fn start_tag(&mut self, tag: Tag<'a>) {
        match tag {
            Tag::Paragraph => self.start_paragraph(),
            Tag::Heading { level, .. } => self.start_heading(level),
            Tag::BlockQuote => self.start_blockquote(),
            Tag::CodeBlock(kind) => {
                // Indented code blocks keep a 4-space prefix; fenced blocks
                // instead carry a language tag (currently unused downstream).
                let indent = match kind {
                    CodeBlockKind::Fenced(_) => None,
                    CodeBlockKind::Indented => Some(Span::from(" ".repeat(4))),
                };
                let lang = match kind {
                    CodeBlockKind::Fenced(lang) => Some(lang.to_string()),
                    CodeBlockKind::Indented => None,
                };
                self.start_codeblock(lang, indent)
            }
            Tag::List(start) => self.start_list(start),
            Tag::Item => self.start_item(),
            Tag::Emphasis => self.push_inline_style(self.styles.emphasis),
            Tag::Strong => self.push_inline_style(self.styles.strong),
            Tag::Strikethrough => self.push_inline_style(self.styles.strikethrough),
            Tag::Link { dest_url, .. } => self.push_link(dest_url.to_string()),
            // Tables, images, footnotes, and metadata blocks are not rendered.
            Tag::HtmlBlock
            | Tag::FootnoteDefinition(_)
            | Tag::Table(_)
            | Tag::TableHead
            | Tag::TableRow
            | Tag::TableCell
            | Tag::Image { .. }
            | Tag::MetadataBlock(_) => {}
        }
    }
    /// Handles a block/inline close tag, mirroring `start_tag`.
    fn end_tag(&mut self, tag: TagEnd) {
        match tag {
            TagEnd::Paragraph => self.end_paragraph(),
            TagEnd::Heading(_) => self.end_heading(),
            TagEnd::BlockQuote => self.end_blockquote(),
            TagEnd::CodeBlock => self.end_codeblock(),
            TagEnd::List(_) => self.end_list(),
            TagEnd::Item => {
                // Drop the item's indent context and cancel any unemitted marker.
                self.indent_stack.pop();
                self.pending_marker_line = false;
            }
            TagEnd::Emphasis | TagEnd::Strong | TagEnd::Strikethrough => self.pop_inline_style(),
            TagEnd::Link => self.pop_link(),
            TagEnd::HtmlBlock
            | TagEnd::FootnoteDefinition
            | TagEnd::Table
            | TagEnd::TableHead
            | TagEnd::TableRow
            | TagEnd::TableCell
            | TagEnd::Image
            | TagEnd::MetadataBlock(_) => {}
        }
    }
    /// Opens a paragraph: inserts a blank separator when needed and starts a
    /// fresh output line.
    fn start_paragraph(&mut self) {
        if self.needs_newline {
            self.push_blank_line();
        }
        self.push_line(Line::default());
        self.needs_newline = false;
        self.in_paragraph = true;
    }
    /// Closes a paragraph; the next block will be separated by a blank line.
    fn end_paragraph(&mut self) {
        self.needs_newline = true;
        self.in_paragraph = false;
        self.pending_marker_line = false;
    }
fn start_heading(&mut self, level: HeadingLevel) {
if self.needs_newline {
self.push_line(Line::default());
self.needs_newline = false;
}
let heading_style = match level {
HeadingLevel::H1 => self.styles.h1,
HeadingLevel::H2 => self.styles.h2,
HeadingLevel::H3 => self.styles.h3,
HeadingLevel::H4 => self.styles.h4,
HeadingLevel::H5 => self.styles.h5,
HeadingLevel::H6 => self.styles.h6,
};
let content = format!("{} ", "#".repeat(level as usize));
self.push_line(Line::from(vec![Span::styled(content, heading_style)]));
self.push_inline_style(heading_style);
self.needs_newline = false;
}
    /// Closes a heading and restores the previous inline style.
    fn end_heading(&mut self) {
        self.needs_newline = true;
        self.pop_inline_style();
    }
    /// Opens a blockquote level: lines rendered inside get a "> " prefix.
    fn start_blockquote(&mut self) {
        if self.needs_newline {
            self.push_blank_line();
            self.needs_newline = false;
        }
        self.indent_stack
            .push(IndentContext::new(vec![Span::from("> ")], None, false));
    }
    /// Closes the innermost blockquote level.
    fn end_blockquote(&mut self) {
        self.indent_stack.pop();
        self.needs_newline = true;
    }
    /// Appends literal text. Multi-line text events (possible inside code
    /// blocks) are split so each source line becomes its own output line.
    fn text(&mut self, text: CowStr<'a>) {
        // Materialize a pending list-item marker line before emitting content.
        if self.pending_marker_line {
            self.push_line(Line::default());
        }
        self.pending_marker_line = false;
        if self.in_code_block && !self.needs_newline {
            // Successive text events inside a code block belong to separate
            // source lines: start a new line if the current one has content.
            let has_content = self
                .current_line_content
                .as_ref()
                .map(|line| !line.spans.is_empty())
                .unwrap_or_else(|| {
                    self.text
                        .lines
                        .last()
                        .map(|line| !line.spans.is_empty())
                        .unwrap_or(false)
                });
            if has_content {
                self.push_line(Line::default());
            }
        }
        for (i, line) in text.lines().enumerate() {
            if self.needs_newline {
                self.push_line(Line::default());
                self.needs_newline = false;
            }
            if i > 0 {
                self.push_line(Line::default());
            }
            let content = line.to_string();
            // Apply the innermost merged inline style (if any) to the text.
            let span = Span::styled(
                content,
                self.inline_styles.last().copied().unwrap_or_default(),
            );
            self.push_span(span);
        }
        self.needs_newline = false;
    }
fn code(&mut self, code: CowStr<'a>) {
if self.pending_marker_line {
self.push_line(Line::default());
self.pending_marker_line = false;
}
let span = Span::from(code.into_string()).style(self.styles.code);
self.push_span(span);
}
    /// Appends raw HTML verbatim, line by line; block HTML (`inline == false`)
    /// forces the next content onto a new line.
    fn html(&mut self, html: CowStr<'a>, inline: bool) {
        self.pending_marker_line = false;
        for (i, line) in html.lines().enumerate() {
            if self.needs_newline {
                self.push_line(Line::default());
                self.needs_newline = false;
            }
            if i > 0 {
                self.push_line(Line::default());
            }
            let style = self.inline_styles.last().copied().unwrap_or_default();
            self.push_span(Span::styled(line.to_string(), style));
        }
        self.needs_newline = !inline;
    }
    /// Hard line break: starts a new output line.
    fn hard_break(&mut self) {
        self.push_line(Line::default());
    }
    /// Soft break: rendered identically to a hard break (one visual line per
    /// source line).
    fn soft_break(&mut self) {
        self.push_line(Line::default());
    }
    /// Opens a list; `index` is `Some(start)` for an ordered list.
    fn start_list(&mut self, index: Option<u64>) {
        // Only top-level lists get a separating line; nested lists hug their
        // parent item.
        if self.list_indices.is_empty() && self.needs_newline {
            self.push_line(Line::default());
        }
        self.list_indices.push(index);
    }
    /// Closes the innermost list.
    fn end_list(&mut self) {
        self.list_indices.pop();
        self.needs_newline = true;
    }
    /// Opens a list item: builds the marker ("- " or "N. ") and the
    /// continuation indent for wrapped lines, then defers emitting the marker
    /// until the item's first content arrives (`pending_marker_line`).
    fn start_item(&mut self) {
        self.pending_marker_line = true;
        let depth = self.list_indices.len();
        let is_ordered = self
            .list_indices
            .last()
            .map(Option::is_some)
            .unwrap_or(false);
        // Marker column width grows by 4 per nesting level; depth >= 1 here
        // since items only occur inside a list.
        let width = depth * 4 - 3;
        let marker = if let Some(last_index) = self.list_indices.last_mut() {
            match last_index {
                None => Some(vec![Span::styled(
                    " ".repeat(width - 1) + "- ",
                    self.styles.unordered_list_marker,
                )]),
                Some(index) => {
                    // Pre-increment, then print the previous value, so the
                    // stored counter is always the *next* index to use.
                    *index += 1;
                    Some(vec![Span::styled(
                        format!("{:width$}. ", *index - 1),
                        self.styles.ordered_list_marker,
                    )])
                }
            }
        } else {
            None
        };
        // Continuation lines align under the item text, past the marker
        // (ordered markers are one character wider: the trailing ".").
        let indent_prefix = if depth == 0 {
            Vec::new()
        } else {
            let indent_len = if is_ordered { width + 2 } else { width + 1 };
            vec![Span::from(" ".repeat(indent_len))]
        };
        self.indent_stack
            .push(IndentContext::new(indent_prefix, marker, true));
        self.needs_newline = false;
    }
    /// Opens a code block; `indent` is the 4-space prefix used for indented
    /// blocks. The language tag is currently unused.
    fn start_codeblock(&mut self, _lang: Option<String>, indent: Option<Span<'static>>) {
        self.flush_current_line();
        if !self.text.lines.is_empty() {
            self.push_blank_line();
        }
        self.in_code_block = true;
        self.indent_stack.push(IndentContext::new(
            vec![indent.unwrap_or_default()],
            None,
            false,
        ));
        self.needs_newline = true;
    }
    /// Closes the current code block and drops its indent context.
    fn end_codeblock(&mut self) {
        self.needs_newline = true;
        self.in_code_block = false;
        self.indent_stack.pop();
    }
    /// Pushes `style` merged on top of the current inline style, so nested
    /// emphasis/strong/etc. compose.
    fn push_inline_style(&mut self, style: Style) {
        let current = self.inline_styles.last().copied().unwrap_or_default();
        let merged = current.patch(style);
        self.inline_styles.push(merged);
    }
    /// Pops the innermost inline style.
    fn pop_inline_style(&mut self) {
        self.inline_styles.pop();
    }
    /// Records the destination of the link currently being rendered.
    fn push_link(&mut self, dest_url: String) {
        self.link = Some(dest_url);
    }
    /// Emits the recorded link URL after the link text, as " (url)".
    fn pop_link(&mut self) {
        if let Some(link) = self.link.take() {
            self.push_span(" (".into());
            self.push_span(Span::styled(link, self.styles.link));
            self.push_span(")".into());
        }
    }
    /// Finalizes the line under construction: wraps prose to `wrap_width`
    /// (code lines are never wrapped), applies indent spans and the captured
    /// line style, and appends the result to the output text.
    fn flush_current_line(&mut self) {
        if let Some(line) = self.current_line_content.take() {
            let style = self.current_line_style;
            // NB we don't wrap code in code blocks, in order to preserve whitespace for copy/paste.
            if !self.current_line_in_code_block
                && let Some(width) = self.wrap_width
            {
                let opts = RtOptions::new(width)
                    .initial_indent(self.current_initial_indent.clone().into())
                    .subsequent_indent(self.current_subsequent_indent.clone().into());
                for wrapped in word_wrap_line(&line, opts) {
                    let owned = line_to_static(&wrapped).style(style);
                    self.text.lines.push(owned);
                }
            } else {
                // No wrapping: just prepend the initial indent spans verbatim.
                let mut spans = self.current_initial_indent.clone();
                let mut line = line;
                spans.append(&mut line.spans);
                self.text.lines.push(Line::from_iter(spans).style(style));
            }
            self.current_initial_indent.clear();
            self.current_subsequent_indent.clear();
            self.current_line_in_code_block = false;
        }
    }
    /// Starts a new output line: flushes the previous one, then captures the
    /// indent spans and line-level style the new line should render with.
    fn push_line(&mut self, line: Line<'static>) {
        self.flush_current_line();
        // A line is treated as quoted if any enclosing prefix contains '>'.
        let blockquote_active = self
            .indent_stack
            .iter()
            .any(|ctx| ctx.prefix.iter().any(|s| s.content.contains('>')));
        let mut style = if blockquote_active {
            self.styles.blockquote
        } else {
            line.style
        };
        // Code blocks are "preformatted": we want them to keep code styling even when they appear
        // within other structures like blockquotes (which otherwise apply a line-level style).
        //
        // This matters for copy fidelity: downstream copy logic uses code styling as a cue to
        // preserve indentation and to fence code runs with Markdown markers.
        if self.in_code_block {
            style = style.patch(self.styles.code);
        }
        // The first line of a list item gets the marker prefix; wrapped
        // continuations get the plain indent.
        let was_pending = self.pending_marker_line;
        self.current_initial_indent = self.prefix_spans(was_pending);
        self.current_subsequent_indent = self.prefix_spans(false);
        self.current_line_style = style;
        self.current_line_content = Some(line);
        self.current_line_in_code_block = self.in_code_block;
        self.pending_marker_line = false;
    }
fn push_span(&mut self, span: Span<'static>) {
if let Some(line) = self.current_line_content.as_mut() {
line.push_span(span);
} else {
self.push_line(Line::from(vec![span]));
}
}
    /// Emits a blank separator line. When a non-list context (blockquote or
    /// code block) is active, the blank line still carries its prefix.
    fn push_blank_line(&mut self) {
        self.flush_current_line();
        if self.indent_stack.iter().all(|ctx| ctx.is_list) {
            self.text.lines.push(Line::default());
        } else {
            self.push_line(Line::default());
            self.flush_current_line();
        }
    }
    /// Computes the indent spans for the next line. When `pending_marker_line`
    /// is set, the innermost list marker is emitted in place of that level's
    /// plain indent; otherwise only the innermost list level contributes its
    /// indent (non-list prefixes like "> " are always included).
    fn prefix_spans(&self, pending_marker_line: bool) -> Vec<Span<'static>> {
        let mut prefix: Vec<Span<'static>> = Vec::new();
        // Index of the innermost context that actually has a marker to emit.
        let last_marker_index = if pending_marker_line {
            self.indent_stack
                .iter()
                .enumerate()
                .rev()
                .find_map(|(i, ctx)| if ctx.marker.is_some() { Some(i) } else { None })
        } else {
            None
        };
        let last_list_index = self.indent_stack.iter().rposition(|ctx| ctx.is_list);
        for (i, ctx) in self.indent_stack.iter().enumerate() {
            if pending_marker_line {
                if Some(i) == last_marker_index
                    && let Some(marker) = &ctx.marker
                {
                    prefix.extend(marker.iter().cloned());
                    continue;
                }
                // Skip list indents outside the marker level; the marker's own
                // width (see `start_item`) already accounts for nesting depth.
                if ctx.is_list && last_marker_index.is_some_and(|idx| idx > i) {
                    continue;
                }
            } else if ctx.is_list && Some(i) != last_list_index {
                continue;
            }
            prefix.extend(ctx.prefix.iter().cloned());
        }
        prefix
    }
}
// Test bodies live in a sibling file and are spliced in verbatim; `include!`
// (rather than a `mod` path declaration) keeps them compiled inside this
// module so they can exercise its private items.
#[cfg(test)]
mod markdown_render_tests {
    include!("markdown_render_tests.rs");
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use ratatui::text::Text;

    /// Flatten each rendered line's spans into one plain `String` so tests can
    /// assert on visible content without styling noise.
    fn lines_to_strings(text: &Text<'_>) -> Vec<String> {
        text.lines
            .iter()
            .map(|l| {
                l.spans
                    .iter()
                    .map(|s| s.content.clone())
                    .collect::<String>()
            })
            .collect()
    }

    // Plain paragraphs wrap at the requested width.
    #[test]
    fn wraps_plain_text_when_width_provided() {
        let markdown = "This is a simple sentence that should wrap.";
        let rendered = render_markdown_text_with_width(markdown, Some(16));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "This is a simple".to_string(),
                "sentence that".to_string(),
                "should wrap.".to_string(),
            ]
        );
    }

    // Wrapped continuations of a list item align under the item text, not the
    // bullet.
    #[test]
    fn wraps_list_items_preserving_indent() {
        let markdown = "- first second third fourth";
        let rendered = render_markdown_text_with_width(markdown, Some(14));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec!["- first second".to_string(), " third fourth".to_string(),]
        );
    }

    // Each nesting level keeps its own continuation indent when wrapping.
    #[test]
    fn wraps_nested_lists() {
        let markdown =
            "- outer item with several words to wrap\n - inner item that also needs wrapping";
        let rendered = render_markdown_text_with_width(markdown, Some(20));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "- outer item with".to_string(),
                " several words to".to_string(),
                " wrap".to_string(),
                " - inner item".to_string(),
                " that also".to_string(),
                " needs wrapping".to_string(),
            ]
        );
    }

    // Ordered-list continuations indent past the `1. ` marker width.
    #[test]
    fn wraps_ordered_lists() {
        let markdown = "1. ordered item contains many words for wrapping";
        let rendered = render_markdown_text_with_width(markdown, Some(18));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "1. ordered item".to_string(),
                " contains many".to_string(),
                " words for".to_string(),
                " wrapping".to_string(),
            ]
        );
    }

    // Every wrapped blockquote line re-emits the `> ` prefix.
    #[test]
    fn wraps_blockquotes() {
        let markdown = "> block quote with content that should wrap nicely";
        let rendered = render_markdown_text_with_width(markdown, Some(22));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "> block quote with".to_string(),
                "> content that should".to_string(),
                "> wrap nicely".to_string(),
            ]
        );
    }

    // A blockquote nested in a list keeps both the list indent and the quote
    // prefix on wrapped lines.
    #[test]
    fn wraps_blockquotes_inside_lists() {
        let markdown = "- list item\n > block quote inside list that wraps";
        let rendered = render_markdown_text_with_width(markdown, Some(24));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "- list item".to_string(),
                " > block quote inside".to_string(),
                " > list that wraps".to_string(),
            ]
        );
    }

    // Same as above but under an ordered list marker.
    #[test]
    fn wraps_list_items_containing_blockquotes() {
        let markdown = "1. item with quote\n > quoted text that should wrap";
        let rendered = render_markdown_text_with_width(markdown, Some(24));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec![
                "1. item with quote".to_string(),
                " > quoted text that".to_string(),
                " > should wrap".to_string(),
            ]
        );
    }

    // Code blocks are preformatted: they must not be wrapped even when the
    // width is far smaller than the line.
    #[test]
    fn does_not_wrap_code_blocks() {
        let markdown = "````\nfn main() { println!(\"hi from a long line\"); }\n````";
        let rendered = render_markdown_text_with_width(markdown, Some(10));
        let lines = lines_to_strings(&rendered);
        assert_eq!(
            lines,
            vec!["fn main() { println!(\"hi from a long line\"); }".to_string(),]
        );
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/transcript_selection.rs | codex-rs/tui2/src/transcript_selection.rs | //! Transcript selection primitives.
//!
//! The transcript (history) viewport is rendered as a flattened list of visual
//! lines after wrapping. Selection in the transcript needs to be stable across
//! scrolling and terminal resizes, so endpoints are expressed in
//! *content-relative* coordinates:
//!
//! - `line_index`: index into the flattened, wrapped transcript lines (visual
//! lines).
//! - `column`: a zero-based offset within that visual line, measured from the
//! first content column to the right of the gutter.
//!
//! These coordinates are intentionally independent of the current viewport: the
//! user can scroll after selecting, and the selection should continue to refer
//! to the same conversation content.
//!
//! Clipboard reconstruction is implemented in `transcript_copy` (including
//! off-screen lines), while keybinding detection and the on-screen copy
//! affordance live in `transcript_copy_ui`.
//!
//! ## Mouse selection semantics
//!
//! The transcript supports click-and-drag selection. To avoid leaving a
//! distracting 1-cell highlight on a simple click, the selection only becomes
//! active once a drag updates the head point.
use crate::tui::scrolling::TranscriptScroll;
/// Number of columns reserved for the transcript gutter (bullet/prefix space).
///
/// Transcript rendering prefixes each line with a short gutter (e.g. `• ` or
/// continuation padding). Selection coordinates intentionally exclude this
/// gutter so selection/copy operates on content columns instead of terminal
/// absolute columns.
pub(crate) const TRANSCRIPT_GUTTER_COLS: u16 = 2;
/// Content-relative selection within the inline transcript viewport.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub(crate) struct TranscriptSelection {
/// The initial selection point (where the selection drag started).
///
/// This remains fixed while dragging; the highlighted region is the span
/// between `anchor` and `head`.
pub(crate) anchor: Option<TranscriptSelectionPoint>,
/// The current selection point (where the selection drag currently ends).
///
/// This is `None` until the user drags, which prevents a simple click from
/// creating a persistent selection highlight.
pub(crate) head: Option<TranscriptSelectionPoint>,
}
/// A single endpoint of a transcript selection.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct TranscriptSelectionPoint {
/// Index into the flattened, wrapped transcript lines.
pub(crate) line_index: usize,
/// Zero-based content column (excluding the gutter).
///
/// This is not a terminal absolute column: callers add the gutter offset
/// when mapping it to a rendered buffer row.
pub(crate) column: u16,
}
impl TranscriptSelectionPoint {
    /// Create a selection endpoint at a given wrapped line index and column.
    pub(crate) const fn new(line_index: usize, column: u16) -> Self {
        Self { line_index, column }
    }
}
/// Convenience conversion from a `(line_index, column)` pair.
impl From<(usize, u16)> for TranscriptSelectionPoint {
    fn from((line_index, column): (usize, u16)) -> Self {
        Self::new(line_index, column)
    }
}
/// Return `(start, end)` with `start <= end` in transcript order.
///
/// When the two endpoints compare equal, `(anchor, head)` order is kept
/// (the values are indistinguishable anyway).
pub(crate) fn ordered_endpoints(
    anchor: TranscriptSelectionPoint,
    head: TranscriptSelectionPoint,
) -> (TranscriptSelectionPoint, TranscriptSelectionPoint) {
    if head < anchor {
        (head, anchor)
    } else {
        (anchor, head)
    }
}
/// Begin a potential transcript selection (left button down).
///
/// Records an anchor point and clears any existing head. The selection is not
/// considered "active" until a drag sets a head, which avoids highlighting a
/// 1-cell region on a simple click.
///
/// Returns whether the selection changed (useful to decide whether to request
/// a redraw).
pub(crate) fn on_mouse_down(
    selection: &mut TranscriptSelection,
    point: Option<TranscriptSelectionPoint>,
) -> bool {
    match point {
        None => false,
        Some(point) => {
            let previous = *selection;
            begin(selection, point);
            previous != *selection
        }
    }
}
/// The outcome of a mouse drag update.
///
/// This is returned by [`on_mouse_drag`]. It separates selection state updates
/// from `App`-level actions, so callers can decide when to schedule redraws or
/// lock the transcript scroll position.
///
/// `lock_scroll` indicates the caller should lock the transcript viewport (if
/// currently following the bottom) so ongoing streaming output does not move
/// the selection under the cursor.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct MouseDragOutcome {
    /// Whether the selection changed (useful to decide whether to request a
    /// redraw).
    pub(crate) changed: bool,
    /// Whether the caller should lock the transcript scroll position.
    ///
    /// Only set while streaming output is arriving, the viewport is following
    /// the bottom, and the drag has moved off the anchor; see the `drag`
    /// helper.
    pub(crate) lock_scroll: bool,
}
/// Update the selection state for a left-button drag.
///
/// Sets the selection head (creating an active selection) and reports:
///
/// - `changed`: whether the selection state changed (useful to decide whether
///   to request a redraw).
/// - `lock_scroll`: whether the caller should lock transcript scrolling to
///   freeze the viewport under the selection while streaming output arrives.
///
/// `point` is expected to already be clamped to the transcript's content area
/// (e.g. not in the gutter). If `point` is `None`, this is a no-op.
pub(crate) fn on_mouse_drag(
    selection: &mut TranscriptSelection,
    scroll: &TranscriptScroll,
    point: Option<TranscriptSelectionPoint>,
    streaming: bool,
) -> MouseDragOutcome {
    match point {
        None => MouseDragOutcome {
            changed: false,
            lock_scroll: false,
        },
        Some(point) => {
            let previous = *selection;
            let lock_scroll = drag(selection, scroll, point, streaming);
            MouseDragOutcome {
                changed: previous != *selection,
                lock_scroll,
            }
        }
    }
}
/// Finalize the selection state when the left button is released.
///
/// A selection that never became active (no head) or whose head ended up equal
/// to its anchor is cleared, so a simple click does not leave a persistent
/// highlight.
///
/// Returns whether the selection changed (useful to decide whether to request
/// a redraw).
pub(crate) fn on_mouse_up(selection: &mut TranscriptSelection) -> bool {
    let previous = *selection;
    end(selection);
    previous != *selection
}
/// Begin a potential selection by recording an anchor and clearing any head.
///
/// This ensures a plain click does not create an active selection/highlight.
/// The selection becomes active on the first drag that sets `head`.
fn begin(selection: &mut TranscriptSelection, point: TranscriptSelectionPoint) {
*selection = TranscriptSelection {
anchor: Some(point),
head: None,
};
}
/// Set the drag head when an anchor exists; no-op otherwise.
///
/// Returns whether the caller should lock the transcript scroll position:
/// only while streaming, following the bottom, and once the pointer has
/// actually moved off the anchor — so new output doesn't move the selection
/// under the cursor mid-drag.
fn drag(
    selection: &mut TranscriptSelection,
    scroll: &TranscriptScroll,
    point: TranscriptSelectionPoint,
    streaming: bool,
) -> bool {
    match selection.anchor {
        None => false,
        Some(anchor) => {
            let lock = streaming
                && point != anchor
                && matches!(*scroll, TranscriptScroll::ToBottom);
            selection.head = Some(point);
            lock
        }
    }
}
/// Finalize selection on mouse up.
///
/// A collapsed selection — one that never got a head, or whose head equals
/// its anchor — is reset to the default (empty) state so a click doesn't
/// leave a 1-cell highlight.
fn end(selection: &mut TranscriptSelection) {
    let collapsed = match selection.head {
        None => true,
        Some(head) => selection.anchor == Some(head),
    };
    if collapsed {
        *selection = TranscriptSelection::default();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    // Click alone leaves only an anchor; the highlight appears on first drag,
    // and releasing without a drag clears the selection entirely.
    #[test]
    fn selection_only_highlights_on_drag() {
        let anchor = TranscriptSelectionPoint::new(0, 1);
        let head = TranscriptSelectionPoint::new(0, 3);
        let mut selection = TranscriptSelection::default();
        assert!(on_mouse_down(&mut selection, Some(anchor)));
        assert_eq!(
            selection,
            TranscriptSelection {
                anchor: Some(anchor),
                head: None,
            }
        );
        assert!(on_mouse_up(&mut selection));
        assert_eq!(selection, TranscriptSelection::default());
        assert!(on_mouse_down(&mut selection, Some(anchor)));
        let outcome = on_mouse_drag(
            &mut selection,
            &TranscriptScroll::ToBottom,
            Some(head),
            false,
        );
        assert!(outcome.changed);
        assert!(!outcome.lock_scroll);
        assert_eq!(
            selection,
            TranscriptSelection {
                anchor: Some(anchor),
                head: Some(head),
            }
        );
    }

    // Dragging back onto the anchor and releasing must not leave a 1-cell
    // highlight behind.
    #[test]
    fn selection_clears_when_drag_ends_at_anchor() {
        let point = TranscriptSelectionPoint::new(0, 1);
        let mut selection = TranscriptSelection::default();
        assert!(on_mouse_down(&mut selection, Some(point)));
        let outcome = on_mouse_drag(
            &mut selection,
            &TranscriptScroll::ToBottom,
            Some(point),
            false,
        );
        assert!(outcome.changed);
        assert!(!outcome.lock_scroll);
        assert!(on_mouse_up(&mut selection));
        assert_eq!(selection, TranscriptSelection::default());
    }

    // All three scroll-lock preconditions hold (streaming, at bottom, point
    // moved), so the outcome asks the caller to lock the viewport.
    #[test]
    fn drag_requests_scroll_lock_when_streaming_at_bottom_and_point_moves() {
        let anchor = TranscriptSelectionPoint::new(0, 1);
        let head = TranscriptSelectionPoint::new(0, 2);
        let mut selection = TranscriptSelection::default();
        assert!(on_mouse_down(&mut selection, Some(anchor)));
        let outcome = on_mouse_drag(
            &mut selection,
            &TranscriptScroll::ToBottom,
            Some(head),
            true,
        );
        assert!(outcome.changed);
        assert!(outcome.lock_scroll);
    }

    // Missing points (and drags without a prior anchor) are no-ops: state is
    // untouched and nothing reports a change.
    #[test]
    fn selection_helpers_noop_without_points_or_anchor() {
        let mut selection = TranscriptSelection::default();
        assert!(!on_mouse_down(&mut selection, None));
        assert_eq!(selection, TranscriptSelection::default());
        let outcome = on_mouse_drag(&mut selection, &TranscriptScroll::ToBottom, None, false);
        assert_eq!(
            outcome,
            MouseDragOutcome {
                changed: false,
                lock_scroll: false,
            }
        );
        assert_eq!(selection, TranscriptSelection::default());
        let outcome = on_mouse_drag(
            &mut selection,
            &TranscriptScroll::ToBottom,
            Some(TranscriptSelectionPoint::new(0, 1)),
            false,
        );
        assert_eq!(
            outcome,
            MouseDragOutcome {
                changed: false,
                lock_scroll: false,
            }
        );
        assert_eq!(selection, TranscriptSelection::default());
        assert!(!on_mouse_up(&mut selection));
        assert_eq!(selection, TranscriptSelection::default());
    }

    // Starting a new selection discards the previous head, not just the
    // previous anchor.
    #[test]
    fn mouse_down_resets_head() {
        let anchor = TranscriptSelectionPoint::new(0, 1);
        let head = TranscriptSelectionPoint::new(0, 2);
        let next_anchor = TranscriptSelectionPoint::new(1, 0);
        let mut selection = TranscriptSelection {
            anchor: Some(anchor),
            head: Some(head),
        };
        assert!(on_mouse_down(&mut selection, Some(next_anchor)));
        assert_eq!(
            selection,
            TranscriptSelection {
                anchor: Some(next_anchor),
                head: None,
            }
        );
    }

    // Scroll lock only applies when following the bottom; a manually scrolled
    // viewport never requests it, even while streaming.
    #[test]
    fn dragging_does_not_request_scroll_lock_when_not_at_bottom() {
        let anchor = TranscriptSelectionPoint::new(0, 1);
        let head = TranscriptSelectionPoint::new(0, 2);
        let mut selection = TranscriptSelection::default();
        assert!(on_mouse_down(&mut selection, Some(anchor)));
        let outcome = on_mouse_drag(
            &mut selection,
            &TranscriptScroll::Scrolled {
                cell_index: 0,
                line_in_cell: 0,
            },
            Some(head),
            true,
        );
        assert!(outcome.changed);
        assert!(!outcome.lock_scroll);
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/markdown_render_tests.rs | codex-rs/tui2/src/markdown_render_tests.rs | use pretty_assertions::assert_eq;
use ratatui::style::Color;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::text::Span;
use ratatui::text::Text;
use crate::markdown_render::render_markdown_text;
use insta::assert_snapshot;
// Empty input renders to an empty `Text`.
#[test]
fn empty() {
    assert_eq!(render_markdown_text(""), Text::default());
}
// A single paragraph passes through as one line.
#[test]
fn paragraph_single() {
    assert_eq!(
        render_markdown_text("Hello, world!"),
        Text::from("Hello, world!")
    );
}
// A soft break inside a paragraph produces two lines, no blank between.
#[test]
fn paragraph_soft_break() {
    assert_eq!(
        render_markdown_text("Hello\nWorld"),
        Text::from_iter(["Hello", "World"])
    );
}
// Separate paragraphs get a blank line between them.
#[test]
fn paragraph_multiple() {
    assert_eq!(
        render_markdown_text("Paragraph 1\n\nParagraph 2"),
        Text::from_iter(["Paragraph 1", "", "Paragraph 2"])
    );
}
// Each heading level keeps its `#` marker and gets progressively lighter
// styling (bold+underline at H1 down to italic at H4-H6), with a blank line
// after each heading except the last.
#[test]
fn headings() {
    let md = "# Heading 1\n## Heading 2\n### Heading 3\n#### Heading 4\n##### Heading 5\n###### Heading 6\n";
    let text = render_markdown_text(md);
    let expected = Text::from_iter([
        Line::from_iter(["# ".bold().underlined(), "Heading 1".bold().underlined()]),
        Line::default(),
        Line::from_iter(["## ".bold(), "Heading 2".bold()]),
        Line::default(),
        Line::from_iter(["### ".bold().italic(), "Heading 3".bold().italic()]),
        Line::default(),
        Line::from_iter(["#### ".italic(), "Heading 4".italic()]),
        Line::default(),
        Line::from_iter(["##### ".italic(), "Heading 5".italic()]),
        Line::default(),
        Line::from_iter(["###### ".italic(), "Heading 6".italic()]),
    ]);
    assert_eq!(text, expected);
}
#[test]
fn blockquote_single() {
let text = render_markdown_text("> Blockquote");
let expected = Text::from(Line::from_iter(["> ", "Blockquote"]).green());
assert_eq!(text, expected);
}
#[test]
fn blockquote_soft_break() {
// Soft break via lazy continuation should render as a new line in blockquotes.
let text = render_markdown_text("> This is a blockquote\nwith a soft break\n");
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"> This is a blockquote".to_string(),
"> with a soft break".to_string()
]
);
}
#[test]
fn blockquote_multiple_with_break() {
let text = render_markdown_text("> Blockquote 1\n\n> Blockquote 2\n");
let expected = Text::from_iter([
Line::from_iter(["> ", "Blockquote 1"]).green(),
Line::default(),
Line::from_iter(["> ", "Blockquote 2"]).green(),
]);
assert_eq!(text, expected);
}
#[test]
fn blockquote_three_paragraphs_short_lines() {
let md = "> one\n>\n> two\n>\n> three\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["> ", "one"]).green(),
Line::from_iter(["> "]).green(),
Line::from_iter(["> ", "two"]).green(),
Line::from_iter(["> "]).green(),
Line::from_iter(["> ", "three"]).green(),
]);
assert_eq!(text, expected);
}
#[test]
fn blockquote_nested_two_levels() {
let md = "> Level 1\n>> Level 2\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["> ", "Level 1"]).green(),
Line::from_iter(["> "]).green(),
Line::from_iter(["> ", "> ", "Level 2"]).green(),
]);
assert_eq!(text, expected);
}
#[test]
fn blockquote_with_list_items() {
let md = "> - item 1\n> - item 2\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["> ", "- ", "item 1"]).green(),
Line::from_iter(["> ", "- ", "item 2"]).green(),
]);
assert_eq!(text, expected);
}
#[test]
fn blockquote_with_ordered_list() {
let md = "> 1. first\n> 2. second\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(vec![
Span::from("> "),
"1. ".light_blue(),
Span::from("first"),
])
.green(),
Line::from_iter(vec![
Span::from("> "),
"2. ".light_blue(),
Span::from("second"),
])
.green(),
]);
assert_eq!(text, expected);
}
#[test]
fn blockquote_list_then_nested_blockquote() {
let md = "> - parent\n> > child\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["> ", "- ", "parent"]).green(),
Line::from_iter(["> ", " ", "> ", "child"]).green(),
]);
assert_eq!(text, expected);
}
#[test]
fn list_item_with_inline_blockquote_on_same_line() {
let md = "1. > quoted\n";
let text = render_markdown_text(md);
let mut lines = text.lines.iter();
let first = lines.next().expect("one line");
// Expect content to include the ordered marker, a space, "> ", and the text
let s: String = first.spans.iter().map(|sp| sp.content.clone()).collect();
assert_eq!(s, "1. > quoted");
}
#[test]
fn blockquote_surrounded_by_blank_lines() {
let md = "foo\n\n> bar\n\nbaz\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"foo".to_string(),
"".to_string(),
"> bar".to_string(),
"".to_string(),
"baz".to_string(),
]
);
}
#[test]
fn blockquote_in_ordered_list_on_next_line() {
// Blockquote begins on a new line within an ordered list item; it should
// render inline on the same marker line.
let md = "1.\n > quoted\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["1. > quoted".to_string()]);
}
#[test]
fn blockquote_in_unordered_list_on_next_line() {
// Blockquote begins on a new line within an unordered list item; it should
// render inline on the same marker line.
let md = "-\n > quoted\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["- > quoted".to_string()]);
}
#[test]
fn blockquote_two_paragraphs_inside_ordered_list_has_blank_line() {
// Two blockquote paragraphs inside a list item should be separated by a blank line.
let md = "1.\n > para 1\n >\n > para 2\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"1. > para 1".to_string(),
" > ".to_string(),
" > para 2".to_string(),
],
"expected blockquote content to stay aligned after list marker"
);
}
#[test]
fn blockquote_inside_nested_list() {
let md = "1. A\n - B\n > inner\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["1. A", " - B", " > inner"]);
}
#[test]
fn list_item_text_then_blockquote() {
let md = "1. before\n > quoted\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["1. before", " > quoted"]);
}
#[test]
fn list_item_blockquote_then_text() {
let md = "1.\n > quoted\n after\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["1. > quoted", " > after"]);
}
#[test]
fn list_item_text_blockquote_text() {
let md = "1. before\n > quoted\n after\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["1. before", " > quoted", " > after"]);
}
#[test]
fn blockquote_with_heading_and_paragraph() {
let md = "> # Heading\n> paragraph text\n";
let text = render_markdown_text(md);
// Validate on content shape; styling is handled elsewhere
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"> # Heading".to_string(),
"> ".to_string(),
"> paragraph text".to_string(),
]
);
}
#[test]
fn blockquote_heading_inherits_heading_style() {
let text = render_markdown_text("> # test header\n> in blockquote\n");
assert_eq!(
text.lines,
[
Line::from_iter([
"> ".into(),
"# ".bold().underlined(),
"test header".bold().underlined(),
])
.green(),
Line::from_iter(["> "]).green(),
Line::from_iter(["> ", "in blockquote"]).green(),
]
);
}
#[test]
fn blockquote_with_code_block() {
let md = "> ```\n> code\n> ```\n";
let text = render_markdown_text(md);
assert_eq!(text.lines, [Line::from_iter(["> ", "", "code"]).cyan()]);
}
#[test]
fn blockquote_with_multiline_code_block() {
let md = "> ```\n> first\n> second\n> ```\n";
let text = render_markdown_text(md);
assert_eq!(
text.lines,
[
Line::from_iter(["> ", "", "first"]).cyan(),
Line::from_iter(["> ", "", "second"]).cyan(),
]
);
}
#[test]
fn nested_blockquote_with_inline_and_fenced_code() {
/*
let md = \"> Nested quote with code:\n\
> > Inner quote and `inline code`\n\
> >\n\
> > ```\n\
> > # fenced code inside a quote\n\
> > echo \"hello from a quote\"\n\
> > ```\n";
*/
let md = r#"> Nested quote with code:
> > Inner quote and `inline code`
> >
> > ```
> > # fenced code inside a quote
> > echo "hello from a quote"
> > ```
"#;
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"> Nested quote with code:".to_string(),
"> ".to_string(),
"> > Inner quote and inline code".to_string(),
"> > ".to_string(),
"> > # fenced code inside a quote".to_string(),
"> > echo \"hello from a quote\"".to_string(),
]
);
// Fenced code inside nested blockquotes should keep code styling so copy logic can treat it as
// preformatted.
for idx in [4usize, 5usize] {
assert_eq!(text.lines[idx].style.fg, Some(Color::Cyan));
}
}
#[test]
fn list_unordered_single() {
let text = render_markdown_text("- List item 1\n");
let expected = Text::from_iter([Line::from_iter(["- ", "List item 1"])]);
assert_eq!(text, expected);
}
#[test]
fn list_unordered_multiple() {
let text = render_markdown_text("- List item 1\n- List item 2\n");
let expected = Text::from_iter([
Line::from_iter(["- ", "List item 1"]),
Line::from_iter(["- ", "List item 2"]),
]);
assert_eq!(text, expected);
}
#[test]
fn list_ordered() {
let text = render_markdown_text("1. List item 1\n2. List item 2\n");
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "List item 1".into()]),
Line::from_iter(["2. ".light_blue(), "List item 2".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn list_nested() {
let text = render_markdown_text("- List item 1\n - Nested list item 1\n");
let expected = Text::from_iter([
Line::from_iter(["- ", "List item 1"]),
Line::from_iter([" - ", "Nested list item 1"]),
]);
assert_eq!(text, expected);
}
#[test]
fn list_ordered_custom_start() {
let text = render_markdown_text("3. First\n4. Second\n");
let expected = Text::from_iter([
Line::from_iter(["3. ".light_blue(), "First".into()]),
Line::from_iter(["4. ".light_blue(), "Second".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn nested_unordered_in_ordered() {
let md = "1. Outer\n - Inner A\n - Inner B\n2. Next\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "Outer".into()]),
Line::from_iter([" - ", "Inner A"]),
Line::from_iter([" - ", "Inner B"]),
Line::from_iter(["2. ".light_blue(), "Next".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn nested_ordered_in_unordered() {
let md = "- Outer\n 1. One\n 2. Two\n- Last\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["- ", "Outer"]),
Line::from_iter([" 1. ".light_blue(), "One".into()]),
Line::from_iter([" 2. ".light_blue(), "Two".into()]),
Line::from_iter(["- ", "Last"]),
]);
assert_eq!(text, expected);
}
#[test]
fn loose_list_item_multiple_paragraphs() {
let md = "1. First paragraph\n\n Second paragraph of same item\n\n2. Next item\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "First paragraph".into()]),
Line::default(),
Line::from_iter([" ", "Second paragraph of same item"]),
Line::from_iter(["2. ".light_blue(), "Next item".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn tight_item_with_soft_break() {
let md = "- item line1\n item line2\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["- ", "item line1"]),
Line::from_iter([" ", "item line2"]),
]);
assert_eq!(text, expected);
}
#[test]
fn deeply_nested_mixed_three_levels() {
let md = "1. A\n - B\n 1. C\n2. D\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "A".into()]),
Line::from_iter([" - ", "B"]),
Line::from_iter([" 1. ".light_blue(), "C".into()]),
Line::from_iter(["2. ".light_blue(), "D".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn loose_items_due_to_blank_line_between_items() {
let md = "1. First\n\n2. Second\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "First".into()]),
Line::from_iter(["2. ".light_blue(), "Second".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn mixed_tight_then_loose_in_one_list() {
let md = "1. Tight\n\n2.\n Loose\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "Tight".into()]),
Line::from_iter(["2. ".light_blue(), "Loose".into()]),
]);
assert_eq!(text, expected);
}
#[test]
fn ordered_item_with_indented_continuation_is_tight() {
let md = "1. Foo\n Bar\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "Foo".into()]),
Line::from_iter([" ", "Bar"]),
]);
assert_eq!(text, expected);
}
#[test]
fn inline_code() {
let text = render_markdown_text("Example of `Inline code`");
let expected = Line::from_iter(["Example of ".into(), "Inline code".cyan()]).into();
assert_eq!(text, expected);
}
#[test]
fn strong() {
assert_eq!(
render_markdown_text("**Strong**"),
Text::from(Line::from("Strong".bold()))
);
}
#[test]
fn emphasis() {
assert_eq!(
render_markdown_text("*Emphasis*"),
Text::from(Line::from("Emphasis".italic()))
);
}
#[test]
fn strikethrough() {
assert_eq!(
render_markdown_text("~~Strikethrough~~"),
Text::from(Line::from("Strikethrough".crossed_out()))
);
}
#[test]
fn strong_emphasis() {
let text = render_markdown_text("**Strong *emphasis***");
let expected = Text::from(Line::from_iter([
"Strong ".bold(),
"emphasis".bold().italic(),
]));
assert_eq!(text, expected);
}
#[test]
fn link() {
let text = render_markdown_text("[Link](https://example.com)");
let expected = Text::from(Line::from_iter([
"Link".into(),
" (".into(),
"https://example.com".cyan().underlined(),
")".into(),
]));
assert_eq!(text, expected);
}
#[test]
fn code_block_unhighlighted() {
let text = render_markdown_text("```rust\nfn main() {}\n```\n");
let expected = Text::from_iter([Line::from_iter(["", "fn main() {}"]).cyan()]);
assert_eq!(text, expected);
}
#[test]
fn code_block_multiple_lines_root() {
let md = "```\nfirst\nsecond\n```\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["", "first"]).cyan(),
Line::from_iter(["", "second"]).cyan(),
]);
assert_eq!(text, expected);
}
#[test]
fn code_block_indented() {
let md = " function greet() {\n console.log(\"Hi\");\n }\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter([" ", "function greet() {"]).cyan(),
Line::from_iter([" ", " console.log(\"Hi\");"]).cyan(),
Line::from_iter([" ", "}"]).cyan(),
]);
assert_eq!(text, expected);
}
#[test]
fn horizontal_rule_renders_em_dashes() {
let md = "Before\n\n---\n\nAfter\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["Before", "", "———", "", "After"]);
}
#[test]
fn code_block_with_inner_triple_backticks_outer_four() {
let md = r#"````text
Here is a code block that shows another fenced block:
```md
# Inside fence
- bullet
- `inline code`
```
````
"#;
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"Here is a code block that shows another fenced block:".to_string(),
String::new(),
"```md".to_string(),
"# Inside fence".to_string(),
"- bullet".to_string(),
"- `inline code`".to_string(),
"```".to_string(),
]
);
}
#[test]
fn code_block_inside_unordered_list_item_is_indented() {
let md = "- Item\n\n ```\n code line\n ```\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["- Item", "", " code line"]);
}
#[test]
fn code_block_multiple_lines_inside_unordered_list() {
let md = "- Item\n\n ```\n first\n second\n ```\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(lines, vec!["- Item", "", " first", " second"]);
}
#[test]
fn code_block_inside_unordered_list_item_multiple_lines() {
    // This test was a byte-for-byte duplicate of
    // `code_block_multiple_lines_inside_unordered_list`. Extend the fixture
    // to three code lines so it contributes coverage beyond the two-line case.
    let md = "- Item\n\n ```\n first\n second\n third\n ```\n";
    let text = render_markdown_text(md);
    let lines: Vec<String> = text
        .lines
        .iter()
        .map(|l| {
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect();
    // Every code line keeps the list item's indent.
    assert_eq!(lines, vec!["- Item", "", " first", " second", " third"]);
}
#[test]
fn markdown_render_complex_snapshot() {
let md = r#"# H1: Markdown Streaming Test
Intro paragraph with bold **text**, italic *text*, and inline code `x=1`.
Combined bold-italic ***both*** and escaped asterisks \*literal\*.
Auto-link: <https://example.com> and reference link [ref][r1].
Link with title: [hover me](https://example.com "Example") and mailto <mailto:test@example.com>.
Image: 
> Blockquote level 1
>> Blockquote level 2 with `inline code`
- Unordered list item 1
- Nested bullet with italics _inner_
- Unordered list item 2 with ~~strikethrough~~
1. Ordered item one
2. Ordered item two with sublist:
1) Alt-numbered subitem
- [ ] Task: unchecked
- [x] Task: checked with link [home](https://example.org)
---
Table below (alignment test):
| Left | Center | Right |
|:-----|:------:|------:|
| a | b | c |
Inline HTML: <sup>sup</sup> and <sub>sub</sub>.
HTML block:
<div style="border:1px solid #ccc;padding:2px">inline block</div>
Escapes: \_underscores\_, backslash \\, ticks ``code with `backtick` inside``.
Emoji shortcodes: :sparkles: :tada: (if supported).
Hard break test (line ends with two spaces)
Next line should be close to previous.
Footnote reference here[^1] and another[^longnote].
Horizontal rule with asterisks:
***
Fenced code block (JSON):
```json
{ "a": 1, "b": [true, false] }
```
Fenced code with tildes and triple backticks inside:
~~~markdown
To close ``` you need tildes.
~~~
Indented code block:
for i in range(3): print(i)
Definition-like list:
Term
: Definition with `code`.
Character entities: & < > " '
[^1]: This is the first footnote.
[^longnote]: A longer footnote with a link to [Rust](https://www.rust-lang.org/).
Escaped pipe in text: a \| b \| c.
URL with parentheses: [link](https://example.com/path_(with)_parens).
[r1]: https://example.com/ref "Reference link title"
"#;
let text = render_markdown_text(md);
// Convert to plain text lines for snapshot (ignore styles)
let rendered = text
.lines
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
})
.collect::<Vec<_>>()
.join("\n");
assert_snapshot!(rendered);
}
#[test]
fn ordered_item_with_code_block_and_nested_bullet() {
let md = "1. **item 1**\n\n2. **item 2**\n ```\n code\n ```\n - `PROCESS_START` (a `OnceLock<Instant>`) keeps the start time for the entire process.\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|line| {
line.spans
.iter()
.map(|span| span.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"1. item 1".to_string(),
"2. item 2".to_string(),
String::new(),
" code".to_string(),
" - PROCESS_START (a OnceLock<Instant>) keeps the start time for the entire process.".to_string(),
]
);
}
#[test]
fn nested_five_levels_mixed_lists() {
let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "First".into()]),
Line::from_iter([" - ", "Second level"]),
Line::from_iter([" 1. ".light_blue(), "Third level (ordered)".into()]),
Line::from_iter([" - ", "Fourth level (bullet)"]),
Line::from_iter([
" - ",
"Fifth level to test indent consistency",
]),
]);
assert_eq!(text, expected);
}
#[test]
fn html_inline_is_verbatim() {
    // Inline HTML tags must pass through untouched rather than being parsed.
    let md = "Hello <span>world</span>!";
    let text = render_markdown_text(md);
    // The tags arrive as their own spans, interleaved with the text content.
    let expected: Text = Line::from_iter(["Hello ", "<span>", "world", "</span>", "!"]).into();
    assert_eq!(text, expected);
}
#[test]
fn html_block_is_verbatim_multiline() {
let md = "<div>\n <span>hi</span>\n</div>\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["<div>"]),
Line::from_iter([" <span>hi</span>"]),
Line::from_iter(["</div>"]),
]);
assert_eq!(text, expected);
}
#[test]
fn html_in_tight_ordered_item_soft_breaks_with_space() {
let md = "1. Foo\n <i>Bar</i>\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "Foo".into()]),
Line::from_iter([" ", "<i>", "Bar", "</i>"]),
]);
assert_eq!(text, expected);
}
#[test]
fn html_continuation_paragraph_in_unordered_item_indented() {
let md = "- Item\n\n <em>continued</em>\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["- ", "Item"]),
Line::default(),
Line::from_iter([" ", "<em>", "continued", "</em>"]),
]);
assert_eq!(text, expected);
}
#[test]
fn unordered_item_continuation_paragraph_is_indented() {
let md = "- Intro\n\n Continuation paragraph line 1\n Continuation paragraph line 2\n";
let text = render_markdown_text(md);
let lines: Vec<String> = text
.lines
.iter()
.map(|line| {
line.spans
.iter()
.map(|span| span.content.clone())
.collect::<String>()
})
.collect();
assert_eq!(
lines,
vec![
"- Intro".to_string(),
String::new(),
" Continuation paragraph line 1".to_string(),
" Continuation paragraph line 2".to_string(),
]
);
}
#[test]
fn ordered_item_continuation_paragraph_is_indented() {
let md = "1. Intro\n\n More details about intro\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "Intro".into()]),
Line::default(),
Line::from_iter([" ", "More details about intro"]),
]);
assert_eq!(text, expected);
}
#[test]
fn nested_item_continuation_paragraph_is_indented() {
let md = "1. A\n - B\n\n Continuation for B\n2. C\n";
let text = render_markdown_text(md);
let expected = Text::from_iter([
Line::from_iter(["1. ".light_blue(), "A".into()]),
Line::from_iter([" - ", "B"]),
Line::default(),
Line::from_iter([" ", "Continuation for B"]),
Line::from_iter(["2. ".light_blue(), "C".into()]),
]);
assert_eq!(text, expected);
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/markdown_stream.rs | codex-rs/tui2/src/markdown_stream.rs | use ratatui::text::Line;
use crate::markdown;
/// Newline-gated accumulator that renders markdown and commits only fully
/// completed logical lines.
pub(crate) struct MarkdownStreamCollector {
    // Raw markdown accumulated from streamed deltas; grows until `clear()`.
    buffer: String,
    // Number of rendered lines already handed to the caller; used to slice
    // off only newly completed lines on the next commit.
    committed_line_count: usize,
    // Optional render width passed through to `markdown::append_markdown`.
    width: Option<usize>,
}
impl MarkdownStreamCollector {
    /// Create a collector that renders at the given width (`None` = unbounded).
    pub fn new(width: Option<usize>) -> Self {
        Self {
            buffer: String::new(),
            committed_line_count: 0,
            width,
        }
    }

    /// Reset all state so the collector can be reused for a new stream.
    pub fn clear(&mut self) {
        self.buffer.clear();
        self.committed_line_count = 0;
    }

    /// Append a raw markdown delta to the internal buffer without rendering.
    pub fn push_delta(&mut self, delta: &str) {
        tracing::trace!("push_delta: {delta:?}");
        self.buffer.push_str(delta);
    }

    /// Render the full buffer and return only the newly completed logical lines
    /// since the last commit. When the buffer does not end with a newline, the
    /// final rendered line is considered incomplete and is not emitted.
    pub fn commit_complete_lines(&mut self) -> Vec<Line<'static>> {
        // Only render up to (and including) the last newline; everything after
        // it is an incomplete line that must not be committed yet. Borrowing a
        // slice avoids the previous double clone of the buffer on every commit.
        let Some(last_newline_idx) = self.buffer.rfind('\n') else {
            return Vec::new();
        };
        let source = &self.buffer[..=last_newline_idx];
        let mut rendered: Vec<Line<'static>> = Vec::new();
        markdown::append_markdown(source, self.width, &mut rendered);
        // A trailing blank line may be a paragraph separator that is still
        // "in progress"; hold it back until more input arrives.
        let mut complete_line_count = rendered.len();
        if complete_line_count > 0
            && crate::render::line_utils::is_blank_line_spaces_only(
                &rendered[complete_line_count - 1],
            )
        {
            complete_line_count -= 1;
        }
        if self.committed_line_count >= complete_line_count {
            return Vec::new();
        }
        let out = rendered[self.committed_line_count..complete_line_count].to_vec();
        self.committed_line_count = complete_line_count;
        out
    }

    /// Finalize the stream: emit all remaining lines beyond the last commit.
    /// If the buffer does not end with a newline, a temporary one is appended
    /// for rendering. Optionally unwraps ```markdown language fences in
    /// non-test builds.
    pub fn finalize_and_drain(&mut self) -> Vec<Line<'static>> {
        // Take the buffer instead of cloning it twice; `clear()` below resets
        // the remaining state for the next stream.
        let mut source = std::mem::take(&mut self.buffer);
        let raw_len = source.len();
        if !source.ends_with('\n') {
            source.push('\n');
        }
        tracing::debug!(
            raw_len,
            source_len = source.len(),
            "markdown finalize (raw length: {}, rendered length: {})",
            raw_len,
            source.len()
        );
        tracing::trace!("markdown finalize (raw source):\n---\n{source}\n---");
        let mut rendered: Vec<Line<'static>> = Vec::new();
        markdown::append_markdown(&source, self.width, &mut rendered);
        let out = if self.committed_line_count >= rendered.len() {
            Vec::new()
        } else {
            rendered[self.committed_line_count..].to_vec()
        };
        // Reset collector state for next stream.
        self.clear();
        out
    }
}
#[cfg(test)]
pub(crate) fn simulate_stream_markdown_for_tests(
    deltas: &[&str],
    finalize: bool,
) -> Vec<Line<'static>> {
    // Feed each delta into a fresh collector, committing whenever a delta
    // carries a newline — the same cadence the live streaming path uses.
    let mut collector = MarkdownStreamCollector::new(None);
    let mut committed: Vec<Line<'static>> = Vec::new();
    for delta in deltas.iter() {
        collector.push_delta(delta);
        let has_newline = delta.contains('\n');
        if has_newline {
            let newly_complete = collector.commit_complete_lines();
            committed.extend(newly_complete);
        }
    }
    if finalize {
        let drained = collector.finalize_and_drain();
        committed.extend(drained);
    }
    committed
}
#[cfg(test)]
mod tests {
use super::*;
use ratatui::style::Color;
#[tokio::test]
async fn no_commit_until_newline() {
    // The collector is newline-gated: nothing may be committed while the
    // buffer still ends mid-line.
    let mut c = super::MarkdownStreamCollector::new(None);
    c.push_delta("Hello, world");
    let out = c.commit_complete_lines();
    assert!(out.is_empty(), "should not commit without newline");
    c.push_delta("!\n");
    let out2 = c.commit_complete_lines();
    assert_eq!(out2.len(), 1, "one completed line after newline");
}
#[tokio::test]
async fn finalize_commits_partial_line() {
let mut c = super::MarkdownStreamCollector::new(None);
c.push_delta("Line without newline");
let out = c.finalize_and_drain();
assert_eq!(out.len(), 1);
}
#[tokio::test]
async fn e2e_stream_blockquote_simple_is_green() {
let out = super::simulate_stream_markdown_for_tests(&["> Hello\n"], true);
assert_eq!(out.len(), 1);
let l = &out[0];
assert_eq!(
l.style.fg,
Some(Color::Green),
"expected blockquote line fg green, got {:?}",
l.style.fg
);
}
#[tokio::test]
async fn e2e_stream_blockquote_nested_is_green() {
let out = super::simulate_stream_markdown_for_tests(&["> Level 1\n>> Level 2\n"], true);
// Filter out any blank lines that may be inserted at paragraph starts.
let non_blank: Vec<_> = out
.into_iter()
.filter(|l| {
let s = l
.spans
.iter()
.map(|sp| sp.content.clone())
.collect::<Vec<_>>()
.join("");
let t = s.trim();
// Ignore quote-only blank lines like ">" inserted at paragraph boundaries.
!(t.is_empty() || t == ">")
})
.collect();
assert_eq!(non_blank.len(), 2);
assert_eq!(non_blank[0].style.fg, Some(Color::Green));
assert_eq!(non_blank[1].style.fg, Some(Color::Green));
}
#[tokio::test]
async fn e2e_stream_blockquote_with_list_items_is_green() {
let out = super::simulate_stream_markdown_for_tests(&["> - item 1\n> - item 2\n"], true);
assert_eq!(out.len(), 2);
assert_eq!(out[0].style.fg, Some(Color::Green));
assert_eq!(out[1].style.fg, Some(Color::Green));
}
#[tokio::test]
async fn e2e_stream_nested_mixed_lists_ordered_marker_is_light_blue() {
let md = [
"1. First\n",
" - Second level\n",
" 1. Third level (ordered)\n",
" - Fourth level (bullet)\n",
" - Fifth level to test indent consistency\n",
];
let out = super::simulate_stream_markdown_for_tests(&md, true);
// Find the line that contains the third-level ordered text
let find_idx = out.iter().position(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<String>()
.contains("Third level (ordered)")
});
let idx = find_idx.expect("expected third-level ordered line");
let line = &out[idx];
// Expect at least one span on this line to be styled light blue
let has_light_blue = line
.spans
.iter()
.any(|s| s.style.fg == Some(ratatui::style::Color::LightBlue));
assert!(
has_light_blue,
"expected an ordered-list marker span with light blue fg on: {line:?}"
);
}
#[tokio::test]
async fn e2e_stream_blockquote_wrap_preserves_green_style() {
let long = "> This is a very long quoted line that should wrap across multiple columns to verify style preservation.";
let out = super::simulate_stream_markdown_for_tests(&[long, "\n"], true);
// Wrap to a narrow width to force multiple output lines.
let wrapped =
crate::wrapping::word_wrap_lines(out.iter(), crate::wrapping::RtOptions::new(24));
// Filter out purely blank lines
let non_blank: Vec<_> = wrapped
.into_iter()
.filter(|l| {
let s = l
.spans
.iter()
.map(|sp| sp.content.clone())
.collect::<Vec<_>>()
.join("");
!s.trim().is_empty()
})
.collect();
assert!(
non_blank.len() >= 2,
"expected wrapped blockquote to span multiple lines"
);
for (i, l) in non_blank.iter().enumerate() {
assert_eq!(
l.spans[0].style.fg,
Some(Color::Green),
"wrapped line {} should preserve green style, got {:?}",
i,
l.spans[0].style.fg
);
}
}
#[tokio::test]
async fn heading_starts_on_new_line_when_following_paragraph() {
// Stream a paragraph line, then a heading on the next line.
// Expect two distinct rendered lines: "Hello." and "Heading".
let mut c = super::MarkdownStreamCollector::new(None);
c.push_delta("Hello.\n");
let out1 = c.commit_complete_lines();
let s1: Vec<String> = out1
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
})
.collect();
assert_eq!(
out1.len(),
1,
"first commit should contain only the paragraph line, got {}: {:?}",
out1.len(),
s1
);
c.push_delta("## Heading\n");
let out2 = c.commit_complete_lines();
let s2: Vec<String> = out2
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
})
.collect();
assert_eq!(
s2,
vec!["", "## Heading"],
"expected a blank separator then the heading line"
);
let line_to_string = |l: &ratatui::text::Line<'_>| -> String {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
};
assert_eq!(line_to_string(&out1[0]), "Hello.");
assert_eq!(line_to_string(&out2[1]), "## Heading");
}
#[tokio::test]
async fn heading_not_inlined_when_split_across_chunks() {
// Paragraph without trailing newline, then a chunk that starts with the newline
// and the heading text, then a final newline. The collector should first commit
// only the paragraph line, and later commit the heading as its own line.
let mut c = super::MarkdownStreamCollector::new(None);
c.push_delta("Sounds good!");
// No commit yet
assert!(c.commit_complete_lines().is_empty());
// Introduce the newline that completes the paragraph and the start of the heading.
c.push_delta("\n## Adding Bird subcommand");
let out1 = c.commit_complete_lines();
let s1: Vec<String> = out1
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
})
.collect();
assert_eq!(
s1,
vec!["Sounds good!"],
"expected paragraph followed by blank separator before heading chunk"
);
// Now finish the heading line with the trailing newline.
c.push_delta("\n");
let out2 = c.commit_complete_lines();
let s2: Vec<String> = out2
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
})
.collect();
assert_eq!(
s2,
vec!["", "## Adding Bird subcommand"],
"expected the heading line only on the final commit"
);
// Sanity check raw markdown rendering for a simple line does not produce spurious extras.
let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
crate::markdown::append_markdown("Hello.\n", None, &mut rendered);
let rendered_strings: Vec<String> = rendered
.iter()
.map(|l| {
l.spans
.iter()
.map(|s| s.content.clone())
.collect::<Vec<_>>()
.join("")
})
.collect();
assert_eq!(
rendered_strings,
vec!["Hello."],
"unexpected markdown lines: {rendered_strings:?}"
);
}
/// Flatten each rendered line into its plain-text content (styles dropped).
fn lines_to_plain_strings(lines: &[ratatui::text::Line<'_>]) -> Vec<String> {
    lines
        .iter()
        .map(|l| {
            // Collect span contents straight into a `String`; the previous
            // `Vec<String>` + `join("")` allocated a vector per line for
            // nothing.
            l.spans
                .iter()
                .map(|s| s.content.clone())
                .collect::<String>()
        })
        .collect()
}
#[tokio::test]
async fn lists_and_fences_commit_without_duplication() {
// List case
assert_streamed_equals_full(&["- a\n- ", "b\n- c\n"]).await;
// Fenced code case: stream in small chunks
assert_streamed_equals_full(&["```", "\nco", "de 1\ncode 2\n", "```\n"]).await;
}
#[tokio::test]
async fn utf8_boundary_safety_and_wide_chars() {
// Emoji (wide), CJK, control char, digit + combining macron sequences
let input = "🙂🙂🙂\n汉字漢字\nA\u{0003}0\u{0304}\n";
let deltas = vec![
"🙂",
"🙂",
"🙂\n汉",
"字漢",
"字\nA",
"\u{0003}",
"0",
"\u{0304}",
"\n",
];
let streamed = simulate_stream_markdown_for_tests(&deltas, true);
let streamed_str = lines_to_plain_strings(&streamed);
let mut rendered_all: Vec<ratatui::text::Line<'static>> = Vec::new();
crate::markdown::append_markdown(input, None, &mut rendered_all);
let rendered_all_str = lines_to_plain_strings(&rendered_all);
assert_eq!(
streamed_str, rendered_all_str,
"utf8/wide-char streaming should equal full render without duplication or truncation"
);
}
#[tokio::test]
async fn e2e_stream_deep_nested_third_level_marker_is_light_blue() {
let md = "1. First\n - Second level\n 1. Third level (ordered)\n - Fourth level (bullet)\n - Fifth level to test indent consistency\n";
let streamed = super::simulate_stream_markdown_for_tests(&[md], true);
let streamed_strs = lines_to_plain_strings(&streamed);
// Locate the third-level line in the streamed output; avoid relying on exact indent.
let target_suffix = "1. Third level (ordered)";
let mut found = None;
for line in &streamed {
let s: String = line.spans.iter().map(|sp| sp.content.clone()).collect();
if s.contains(target_suffix) {
found = Some(line.clone());
break;
}
}
let line = found.unwrap_or_else(|| {
panic!("expected to find the third-level ordered list line; got: {streamed_strs:?}")
});
// The marker (including indent and "1.") is expected to be in the first span
// and colored LightBlue; following content should be default color.
assert!(
!line.spans.is_empty(),
"expected non-empty spans for the third-level line"
);
let marker_span = &line.spans[0];
assert_eq!(
marker_span.style.fg,
Some(Color::LightBlue),
"expected LightBlue 3rd-level ordered marker, got {:?}",
marker_span.style.fg
);
// Find the first non-empty non-space content span and verify it is default color.
let mut content_fg = None;
for sp in &line.spans[1..] {
let t = sp.content.trim();
if !t.is_empty() {
content_fg = Some(sp.style.fg);
break;
}
}
assert_eq!(
content_fg.flatten(),
None,
"expected default color for 3rd-level content, got {content_fg:?}"
);
}
#[tokio::test]
async fn empty_fenced_block_is_dropped_and_separator_preserved_before_heading() {
// An empty fenced code block followed by a heading should not render the fence,
// but should preserve a blank separator line so the heading starts on a new line.
let deltas = vec!["```bash\n```\n", "## Heading\n"]; // empty block and close in same commit
let streamed = simulate_stream_markdown_for_tests(&deltas, true);
let texts = lines_to_plain_strings(&streamed);
assert!(
texts.iter().all(|s| !s.contains("```")),
"no fence markers expected: {texts:?}"
);
// Expect the heading and no fence markers. A blank separator may or may not be rendered at start.
assert!(
texts.iter().any(|s| s == "## Heading"),
"expected heading line: {texts:?}"
);
}
#[tokio::test]
async fn paragraph_then_empty_fence_then_heading_keeps_heading_on_new_line() {
    // An empty fenced block between a paragraph and a heading must not cause
    // the heading to merge onto the paragraph's line.
    let deltas = vec!["Para.\n", "```\n```\n", "## Title\n"]; // empty fence block in one commit
    let streamed = simulate_stream_markdown_for_tests(&deltas, true);
    let texts = lines_to_plain_strings(&streamed);
    // `expect` replaces the match-on-Option boilerplate; a missing line is a
    // test failure either way.
    let para_idx = texts
        .iter()
        .position(|s| s == "Para.")
        .expect("para present");
    let head_idx = texts
        .iter()
        .position(|s| s == "## Title")
        .expect("heading present");
    assert!(
        head_idx > para_idx,
        "heading should not merge with paragraph: {texts:?}"
    );
}
#[tokio::test]
async fn loose_list_with_split_dashes_matches_full_render() {
// Minimized failing sequence discovered by the helper: two chunks
// that still reproduce the mismatch.
let deltas = vec!["- item.\n\n", "-"];
let streamed = simulate_stream_markdown_for_tests(&deltas, true);
let streamed_strs = lines_to_plain_strings(&streamed);
let full: String = deltas.iter().copied().collect();
let mut rendered_all: Vec<ratatui::text::Line<'static>> = Vec::new();
crate::markdown::append_markdown(&full, None, &mut rendered_all);
let rendered_all_strs = lines_to_plain_strings(&rendered_all);
assert_eq!(
streamed_strs, rendered_all_strs,
"streamed output should match full render without dangling '-' lines"
);
}
#[tokio::test]
async fn loose_vs_tight_list_items_streaming_matches_full() {
// Deltas extracted from the session log around 2025-08-27T00:33:18.216Z
let deltas = vec![
"\n\n",
"Loose",
" vs",
".",
" tight",
" list",
" items",
":\n",
"1",
".",
" Tight",
" item",
"\n",
"2",
".",
" Another",
" tight",
" item",
"\n\n",
"1",
".",
" Loose",
" item",
" with",
" its",
" own",
" paragraph",
".\n\n",
" ",
" This",
" paragraph",
" belongs",
" to",
" the",
" same",
" list",
" item",
".\n\n",
"2",
".",
" Second",
" loose",
" item",
" with",
" a",
" nested",
" list",
" after",
" a",
" blank",
" line",
".\n\n",
" ",
" -",
" Nested",
" bullet",
" under",
" a",
" loose",
" item",
"\n",
" ",
" -",
" Another",
" nested",
" bullet",
"\n\n",
];
let streamed = simulate_stream_markdown_for_tests(&deltas, true);
let streamed_strs = lines_to_plain_strings(&streamed);
// Compute a full render for diagnostics only.
let full: String = deltas.iter().copied().collect();
let mut rendered_all: Vec<ratatui::text::Line<'static>> = Vec::new();
crate::markdown::append_markdown(&full, None, &mut rendered_all);
// Also assert exact expected plain strings for clarity.
let expected = vec![
"Loose vs. tight list items:".to_string(),
"".to_string(),
"1. Tight item".to_string(),
"2. Another tight item".to_string(),
"3. Loose item with its own paragraph.".to_string(),
"".to_string(),
" This paragraph belongs to the same list item.".to_string(),
"4. Second loose item with a nested list after a blank line.".to_string(),
" - Nested bullet under a loose item".to_string(),
" - Another nested bullet".to_string(),
];
assert_eq!(
streamed_strs, expected,
"expected exact rendered lines for loose/tight section"
);
}
// Targeted tests derived from fuzz findings. Each asserts streamed == full render.
async fn assert_streamed_equals_full(deltas: &[&str]) {
let streamed = simulate_stream_markdown_for_tests(deltas, true);
let streamed_strs = lines_to_plain_strings(&streamed);
let full: String = deltas.iter().copied().collect();
let mut rendered: Vec<ratatui::text::Line<'static>> = Vec::new();
crate::markdown::append_markdown(&full, None, &mut rendered);
let rendered_strs = lines_to_plain_strings(&rendered);
assert_eq!(streamed_strs, rendered_strs, "full:\n---\n{full}\n---");
}
#[tokio::test]
async fn fuzz_class_bullet_duplication_variant_1() {
assert_streamed_equals_full(&[
"aph.\n- let one\n- bull",
"et two\n\n second paragraph \n",
])
.await;
}
#[tokio::test]
async fn fuzz_class_bullet_duplication_variant_2() {
assert_streamed_equals_full(&[
"- e\n c",
"e\n- bullet two\n\n second paragraph in bullet two\n",
])
.await;
}
#[tokio::test]
async fn streaming_html_block_then_text_matches_full() {
assert_streamed_equals_full(&[
"HTML block:\n",
"<div>inline block</div>\n",
"more stuff\n",
])
.await;
}
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/additional_dirs.rs | codex-rs/tui2/src/additional_dirs.rs | use codex_core::protocol::SandboxPolicy;
use std::path::PathBuf;
/// Returns a warning describing why `--add-dir` entries will be ignored for the
/// resolved sandbox policy. The caller is responsible for presenting the
/// warning to the user (for example, printing to stderr).
pub fn add_dir_warning_message(
    additional_dirs: &[PathBuf],
    sandbox_policy: &SandboxPolicy,
) -> Option<String> {
    // Nothing to warn about when no extra directories were requested.
    if additional_dirs.is_empty() {
        return None;
    }
    // Exhaustive on purpose: adding a sandbox variant should force a
    // decision here instead of silently falling through a catch-all.
    let dirs_ignored = match sandbox_policy {
        SandboxPolicy::ReadOnly => true,
        SandboxPolicy::WorkspaceWrite { .. }
        | SandboxPolicy::DangerFullAccess
        | SandboxPolicy::ExternalSandbox { .. } => false,
    };
    dirs_ignored.then(|| format_warning(additional_dirs))
}
/// Build the user-facing warning listing every ignored `--add-dir` path.
fn format_warning(additional_dirs: &[PathBuf]) -> String {
    // Join the lossily-decoded paths with ", " into one display list.
    let mut joined_paths = String::new();
    for (idx, path) in additional_dirs.iter().enumerate() {
        if idx > 0 {
            joined_paths.push_str(", ");
        }
        joined_paths.push_str(&path.to_string_lossy());
    }
    format!(
        "Ignoring --add-dir ({joined_paths}) because the effective sandbox mode is read-only. Switch to workspace-write or danger-full-access to allow additional writable roots."
    )
}
#[cfg(test)]
mod tests {
    use super::add_dir_warning_message;
    use codex_core::protocol::NetworkAccess;
    use codex_core::protocol::SandboxPolicy;
    use pretty_assertions::assert_eq;
    use std::path::PathBuf;
    // Writable policies accept extra roots, so no warning is produced.
    #[test]
    fn returns_none_for_workspace_write() {
        let sandbox = SandboxPolicy::new_workspace_write_policy();
        let dirs = vec![PathBuf::from("/tmp/example")];
        assert_eq!(add_dir_warning_message(&dirs, &sandbox), None);
    }
    #[test]
    fn returns_none_for_danger_full_access() {
        let sandbox = SandboxPolicy::DangerFullAccess;
        let dirs = vec![PathBuf::from("/tmp/example")];
        assert_eq!(add_dir_warning_message(&dirs, &sandbox), None);
    }
    // Externally sandboxed sessions also keep their --add-dir entries.
    #[test]
    fn returns_none_for_external_sandbox() {
        let sandbox = SandboxPolicy::ExternalSandbox {
            network_access: NetworkAccess::Enabled,
        };
        let dirs = vec![PathBuf::from("/tmp/example")];
        assert_eq!(add_dir_warning_message(&dirs, &sandbox), None);
    }
    // Read-only is the one policy where the dirs are ignored: the warning
    // must list every requested path verbatim.
    #[test]
    fn warns_for_read_only() {
        let sandbox = SandboxPolicy::ReadOnly;
        let dirs = vec![PathBuf::from("relative"), PathBuf::from("/abs")];
        let message = add_dir_warning_message(&dirs, &sandbox)
            .expect("expected warning for read-only sandbox");
        assert_eq!(
            message,
            "Ignoring --add-dir (relative, /abs) because the effective sandbox mode is read-only. Switch to workspace-write or danger-full-access to allow additional writable roots."
        );
    }
    // No dirs requested means nothing to warn about, even under read-only.
    #[test]
    fn returns_none_when_no_additional_dirs() {
        let sandbox = SandboxPolicy::ReadOnly;
        let dirs: Vec<PathBuf> = Vec::new();
        assert_eq!(add_dir_warning_message(&dirs, &sandbox), None);
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/session_log.rs | codex-rs/tui2/src/session_log.rs | use std::fs::File;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::PathBuf;
use std::sync::LazyLock;
use std::sync::Mutex;
use std::sync::OnceLock;
use codex_core::config::Config;
use codex_core::protocol::Op;
use serde::Serialize;
use serde_json::json;
use crate::app_event::AppEvent;
static LOGGER: LazyLock<SessionLogger> = LazyLock::new(SessionLogger::new);
struct SessionLogger {
    // Lazily-initialized log file; unset means recording is disabled.
    // Wrapped in a Mutex so concurrent writers emit whole JSON lines.
    file: OnceLock<Mutex<File>>,
}
impl SessionLogger {
    /// Create a logger with no backing file; recording stays disabled until
    /// `open` succeeds.
    fn new() -> Self {
        Self {
            file: OnceLock::new(),
        }
    }

    /// Open (and truncate) the session log at `path` and enable recording.
    ///
    /// Subsequent calls are no-ops. Previously a second call re-opened `path`
    /// with `truncate(true)` — clobbering the file on disk — and then dropped
    /// the new handle because the `OnceLock` was already initialized.
    fn open(&self, path: PathBuf) -> std::io::Result<()> {
        if self.file.get().is_some() {
            return Ok(());
        }
        let mut opts = OpenOptions::new();
        opts.create(true).truncate(true).write(true);
        #[cfg(unix)]
        {
            use std::os::unix::fs::OpenOptionsExt;
            // Session logs may contain sensitive data; restrict to the owner.
            opts.mode(0o600);
        }
        let file = opts.open(path)?;
        // A racing `open` may have won the initialization; in that unlikely
        // case the extra handle is simply dropped, as before.
        self.file.get_or_init(|| Mutex::new(file));
        Ok(())
    }

    /// Serialize `value` as one JSON line and append it to the log.
    /// Errors are logged, not propagated: session recording is best-effort.
    fn write_json_line(&self, value: serde_json::Value) {
        let Some(mutex) = self.file.get() else {
            return;
        };
        // Recover from a poisoned lock: a panic mid-write at worst leaves a
        // truncated trailing line, which consumers can skip.
        let mut guard = match mutex.lock() {
            Ok(g) => g,
            Err(poisoned) => poisoned.into_inner(),
        };
        match serde_json::to_string(&value) {
            Ok(serialized) => {
                if let Err(e) = guard.write_all(serialized.as_bytes()) {
                    tracing::warn!("session log write error: {}", e);
                    return;
                }
                if let Err(e) = guard.write_all(b"\n") {
                    tracing::warn!("session log write error: {}", e);
                    return;
                }
                if let Err(e) = guard.flush() {
                    tracing::warn!("session log flush error: {}", e);
                }
            }
            Err(e) => tracing::warn!("session log serialize error: {}", e),
        }
    }

    /// True once a log file has been opened.
    fn is_enabled(&self) -> bool {
        self.file.get().is_some()
    }
}
/// Current UTC time as an RFC3339 string with millisecond precision.
fn now_ts() -> String {
    // RFC3339 for readability; consumers can parse as needed.
    chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Millis, true)
}
/// Enable session recording when `CODEX_TUI_RECORD_SESSION` is truthy.
/// Output goes to `CODEX_TUI_SESSION_LOG_PATH` if set, otherwise to a
/// timestamped `.jsonl` file in the log directory (or the system temp dir
/// when the log directory cannot be resolved).
pub(crate) fn maybe_init(config: &Config) {
    // Note: only these exact spellings enable recording; mixed-case values
    // such as "True" are treated as disabled.
    let enabled = std::env::var("CODEX_TUI_RECORD_SESSION")
        .map(|v| matches!(v.as_str(), "1" | "true" | "TRUE" | "yes" | "YES"))
        .unwrap_or(false);
    if !enabled {
        return;
    }
    let path = if let Ok(path) = std::env::var("CODEX_TUI_SESSION_LOG_PATH") {
        PathBuf::from(path)
    } else {
        let mut p = match codex_core::config::log_dir(config) {
            Ok(dir) => dir,
            Err(_) => std::env::temp_dir(),
        };
        // One file per process start, named by UTC timestamp.
        let filename = format!(
            "session-{}.jsonl",
            chrono::Utc::now().format("%Y%m%dT%H%M%SZ")
        );
        p.push(filename);
        p
    };
    if let Err(e) = LOGGER.open(path.clone()) {
        tracing::error!("failed to open session log {:?}: {}", path, e);
        return;
    }
    // Write a header record so we can attach context.
    let header = json!({
        "ts": now_ts(),
        "dir": "meta",
        "kind": "session_start",
        "cwd": config.cwd,
        "model": config.model,
        "model_provider_id": config.model_provider_id,
        "model_provider_name": config.model_provider.name,
    });
    LOGGER.write_json_line(header);
}
/// Record an event flowing into the TUI. High-signal variants get a
/// dedicated compact record; anything else is logged by variant name only.
pub(crate) fn log_inbound_app_event(event: &AppEvent) {
    // Log only if enabled
    if !LOGGER.is_enabled() {
        return;
    }
    match event {
        // Full payload: codex events are the core of a session recording.
        AppEvent::CodexEvent(ev) => {
            write_record("to_tui", "codex_event", ev);
        }
        AppEvent::NewSession => {
            let value = json!({
                "ts": now_ts(),
                "dir": "to_tui",
                "kind": "new_session",
            });
            LOGGER.write_json_line(value);
        }
        // History cells are summarized by line count, not content.
        AppEvent::InsertHistoryCell(cell) => {
            let value = json!({
                "ts": now_ts(),
                "dir": "to_tui",
                "kind": "insert_history_cell",
                "lines": cell.transcript_lines(u16::MAX).len(),
            });
            LOGGER.write_json_line(value);
        }
        AppEvent::StartFileSearch(query) => {
            let value = json!({
                "ts": now_ts(),
                "dir": "to_tui",
                "kind": "file_search_start",
                "query": query,
            });
            LOGGER.write_json_line(value);
        }
        // Results are summarized by match count; the matches themselves are
        // not recorded.
        AppEvent::FileSearchResult { query, matches } => {
            let value = json!({
                "ts": now_ts(),
                "dir": "to_tui",
                "kind": "file_search_result",
                "query": query,
                "matches": matches.len(),
            });
            LOGGER.write_json_line(value);
        }
        // Noise or control flow – record variant only
        other => {
            // Splitting the Debug string on '(' extracts the variant name.
            let value = json!({
                "ts": now_ts(),
                "dir": "to_tui",
                "kind": "app_event",
                "variant": format!("{other:?}").split('(').next().unwrap_or("app_event"),
            });
            LOGGER.write_json_line(value);
        }
    }
}
/// Record an operation sent from the TUI to the agent (no-op when disabled).
pub(crate) fn log_outbound_op(op: &Op) {
    if !LOGGER.is_enabled() {
        return;
    }
    write_record("from_tui", "op", op);
}
/// Append the terminal `session_end` meta record (no-op when disabled).
pub(crate) fn log_session_end() {
    if !LOGGER.is_enabled() {
        return;
    }
    let value = json!({
        "ts": now_ts(),
        "dir": "meta",
        "kind": "session_end",
    });
    LOGGER.write_json_line(value);
}
/// Serialize a timestamped record with direction `dir` and type `kind`,
/// embedding `obj` as the `payload` field, and append it to the log.
fn write_record<T>(dir: &str, kind: &str, obj: &T)
where
    T: Serialize,
{
    let value = json!({
        "ts": now_ts(),
        "dir": dir,
        "kind": kind,
        "payload": obj,
    });
    LOGGER.write_json_line(value);
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/diff_render.rs | codex-rs/tui2/src/diff_render.rs | use diffy::Hunk;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::style::Color;
use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::text::Line as RtLine;
use ratatui::text::Span as RtSpan;
use ratatui::widgets::Paragraph;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use crate::exec_command::relativize_to_home;
use crate::render::Insets;
use crate::render::line_utils::prefix_lines;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::InsetRenderable;
use crate::render::renderable::Renderable;
use codex_core::git_info::get_git_repo_root;
use codex_core::protocol::FileChange;
// Internal representation for diff line rendering: which sign/color a
// rendered row gets ('+' green, '-' red, ' ' unstyled).
enum DiffLineType {
    Insert,
    Delete,
    Context,
}
/// A set of file changes plus the working directory used to render
/// user-friendly (relative) paths.
pub struct DiffSummary {
    // Per-file changes keyed by path; rendering sorts them for determinism.
    changes: HashMap<PathBuf, FileChange>,
    // Base directory that paths are displayed relative to.
    cwd: PathBuf,
}
impl DiffSummary {
    pub fn new(changes: HashMap<PathBuf, FileChange>, cwd: PathBuf) -> Self {
        Self { changes, cwd }
    }
}
impl Renderable for FileChange {
    /// Renders this change as wrapped diff lines into `buf` at `area`.
    fn render(&self, area: Rect, buf: &mut Buffer) {
        let mut lines = vec![];
        render_change(self, &mut lines, area.width as usize);
        Paragraph::new(lines).render(area, buf);
    }
    /// Number of terminal rows the rendered diff needs at `width`.
    ///
    /// Saturates at `u16::MAX` instead of truncating: the previous
    /// `lines.len() as u16` wrapped for diffs producing more than 65535
    /// rendered rows (e.g. 65536 rows would report a height of 0).
    fn desired_height(&self, width: u16) -> u16 {
        let mut lines = vec![];
        render_change(self, &mut lines, width as usize);
        u16::try_from(lines.len()).unwrap_or(u16::MAX)
    }
}
impl From<DiffSummary> for Box<dyn Renderable> {
    /// Converts the summary into a column of per-file rows: path header,
    /// blank spacer, then the indented diff body.
    fn from(val: DiffSummary) -> Self {
        let mut children: Vec<Box<dyn Renderable>> = Vec::new();
        for (idx, row) in collect_rows(&val.changes).into_iter().enumerate() {
            // Blank separator between files (not before the first).
            if idx > 0 {
                children.push(Box::new(RtLine::from("")));
            }
            let mut header = RtLine::from(display_path_for(&row.path, &val.cwd));
            header.push_span(" ");
            header.extend(render_line_count_summary(row.added, row.removed));
            children.push(Box::new(header));
            children.push(Box::new(RtLine::from("")));
            let body: Box<dyn Renderable> = Box::new(row.change);
            children.push(Box::new(InsetRenderable::new(
                body,
                Insets::tlbr(0, 2, 0, 0),
            )));
        }
        Box::new(ColumnRenderable::with(children))
    }
}
/// Builds the full diff-summary line block for `changes`, wrapped to
/// `wrap_cols`, with paths rendered relative to `cwd`.
pub(crate) fn create_diff_summary(
    changes: &HashMap<PathBuf, FileChange>,
    cwd: &Path,
    wrap_cols: usize,
) -> Vec<RtLine<'static>> {
    render_changes_block(collect_rows(changes), wrap_cols, cwd)
}
// Shared row for per-file presentation: one entry per changed file with
// precomputed added/removed counts.
#[derive(Clone)]
struct Row {
    #[allow(dead_code)]
    path: PathBuf,
    // Rename target (old → new) when the change moves the file.
    move_path: Option<PathBuf>,
    // Count of inserted lines.
    added: usize,
    // Count of deleted lines.
    removed: usize,
    change: FileChange,
}
/// Builds one presentation `Row` per changed file, sorted by path so output
/// is deterministic regardless of `HashMap` iteration order.
fn collect_rows(changes: &HashMap<PathBuf, FileChange>) -> Vec<Row> {
    let mut rows: Vec<Row> = Vec::new();
    for (path, change) in changes.iter() {
        // Added/removed counts: whole file for Add/Delete, parsed from the
        // unified diff for Update.
        let (added, removed) = match change {
            FileChange::Add { content } => (content.lines().count(), 0),
            FileChange::Delete { content } => (0, content.lines().count()),
            FileChange::Update { unified_diff, .. } => calculate_add_remove_from_diff(unified_diff),
        };
        let move_path = match change {
            FileChange::Update {
                move_path: Some(new),
                ..
            } => Some(new.clone()),
            _ => None,
        };
        rows.push(Row {
            path: path.clone(),
            move_path,
            added,
            removed,
            change: change.clone(),
        });
    }
    // Compare paths in place; `sort_by_key(|r| r.path.clone())` would clone
    // each PathBuf O(n log n) times just to produce the comparison key.
    rows.sort_by(|a, b| a.path.cmp(&b.path));
    rows
}
/// Renders a "(+N -M)" span group: additions in green, removals in red.
fn render_line_count_summary(added: usize, removed: usize) -> Vec<RtSpan<'static>> {
    vec![
        "(".into(),
        format!("+{added}").green(),
        " ".into(),
        format!("-{removed}").red(),
        ")".into(),
    ]
}
/// Renders the summary header plus per-file diff chunks.
///
/// `wrap_cols` is the total available width; the nested diff content wraps
/// to `wrap_cols` minus the 4-column inset, saturating so a very narrow
/// width cannot underflow (the old `wrap_cols - 4` panicked in debug builds
/// for `wrap_cols < 4`).
fn render_changes_block(rows: Vec<Row>, wrap_cols: usize, cwd: &Path) -> Vec<RtLine<'static>> {
    let mut out: Vec<RtLine<'static>> = Vec::new();
    // Path renderer shared by header and per-file rows; appends the rename
    // target when the change moves the file.
    let render_path = |row: &Row| -> Vec<RtSpan<'static>> {
        let mut spans = Vec::new();
        spans.push(display_path_for(&row.path, cwd).into());
        if let Some(move_path) = &row.move_path {
            spans.push(format!(" → {}", display_path_for(move_path, cwd)).into());
        }
        spans
    };
    // Header: a single file inlines verb + path; multiple files show an
    // aggregate count instead.
    let total_added: usize = rows.iter().map(|r| r.added).sum();
    let total_removed: usize = rows.iter().map(|r| r.removed).sum();
    let file_count = rows.len();
    let noun = if file_count == 1 { "file" } else { "files" };
    let mut header_spans: Vec<RtSpan<'static>> = vec!["• ".dim()];
    if let [row] = &rows[..] {
        let verb = match &row.change {
            FileChange::Add { .. } => "Added",
            FileChange::Delete { .. } => "Deleted",
            _ => "Edited",
        };
        header_spans.push(verb.bold());
        header_spans.push(" ".into());
        header_spans.extend(render_path(row));
        header_spans.push(" ".into());
        header_spans.extend(render_line_count_summary(row.added, row.removed));
    } else {
        header_spans.push("Edited".bold());
        header_spans.push(format!(" {file_count} {noun} ").into());
        header_spans.extend(render_line_count_summary(total_added, total_removed));
    }
    out.push(RtLine::from(header_spans));
    for (idx, r) in rows.into_iter().enumerate() {
        // Insert a blank separator between file chunks (except before the first)
        if idx > 0 {
            out.push("".into());
        }
        // File header line (skip when single-file header already shows the name)
        let skip_file_header = file_count == 1;
        if !skip_file_header {
            let mut header: Vec<RtSpan<'static>> = Vec::new();
            header.push(" └ ".dim());
            header.extend(render_path(&r));
            header.push(" ".into());
            header.extend(render_line_count_summary(r.added, r.removed));
            out.push(RtLine::from(header));
        }
        let mut lines = vec![];
        // saturating_sub: guard against underflow when wrap_cols < 4.
        render_change(&r.change, &mut lines, wrap_cols.saturating_sub(4));
        out.extend(prefix_lines(lines, " ".into(), " ".into()));
    }
    out
}
/// Appends the rendered diff rows for `change` to `out`, wrapped to `width`.
///
/// Add/Delete render the whole file as inserts/deletes numbered from 1;
/// Update parses the unified diff and numbers inserts/context by the new
/// file and deletes by the old file. Unparsable diffs render nothing.
fn render_change(change: &FileChange, out: &mut Vec<RtLine<'static>>, width: usize) {
    match change {
        FileChange::Add { content } => {
            let line_number_width = line_number_width(content.lines().count());
            for (i, raw) in content.lines().enumerate() {
                out.extend(push_wrapped_diff_line(
                    i + 1,
                    DiffLineType::Insert,
                    raw,
                    width,
                    line_number_width,
                ));
            }
        }
        FileChange::Delete { content } => {
            let line_number_width = line_number_width(content.lines().count());
            for (i, raw) in content.lines().enumerate() {
                out.extend(push_wrapped_diff_line(
                    i + 1,
                    DiffLineType::Delete,
                    raw,
                    width,
                    line_number_width,
                ));
            }
        }
        FileChange::Update { unified_diff, .. } => {
            if let Ok(patch) = diffy::Patch::from_str(unified_diff) {
                // First pass: find the largest line number that will be
                // printed so the gutter width can be fixed up front.
                let mut max_line_number = 0;
                for h in patch.hunks() {
                    let mut old_ln = h.old_range().start();
                    let mut new_ln = h.new_range().start();
                    for l in h.lines() {
                        match l {
                            diffy::Line::Insert(_) => {
                                max_line_number = max_line_number.max(new_ln);
                                new_ln += 1;
                            }
                            diffy::Line::Delete(_) => {
                                max_line_number = max_line_number.max(old_ln);
                                old_ln += 1;
                            }
                            diffy::Line::Context(_) => {
                                max_line_number = max_line_number.max(new_ln);
                                old_ln += 1;
                                new_ln += 1;
                            }
                        }
                    }
                }
                let line_number_width = line_number_width(max_line_number);
                // Second pass: emit the rows; consecutive hunks are separated
                // by a dimmed "⋮" spacer row aligned with the gutter.
                let mut is_first_hunk = true;
                for h in patch.hunks() {
                    if !is_first_hunk {
                        let spacer = format!("{:width$} ", "", width = line_number_width.max(1));
                        let spacer_span = RtSpan::styled(spacer, style_gutter());
                        out.push(RtLine::from(vec![spacer_span, "⋮".dim()]));
                    }
                    is_first_hunk = false;
                    let mut old_ln = h.old_range().start();
                    let mut new_ln = h.new_range().start();
                    for l in h.lines() {
                        match l {
                            diffy::Line::Insert(text) => {
                                // diffy lines keep their trailing newline.
                                let s = text.trim_end_matches('\n');
                                out.extend(push_wrapped_diff_line(
                                    new_ln,
                                    DiffLineType::Insert,
                                    s,
                                    width,
                                    line_number_width,
                                ));
                                new_ln += 1;
                            }
                            diffy::Line::Delete(text) => {
                                let s = text.trim_end_matches('\n');
                                out.extend(push_wrapped_diff_line(
                                    old_ln,
                                    DiffLineType::Delete,
                                    s,
                                    width,
                                    line_number_width,
                                ));
                                old_ln += 1;
                            }
                            diffy::Line::Context(text) => {
                                let s = text.trim_end_matches('\n');
                                out.extend(push_wrapped_diff_line(
                                    new_ln,
                                    DiffLineType::Context,
                                    s,
                                    width,
                                    line_number_width,
                                ));
                                old_ln += 1;
                                new_ln += 1;
                            }
                        }
                    }
                }
            }
        }
    }
}
/// Chooses a user-friendly rendering for `path` relative to `cwd`.
///
/// Preference order: a relative path under `cwd` (no `..` components), then
/// a relative path when both locations share a git repo, then a `~`-prefixed
/// home-relative path, and finally the absolute path as-is.
pub(crate) fn display_path_for(path: &Path, cwd: &Path) -> String {
    // Prefer a stable, user-local relative path when the file is under the
    // current working directory. This keeps output deterministic in jj-only
    // repos (no `.git`) and matches user expectations for "files in this
    // project".
    let rel = pathdiff::diff_paths(path, cwd);
    if let Some(candidate) = &rel {
        let escapes_cwd = candidate
            .components()
            .any(|c| matches!(c, std::path::Component::ParentDir));
        if !escapes_cwd {
            return candidate.display().to_string();
        }
    }
    let same_repo = matches!(
        (get_git_repo_root(cwd), get_git_repo_root(path)),
        (Some(cwd_repo), Some(path_repo)) if cwd_repo == path_repo
    );
    let chosen = if same_repo {
        rel.unwrap_or_else(|| path.to_path_buf())
    } else {
        match relativize_to_home(path) {
            Some(p) => PathBuf::from_iter([Path::new("~"), p.as_path()]),
            None => path.to_path_buf(),
        }
    };
    chosen.display().to_string()
}
/// Counts (inserted, deleted) lines in a unified diff.
///
/// Unparsable diffs count as (0, 0).
fn calculate_add_remove_from_diff(diff: &str) -> (usize, usize) {
    let Ok(patch) = diffy::Patch::from_str(diff) else {
        return (0, 0);
    };
    let mut added = 0;
    let mut removed = 0;
    for line in patch.hunks().iter().flat_map(Hunk::lines) {
        match line {
            diffy::Line::Insert(_) => added += 1,
            diffy::Line::Delete(_) => removed += 1,
            diffy::Line::Context(_) => {}
        }
    }
    (added, removed)
}
/// Renders one logical diff line as one or more terminal rows.
///
/// The first row carries the right-aligned line number and a sign column
/// ('+', '-', or ' '); wrapped continuation rows keep an empty gutter so the
/// content stays aligned. Always returns at least one row, even for empty
/// `text`.
fn push_wrapped_diff_line(
    line_number: usize,
    kind: DiffLineType,
    text: &str,
    width: usize,
    line_number_width: usize,
) -> Vec<RtLine<'static>> {
    let ln_str = line_number.to_string();
    let mut remaining_text: &str = text;
    // Reserve a fixed number of spaces (equal to the widest line number plus a
    // trailing spacer) so the sign column stays aligned across the diff block.
    let gutter_width = line_number_width.max(1);
    let prefix_cols = gutter_width + 1;
    let mut first = true;
    let (sign_char, line_style) = match kind {
        DiffLineType::Insert => ('+', style_add()),
        DiffLineType::Delete => ('-', style_del()),
        DiffLineType::Context => (' ', style_context()),
    };
    let mut lines: Vec<RtLine<'static>> = Vec::new();
    loop {
        // Fit the content for the current terminal row:
        // compute how many columns are available after the prefix, then split
        // at a UTF-8 character boundary so this row's chunk fits exactly.
        // NOTE(review): this counts chars, not display columns — wide glyphs
        // could still overflow a row; confirm that's acceptable here.
        let available_content_cols = width.saturating_sub(prefix_cols + 1).max(1);
        let split_at_byte_index = remaining_text
            .char_indices()
            .nth(available_content_cols)
            .map(|(i, _)| i)
            .unwrap_or_else(|| remaining_text.len());
        let (chunk, rest) = remaining_text.split_at(split_at_byte_index);
        remaining_text = rest;
        if first {
            // Build gutter (right-aligned line number plus spacer) as a dimmed span
            let gutter = format!("{ln_str:>gutter_width$} ");
            // Content with a sign ('+'/'-'/' ') styled per diff kind
            let content = format!("{sign_char}{chunk}");
            lines.push(RtLine::from(vec![
                RtSpan::styled(gutter, style_gutter()),
                RtSpan::styled(content, line_style),
            ]));
            first = false;
        } else {
            // Continuation lines keep a space for the sign column so content aligns
            let gutter = format!("{:gutter_width$} ", "");
            lines.push(RtLine::from(vec![
                RtSpan::styled(gutter, style_gutter()),
                RtSpan::styled(chunk.to_string(), line_style),
            ]));
        }
        if remaining_text.is_empty() {
            break;
        }
    }
    lines
}
/// Width in characters of the widest line number shown in the gutter.
///
/// `0` formats as "0" and therefore still occupies one column, so no special
/// case is needed.
fn line_number_width(max_line_number: usize) -> usize {
    max_line_number.to_string().len()
}
// Dimmed style for the line-number gutter.
fn style_gutter() -> Style {
    Style::default().add_modifier(Modifier::DIM)
}
// Unstyled context (unchanged) diff lines.
fn style_context() -> Style {
    Style::default()
}
// Green foreground for inserted lines.
fn style_add() -> Style {
    Style::default().fg(Color::Green)
}
// Red foreground for deleted lines.
fn style_del() -> Style {
    Style::default().fg(Color::Red)
}
#[cfg(test)]
mod tests {
    use super::*;
    use insta::assert_snapshot;
    use ratatui::Terminal;
    use ratatui::backend::TestBackend;
    use ratatui::text::Text;
    use ratatui::widgets::Paragraph;
    use ratatui::widgets::WidgetRef;
    use ratatui::widgets::Wrap;

    // Renders a diff summary with a fixed cwd ("/") and width (80 cols).
    fn diff_summary_for_tests(changes: &HashMap<PathBuf, FileChange>) -> Vec<RtLine<'static>> {
        create_diff_summary(changes, &PathBuf::from("/"), 80)
    }

    // Draws `lines` into an in-memory terminal and snapshots the buffer.
    fn snapshot_lines(name: &str, lines: Vec<RtLine<'static>>, width: u16, height: u16) {
        let mut terminal = Terminal::new(TestBackend::new(width, height)).expect("terminal");
        terminal
            .draw(|f| {
                Paragraph::new(Text::from(lines))
                    .wrap(Wrap { trim: false })
                    .render_ref(f.area(), f.buffer_mut())
            })
            .expect("draw");
        assert_snapshot!(name, terminal.backend());
    }

    fn snapshot_lines_text(name: &str, lines: &[RtLine<'static>]) {
        // Convert Lines to plain text rows and trim trailing spaces so it's
        // easier to validate indentation visually in snapshots.
        let text = lines
            .iter()
            .map(|l| {
                l.spans
                    .iter()
                    .map(|s| s.content.as_ref())
                    .collect::<String>()
            })
            .map(|s| s.trim_end().to_string())
            .collect::<Vec<_>>()
            .join("\n");
        assert_snapshot!(name, text);
    }

    #[test]
    fn ui_snapshot_wrap_behavior_insert() {
        // Narrow width to force wrapping within our diff line rendering
        let long_line = "this is a very long line that should wrap across multiple terminal columns and continue";
        // Call the wrapping function directly so we can precisely control the width
        let lines =
            push_wrapped_diff_line(1, DiffLineType::Insert, long_line, 80, line_number_width(1));
        // Render into a small terminal to capture the visual layout
        snapshot_lines("wrap_behavior_insert", lines, 90, 8);
    }

    #[test]
    fn ui_snapshot_apply_update_block() {
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        let original = "line one\nline two\nline three\n";
        let modified = "line one\nline two changed\nline three\n";
        let patch = diffy::create_patch(original, modified).to_string();
        changes.insert(
            PathBuf::from("example.txt"),
            FileChange::Update {
                unified_diff: patch,
                move_path: None,
            },
        );
        let lines = diff_summary_for_tests(&changes);
        snapshot_lines("apply_update_block", lines, 80, 12);
    }

    #[test]
    fn ui_snapshot_apply_update_with_rename_block() {
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        let original = "A\nB\nC\n";
        let modified = "A\nB changed\nC\n";
        let patch = diffy::create_patch(original, modified).to_string();
        changes.insert(
            PathBuf::from("old_name.rs"),
            FileChange::Update {
                unified_diff: patch,
                move_path: Some(PathBuf::from("new_name.rs")),
            },
        );
        let lines = diff_summary_for_tests(&changes);
        snapshot_lines("apply_update_with_rename_block", lines, 80, 12);
    }

    #[test]
    fn ui_snapshot_apply_multiple_files_block() {
        // Two files: one update and one add, to exercise combined header and per-file rows
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        // File a.txt: single-line replacement (one delete, one insert)
        let patch_a = diffy::create_patch("one\n", "one changed\n").to_string();
        changes.insert(
            PathBuf::from("a.txt"),
            FileChange::Update {
                unified_diff: patch_a,
                move_path: None,
            },
        );
        // File b.txt: newly added with one line
        changes.insert(
            PathBuf::from("b.txt"),
            FileChange::Add {
                content: "new\n".to_string(),
            },
        );
        let lines = diff_summary_for_tests(&changes);
        snapshot_lines("apply_multiple_files_block", lines, 80, 14);
    }

    #[test]
    fn ui_snapshot_apply_add_block() {
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            PathBuf::from("new_file.txt"),
            FileChange::Add {
                content: "alpha\nbeta\n".to_string(),
            },
        );
        let lines = diff_summary_for_tests(&changes);
        snapshot_lines("apply_add_block", lines, 80, 10);
    }

    #[test]
    fn ui_snapshot_apply_delete_block() {
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            PathBuf::from("tmp_delete_example.txt"),
            FileChange::Delete {
                content: "first\nsecond\nthird\n".to_string(),
            },
        );
        let lines = diff_summary_for_tests(&changes);
        snapshot_lines("apply_delete_block", lines, 80, 12);
    }

    #[test]
    fn ui_snapshot_apply_update_block_wraps_long_lines() {
        // Create a patch with a long modified line to force wrapping
        let original = "line 1\nshort\nline 3\n";
        let modified = "line 1\nshort this_is_a_very_long_modified_line_that_should_wrap_across_multiple_terminal_columns_and_continue_even_further_beyond_eighty_columns_to_force_multiple_wraps\nline 3\n";
        let patch = diffy::create_patch(original, modified).to_string();
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            PathBuf::from("long_example.txt"),
            FileChange::Update {
                unified_diff: patch,
                move_path: None,
            },
        );
        let lines = create_diff_summary(&changes, &PathBuf::from("/"), 72);
        // Render with backend width wider than wrap width to avoid Paragraph auto-wrap.
        snapshot_lines("apply_update_block_wraps_long_lines", lines, 80, 12);
    }

    #[test]
    fn ui_snapshot_apply_update_block_wraps_long_lines_text() {
        // This mirrors the desired layout example: sign only on first inserted line,
        // subsequent wrapped pieces start aligned under the line number gutter.
        let original = "1\n2\n3\n4\n";
        let modified = "1\nadded long line which wraps and_if_there_is_a_long_token_it_will_be_broken\n3\n4 context line which also wraps across\n";
        let patch = diffy::create_patch(original, modified).to_string();
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            PathBuf::from("wrap_demo.txt"),
            FileChange::Update {
                unified_diff: patch,
                move_path: None,
            },
        );
        let lines = create_diff_summary(&changes, &PathBuf::from("/"), 28);
        snapshot_lines_text("apply_update_block_wraps_long_lines_text", &lines);
    }

    #[test]
    fn ui_snapshot_apply_update_block_line_numbers_three_digits_text() {
        // 110 lines forces a 3-digit gutter; only line 100 changes.
        let original = (1..=110).map(|i| format!("line {i}\n")).collect::<String>();
        let modified = (1..=110)
            .map(|i| {
                if i == 100 {
                    format!("line {i} changed\n")
                } else {
                    format!("line {i}\n")
                }
            })
            .collect::<String>();
        let patch = diffy::create_patch(&original, &modified).to_string();
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            PathBuf::from("hundreds.txt"),
            FileChange::Update {
                unified_diff: patch,
                move_path: None,
            },
        );
        let lines = create_diff_summary(&changes, &PathBuf::from("/"), 80);
        snapshot_lines_text("apply_update_block_line_numbers_three_digits_text", &lines);
    }

    #[test]
    fn ui_snapshot_apply_update_block_relativizes_path() {
        // Absolute paths under cwd should render relative to cwd.
        let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("/"));
        let abs_old = cwd.join("abs_old.rs");
        let abs_new = cwd.join("abs_new.rs");
        let original = "X\nY\n";
        let modified = "X changed\nY\n";
        let patch = diffy::create_patch(original, modified).to_string();
        let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
        changes.insert(
            abs_old,
            FileChange::Update {
                unified_diff: patch,
                move_path: Some(abs_new),
            },
        );
        let lines = create_diff_summary(&changes, &cwd, 80);
        snapshot_lines("apply_update_block_relativizes_path", lines, 80, 10);
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/ascii_animation.rs | codex-rs/tui2/src/ascii_animation.rs | use std::convert::TryFrom;
use std::time::Duration;
use std::time::Instant;
use rand::Rng as _;
use crate::frames::ALL_VARIANTS;
use crate::frames::FRAME_TICK_DEFAULT;
use crate::tui::FrameRequester;
/// Drives ASCII art animations shared across popups and onboarding widgets.
pub(crate) struct AsciiAnimation {
    // Hook for scheduling redraws with the TUI event loop.
    request_frame: FrameRequester,
    // All frame sets this animation can cycle through (asserted non-empty).
    variants: &'static [&'static [&'static str]],
    // Index of the variant currently being played.
    variant_idx: usize,
    // Time between frames; the current frame is derived from elapsed time.
    frame_tick: Duration,
    // Animation epoch; frame index = elapsed / frame_tick (mod frame count).
    start: Instant,
}
impl AsciiAnimation {
    /// Creates an animation over all built-in variants, starting at variant 0.
    pub(crate) fn new(request_frame: FrameRequester) -> Self {
        Self::with_variants(request_frame, ALL_VARIANTS, 0)
    }
    /// Creates an animation over a caller-supplied set of variants.
    ///
    /// # Panics
    /// Panics if `variants` is empty. An out-of-range `variant_idx` is
    /// clamped to the last variant.
    pub(crate) fn with_variants(
        request_frame: FrameRequester,
        variants: &'static [&'static [&'static str]],
        variant_idx: usize,
    ) -> Self {
        assert!(
            !variants.is_empty(),
            "AsciiAnimation requires at least one animation variant",
        );
        let clamped_idx = variant_idx.min(variants.len() - 1);
        Self {
            request_frame,
            variants,
            variant_idx: clamped_idx,
            frame_tick: FRAME_TICK_DEFAULT,
            start: Instant::now(),
        }
    }
    /// Schedules a redraw aligned to the next tick boundary relative to
    /// `start`, so frames advance evenly no matter when callers ask.
    pub(crate) fn schedule_next_frame(&self) {
        let tick_ms = self.frame_tick.as_millis();
        // Defensive: a zero tick would divide by zero below; redraw now.
        if tick_ms == 0 {
            self.request_frame.schedule_frame();
            return;
        }
        let elapsed_ms = self.start.elapsed().as_millis();
        let rem_ms = elapsed_ms % tick_ms;
        // Exactly on a boundary → wait a full tick; otherwise wait out the
        // remainder of the current tick.
        let delay_ms = if rem_ms == 0 {
            tick_ms
        } else {
            tick_ms - rem_ms
        };
        // u128 → u64 can only fail after an absurd uptime; fall back to an
        // immediate redraw in that case.
        if let Ok(delay_ms_u64) = u64::try_from(delay_ms) {
            self.request_frame
                .schedule_frame_in(Duration::from_millis(delay_ms_u64));
        } else {
            self.request_frame.schedule_frame();
        }
    }
    /// Returns the frame for the current instant; derived from elapsed time,
    /// so no per-frame state is kept.
    pub(crate) fn current_frame(&self) -> &'static str {
        let frames = self.frames();
        if frames.is_empty() {
            return "";
        }
        let tick_ms = self.frame_tick.as_millis();
        if tick_ms == 0 {
            return frames[0];
        }
        let elapsed_ms = self.start.elapsed().as_millis();
        let idx = ((elapsed_ms / tick_ms) % frames.len() as u128) as usize;
        frames[idx]
    }
    /// Switches to a different random variant and requests a redraw.
    ///
    /// Returns `false` (no change) when only one variant exists; the rejection
    /// loop otherwise guarantees the new index differs from the current one.
    pub(crate) fn pick_random_variant(&mut self) -> bool {
        if self.variants.len() <= 1 {
            return false;
        }
        let mut rng = rand::rng();
        let mut next = self.variant_idx;
        while next == self.variant_idx {
            next = rng.random_range(0..self.variants.len());
        }
        self.variant_idx = next;
        self.request_frame.schedule_frame();
        true
    }
    /// Requests an immediate redraw.
    #[allow(dead_code)]
    pub(crate) fn request_frame(&self) {
        self.request_frame.schedule_frame();
    }
    // Frames of the currently selected variant.
    fn frames(&self) -> &'static [&'static str] {
        self.variants[self.variant_idx]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // schedule_next_frame()/current_frame() divide by the tick length, so the
    // default tick must never be zero.
    #[test]
    fn frame_tick_must_be_nonzero() {
        assert!(FRAME_TICK_DEFAULT.as_millis() > 0);
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/clipboard_paste.rs | codex-rs/tui2/src/clipboard_paste.rs | use std::path::Path;
use std::path::PathBuf;
use tempfile::Builder;
/// Errors surfaced while pasting an image from the system clipboard.
#[derive(Debug, Clone)]
pub enum PasteImageError {
    // Clipboard backend could not be opened (e.g. no display server, WSL).
    ClipboardUnavailable(String),
    // Clipboard was readable but held no image file or image data.
    NoImage(String),
    // Clipboard bitmap could not be converted/encoded to PNG.
    EncodeFailed(String),
    // Filesystem error while persisting the pasted image.
    IoError(String),
}
impl std::fmt::Display for PasteImageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PasteImageError::ClipboardUnavailable(msg) => write!(f, "clipboard unavailable: {msg}"),
PasteImageError::NoImage(msg) => write!(f, "no image on clipboard: {msg}"),
PasteImageError::EncodeFailed(msg) => write!(f, "could not encode image: {msg}"),
PasteImageError::IoError(msg) => write!(f, "io error: {msg}"),
}
}
}
impl std::error::Error for PasteImageError {}
/// Best-effort classification of a pasted image's encoding.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EncodedImageFormat {
    Png,
    Jpeg,
    // Anything not recognizably PNG or JPEG.
    Other,
}
impl EncodedImageFormat {
pub fn label(self) -> &'static str {
match self {
EncodedImageFormat::Png => "PNG",
EncodedImageFormat::Jpeg => "JPEG",
EncodedImageFormat::Other => "IMG",
}
}
}
/// Metadata describing an image captured from the clipboard.
#[derive(Debug, Clone)]
pub struct PastedImageInfo {
    // Pixel dimensions of the decoded image.
    pub width: u32,
    pub height: u32,
    pub encoded_format: EncodedImageFormat, // Always PNG for now.
}
/// Capture image from system clipboard, encode to PNG, and return bytes + info.
///
/// # Errors
/// - `ClipboardUnavailable` if the clipboard backend cannot be opened.
/// - `NoImage` if the clipboard holds neither an image file nor image data.
/// - `EncodeFailed` if the bitmap cannot be converted or encoded to PNG.
#[cfg(not(target_os = "android"))]
pub fn paste_image_as_png() -> Result<(Vec<u8>, PastedImageInfo), PasteImageError> {
    let _span = tracing::debug_span!("paste_image_as_png").entered();
    tracing::debug!("attempting clipboard image read");
    let mut cb = arboard::Clipboard::new()
        .map_err(|e| PasteImageError::ClipboardUnavailable(e.to_string()))?;
    // Sometimes images on the clipboard come as files (e.g. when copy/pasting from
    // Finder), sometimes they come as image data (e.g. when pasting from Chrome).
    // Accept both, and prefer files if both are present.
    let files = cb
        .get()
        .file_list()
        .map_err(|e| PasteImageError::ClipboardUnavailable(e.to_string()));
    let dyn_img = if let Some(img) = files
        .unwrap_or_default()
        .into_iter()
        .find_map(|f| image::open(f).ok())
    {
        tracing::debug!(
            "clipboard image opened from file: {}x{}",
            img.width(),
            img.height()
        );
        img
    } else {
        // No usable file on the clipboard; fall back to raw image data.
        let _span = tracing::debug_span!("get_image").entered();
        let img = cb
            .get_image()
            .map_err(|e| PasteImageError::NoImage(e.to_string()))?;
        let w = img.width as u32;
        let h = img.height as u32;
        tracing::debug!("clipboard image opened from image: {}x{}", w, h);
        // arboard hands back raw RGBA bytes; reject buffers whose length
        // doesn't match the reported dimensions.
        let Some(rgba_img) = image::RgbaImage::from_raw(w, h, img.bytes.into_owned()) else {
            return Err(PasteImageError::EncodeFailed("invalid RGBA buffer".into()));
        };
        image::DynamicImage::ImageRgba8(rgba_img)
    };
    let mut png: Vec<u8> = Vec::new();
    {
        let span =
            tracing::debug_span!("encode_image", byte_length = tracing::field::Empty).entered();
        let mut cursor = std::io::Cursor::new(&mut png);
        dyn_img
            .write_to(&mut cursor, image::ImageFormat::Png)
            .map_err(|e| PasteImageError::EncodeFailed(e.to_string()))?;
        span.record("byte_length", png.len());
    }
    Ok((
        png,
        PastedImageInfo {
            width: dyn_img.width(),
            height: dyn_img.height(),
            encoded_format: EncodedImageFormat::Png,
        },
    ))
}
/// Android/Termux does not support arboard; return a clear error.
#[cfg(target_os = "android")]
pub fn paste_image_as_png() -> Result<(Vec<u8>, PastedImageInfo), PasteImageError> {
    Err(PasteImageError::ClipboardUnavailable(
        "clipboard image paste is unsupported on Android".into(),
    ))
}
/// Convenience: write to a temp file and return its path + info.
///
/// On Linux, a failed clipboard read additionally attempts a WSL fallback
/// (PowerShell on the Windows side); elsewhere the original error is
/// returned unchanged.
#[cfg(not(target_os = "android"))]
pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImageError> {
    // First attempt: read image from system clipboard via arboard (native paths or image data).
    match paste_image_as_png() {
        Ok((png, info)) => {
            // Create a unique temporary file with a .png suffix to avoid collisions.
            let tmp = Builder::new()
                .prefix("codex-clipboard-")
                .suffix(".png")
                .tempfile()
                .map_err(|e| PasteImageError::IoError(e.to_string()))?;
            std::fs::write(tmp.path(), &png)
                .map_err(|e| PasteImageError::IoError(e.to_string()))?;
            // Persist the file (so it remains after the handle is dropped) and return its PathBuf.
            let (_file, path) = tmp
                .keep()
                .map_err(|e| PasteImageError::IoError(e.error.to_string()))?;
            Ok((path, info))
        }
        Err(e) => {
            #[cfg(target_os = "linux")]
            {
                // On fallback failure, surface the ORIGINAL error, not the
                // fallback's — hence `.or(Err(e))`.
                try_wsl_clipboard_fallback(&e).or(Err(e))
            }
            #[cfg(not(target_os = "linux"))]
            {
                Err(e)
            }
        }
    }
}
/// Attempt WSL fallback for clipboard image paste.
///
/// If clipboard is unavailable (common under WSL because arboard cannot access
/// the Windows clipboard), attempt a WSL fallback that calls PowerShell on the
/// Windows side to write the clipboard image to a temporary file, then return
/// the corresponding WSL path.
#[cfg(target_os = "linux")]
fn try_wsl_clipboard_fallback(
    error: &PasteImageError,
) -> Result<(PathBuf, PastedImageInfo), PasteImageError> {
    use PasteImageError::ClipboardUnavailable;
    use PasteImageError::NoImage;
    // Only meaningful under WSL, and only for "couldn't read" errors;
    // encode/IO failures are not clipboard-access problems.
    if !is_probably_wsl() || !matches!(error, ClipboardUnavailable(_) | NoImage(_)) {
        return Err(error.clone());
    }
    tracing::debug!("attempting Windows PowerShell clipboard fallback");
    let Some(win_path) = try_dump_windows_clipboard_image() else {
        return Err(error.clone());
    };
    tracing::debug!("powershell produced path: {}", win_path);
    let Some(mapped_path) = convert_windows_path_to_wsl(&win_path) else {
        return Err(error.clone());
    };
    // Sanity check: the mapped file must decode as an image.
    let Ok((w, h)) = image::image_dimensions(&mapped_path) else {
        return Err(error.clone());
    };
    // Return the mapped path directly without copying.
    // The file will be read and base64-encoded during serialization.
    Ok((
        mapped_path,
        PastedImageInfo {
            width: w,
            height: h,
            encoded_format: EncodedImageFormat::Png,
        },
    ))
}
/// Try to call a Windows PowerShell command (several common names) to save the
/// clipboard image to a temporary PNG and return the Windows path to that file.
/// Returns None if no command succeeded or no image was present.
#[cfg(target_os = "linux")]
fn try_dump_windows_clipboard_image() -> Option<String> {
    // Powershell script: save image from clipboard to a temp png and print the path.
    // Force UTF-8 output to avoid encoding issues between powershell.exe (UTF-16LE default)
    // and pwsh (UTF-8 default).
    const SCRIPT: &str = r#"[Console]::OutputEncoding = [System.Text.Encoding]::UTF8; $img = Get-Clipboard -Format Image; if ($img -ne $null) { $p=[System.IO.Path]::GetTempFileName(); $p = [System.IO.Path]::ChangeExtension($p,'png'); $img.Save($p,[System.Drawing.Imaging.ImageFormat]::Png); Write-Output $p } else { exit 1 }"#;
    // Probe the common PowerShell binary names in order; first success wins.
    ["powershell.exe", "pwsh", "powershell"]
        .into_iter()
        .find_map(|cmd| {
            let output = match std::process::Command::new(cmd)
                .args(["-NoProfile", "-Command", SCRIPT])
                .output()
            {
                Ok(output) => output,
                Err(err) => {
                    tracing::debug!("{} not executable: {}", cmd, err);
                    return None;
                }
            };
            if !output.status.success() {
                tracing::debug!("{} returned non-zero status", cmd);
                return None;
            }
            // Decode as UTF-8 (forced by the script above).
            let win_path = String::from_utf8_lossy(&output.stdout).trim().to_string();
            if win_path.is_empty() {
                return None;
            }
            tracing::debug!("{} saved clipboard image to {}", cmd, win_path);
            Some(win_path)
        })
}
// Android/Termux has no arboard support; mirror paste_image_as_png's error.
#[cfg(target_os = "android")]
pub fn paste_image_to_temp_png() -> Result<(PathBuf, PastedImageInfo), PasteImageError> {
    // Keep error consistent with paste_image_as_png.
    Err(PasteImageError::ClipboardUnavailable(
        "clipboard image paste is unsupported on Android".into(),
    ))
}
/// Normalize pasted text that may represent a filesystem path.
///
/// Supports:
/// - `file://` URLs (converted to local paths)
/// - Windows/UNC paths
/// - shell-escaped single paths (via `shlex`)
///
/// Returns `None` when the text does not look like a single path (e.g. it
/// splits into multiple shell tokens).
pub fn normalize_pasted_path(pasted: &str) -> Option<PathBuf> {
    let pasted = pasted.trim();
    // file:// URL → filesystem path
    if let Ok(url) = url::Url::parse(pasted)
        && url.scheme() == "file"
    {
        return url.to_file_path().ok();
    }
    // TODO: We'll improve the implementation/unit tests over time, as appropriate.
    // Possibly use typed-path: https://github.com/openai/codex/pull/2567/commits/3cc92b78e0a1f94e857cf4674d3a9db918ed352e
    //
    // Detect unquoted Windows paths and bypass POSIX shlex which
    // treats backslashes as escapes (e.g., C:\Users\Alice\file.png).
    // Also handles UNC paths (\\server\share\path).
    let looks_like_windows_path = {
        // Drive letter path: C:\ or C:/
        let drive = pasted
            .chars()
            .next()
            .map(|c| c.is_ascii_alphabetic())
            .unwrap_or(false)
            && pasted.get(1..2) == Some(":")
            && pasted
                .get(2..3)
                .map(|s| s == "\\" || s == "/")
                .unwrap_or(false);
        // UNC path: \\server\share
        let unc = pasted.starts_with("\\\\");
        drive || unc
    };
    if looks_like_windows_path {
        #[cfg(target_os = "linux")]
        {
            // Under WSL, prefer the /mnt/<drive>/... mapping when available.
            if is_probably_wsl()
                && let Some(converted) = convert_windows_path_to_wsl(pasted)
            {
                return Some(converted);
            }
        }
        return Some(PathBuf::from(pasted));
    }
    // shell-escaped single path → unescaped
    let parts: Vec<String> = shlex::Shlex::new(pasted).collect();
    if parts.len() == 1 {
        return parts.into_iter().next().map(PathBuf::from);
    }
    None
}
/// Best-effort detection of running under WSL.
///
/// Primary signal: the kernel banner in `/proc/version` mentioning
/// "microsoft" or "wsl". Fallback: the WSL interop environment variables,
/// which covers custom kernels where the banner is unhelpful.
#[cfg(target_os = "linux")]
pub(crate) fn is_probably_wsl() -> bool {
    let kernel_mentions_wsl = std::fs::read_to_string("/proc/version")
        .map(|version| {
            let version = version.to_lowercase();
            version.contains("microsoft") || version.contains("wsl")
        })
        .unwrap_or(false);
    kernel_mentions_wsl
        || std::env::var_os("WSL_DISTRO_NAME").is_some()
        || std::env::var_os("WSL_INTEROP").is_some()
}
/// Maps a Windows drive path (`C:\...` or `C:/...`) to its WSL mount point
/// (`/mnt/c/...`).
///
/// Returns `None` for UNC paths (no /mnt equivalent) and for anything not
/// starting with `<ascii letter>:`.
#[cfg(target_os = "linux")]
fn convert_windows_path_to_wsl(input: &str) -> Option<PathBuf> {
    if input.starts_with("\\\\") {
        return None;
    }
    let drive = input.chars().next()?.to_ascii_lowercase();
    // Non-letter first chars (digits, punctuation) stay unchanged by
    // to_ascii_lowercase and fail this check.
    if !drive.is_ascii_lowercase() || input.get(1..2) != Some(":") {
        return None;
    }
    let tail = input.get(2..)?;
    let mut mapped = PathBuf::from(format!("/mnt/{drive}"));
    // Accept both separators and drop empty components (doubled slashes).
    mapped.extend(
        tail.trim_start_matches(['\\', '/'])
            .split(['\\', '/'])
            .filter(|component| !component.is_empty()),
    );
    Some(mapped)
}
/// Infer an image format for the provided path based on its extension.
///
/// Matching is case-insensitive; unknown or missing extensions map to
/// `EncodedImageFormat::Other`.
pub fn pasted_image_format(path: &Path) -> EncodedImageFormat {
    let ext = path
        .extension()
        .and_then(|e| e.to_str())
        .map(str::to_ascii_lowercase);
    match ext.as_deref() {
        Some("png") => EncodedImageFormat::Png,
        Some("jpg" | "jpeg") => EncodedImageFormat::Jpeg,
        _ => EncodedImageFormat::Other,
    }
}
#[cfg(test)]
mod pasted_paths_tests {
    use super::*;

    // POSIX-only: `file://` URLs resolve to the URL's path component.
    #[cfg(not(windows))]
    #[test]
    fn normalize_file_url() {
        let input = "file:///tmp/example.png";
        let result = normalize_pasted_path(input).expect("should parse file URL");
        assert_eq!(result, PathBuf::from("/tmp/example.png"));
    }

    // Bare Windows drive paths pass through unchanged, except on WSL where
    // they may be converted to an `/mnt/<drive>/...` path.
    #[test]
    fn normalize_file_url_windows() {
        let input = r"C:\Temp\example.png";
        // NOTE(review): the expect message says "file URL" but the input is a
        // bare drive path — looks copy-pasted from the test above; confirm.
        let result = normalize_pasted_path(input).expect("should parse file URL");
        #[cfg(target_os = "linux")]
        let expected = if is_probably_wsl()
            && let Some(converted) = convert_windows_path_to_wsl(input)
        {
            converted
        } else {
            PathBuf::from(r"C:\Temp\example.png")
        };
        #[cfg(not(target_os = "linux"))]
        let expected = PathBuf::from(r"C:\Temp\example.png");
        assert_eq!(result, expected);
    }

    // Backslash-escaped spaces (shell style) are unescaped into a single path.
    #[test]
    fn normalize_shell_escaped_single_path() {
        let input = "/home/user/My\\ File.png";
        let result = normalize_pasted_path(input).expect("should unescape shell-escaped path");
        assert_eq!(result, PathBuf::from("/home/user/My File.png"));
    }

    // Plain surrounding double quotes are stripped via the fallback path.
    #[test]
    fn normalize_simple_quoted_path_fallback() {
        let input = "\"/home/user/My File.png\"";
        let result = normalize_pasted_path(input).expect("should trim simple quotes");
        assert_eq!(result, PathBuf::from("/home/user/My File.png"));
    }

    // Single-quoted paths are handled by shell-style splitting.
    #[test]
    fn normalize_single_quoted_unix_path() {
        let input = "'/home/user/My File.png'";
        let result = normalize_pasted_path(input).expect("should trim single quotes via shlex");
        assert_eq!(result, PathBuf::from("/home/user/My File.png"));
    }

    #[test]
    fn normalize_multiple_tokens_returns_none() {
        // Two tokens after shell splitting → not a single path
        let input = "/home/user/a\\ b.png /home/user/c.png";
        let result = normalize_pasted_path(input);
        assert!(result.is_none());
    }

    // Extension matching is case-insensitive; unknown/missing extensions are Other.
    #[test]
    fn pasted_image_format_png_jpeg_unknown() {
        assert_eq!(
            pasted_image_format(Path::new("/a/b/c.PNG")),
            EncodedImageFormat::Png
        );
        assert_eq!(
            pasted_image_format(Path::new("/a/b/c.jpg")),
            EncodedImageFormat::Jpeg
        );
        assert_eq!(
            pasted_image_format(Path::new("/a/b/c.JPEG")),
            EncodedImageFormat::Jpeg
        );
        assert_eq!(
            pasted_image_format(Path::new("/a/b/c")),
            EncodedImageFormat::Other
        );
        assert_eq!(
            pasted_image_format(Path::new("/a/b/c.webp")),
            EncodedImageFormat::Other
        );
    }

    // Single quotes around a Windows-style path are stripped.
    #[test]
    fn normalize_single_quoted_windows_path() {
        let input = r"'C:\\Users\\Alice\\My File.jpeg'";
        let result =
            normalize_pasted_path(input).expect("should trim single quotes on windows path");
        assert_eq!(result, PathBuf::from(r"C:\\Users\\Alice\\My File.jpeg"));
    }

    // An unquoted Windows path with spaces is accepted as one token;
    // under WSL it may additionally be converted.
    #[test]
    fn normalize_unquoted_windows_path_with_spaces() {
        let input = r"C:\\Users\\Alice\\My Pictures\\example image.png";
        let result = normalize_pasted_path(input).expect("should accept unquoted windows path");
        #[cfg(target_os = "linux")]
        let expected = if is_probably_wsl()
            && let Some(converted) = convert_windows_path_to_wsl(input)
        {
            converted
        } else {
            PathBuf::from(r"C:\\Users\\Alice\\My Pictures\\example image.png")
        };
        #[cfg(not(target_os = "linux"))]
        let expected = PathBuf::from(r"C:\\Users\\Alice\\My Pictures\\example image.png");
        assert_eq!(result, expected);
    }

    // UNC (`\\server\share`) paths are preserved verbatim.
    #[test]
    fn normalize_unc_windows_path() {
        let input = r"\\\\server\\share\\folder\\file.jpg";
        let result = normalize_pasted_path(input).expect("should accept UNC windows path");
        assert_eq!(
            result,
            PathBuf::from(r"\\\\server\\share\\folder\\file.jpg")
        );
    }

    // Extension detection works on backslash-separated paths too.
    #[test]
    fn pasted_image_format_with_windows_style_paths() {
        assert_eq!(
            pasted_image_format(Path::new(r"C:\\a\\b\\c.PNG")),
            EncodedImageFormat::Png
        );
        assert_eq!(
            pasted_image_format(Path::new(r"C:\\a\\b\\c.jpeg")),
            EncodedImageFormat::Jpeg
        );
        assert_eq!(
            pasted_image_format(Path::new(r"C:\\a\\b\\noext")),
            EncodedImageFormat::Other
        );
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn normalize_windows_path_in_wsl() {
        // This test only runs on actual WSL systems
        if !is_probably_wsl() {
            // Skip test if not on WSL
            return;
        }
        let input = r"C:\\Users\\Alice\\Pictures\\example image.png";
        let result = normalize_pasted_path(input).expect("should convert windows path on wsl");
        assert_eq!(
            result,
            PathBuf::from("/mnt/c/Users/Alice/Pictures/example image.png")
        );
    }
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/main.rs | codex-rs/tui2/src/main.rs | use clap::Parser;
use codex_arg0::arg0_dispatch_or_else;
use codex_common::CliConfigOverrides;
use codex_tui2::Cli;
use codex_tui2::run_main;
/// Wrapper CLI that flattens the shared `-c key=value` config overrides next
/// to the inner TUI [`Cli`]; `main` merges `config_overrides` into
/// `inner.config_overrides` before running.
#[derive(Parser, Debug)]
struct TopCli {
    /// Config overrides parsed at the wrapper level.
    #[clap(flatten)]
    config_overrides: CliConfigOverrides,
    /// The actual TUI CLI arguments.
    #[clap(flatten)]
    inner: Cli,
}
fn main() -> anyhow::Result<()> {
arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move {
let top_cli = TopCli::parse();
let mut inner = top_cli.inner;
inner
.config_overrides
.raw_overrides
.splice(0..0, top_cli.config_overrides.raw_overrides);
let exit_info = run_main(inner, codex_linux_sandbox_exe).await?;
let token_usage = exit_info.token_usage;
if !token_usage.is_zero() {
println!("{}", codex_core::protocol::FinalOutput::from(token_usage),);
}
Ok(())
})
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/transcript_view_cache.rs | codex-rs/tui2/src/transcript_view_cache.rs | //! Caches for transcript rendering in `codex-tui2`.
//!
//! The inline transcript view is drawn every frame. Two parts of that draw can
//! be expensive in steady state:
//!
//! - Building the *wrapped transcript* (`HistoryCell` → flattened `Line`s +
//! per-line metadata). This work is needed for rendering and for scroll math.
//! - Rendering each visible `Line` into the frame buffer. Ratatui's rendering
//! path performs grapheme segmentation and width/layout work; repeatedly
//! rerendering the same visible lines can dominate CPU during streaming.
//!
//! This module provides a pair of caches:
//!
//! - [`WrappedTranscriptCache`] memoizes the wrapped transcript for a given
//! terminal width and supports incremental append when new history cells are
//! added.
//! - [`TranscriptRasterCache`] memoizes the *rasterized* representation of
//! individual wrapped lines (a single terminal row of `Cell`s) so redraws can
//! cheaply copy already-rendered cells instead of re-running grapheme
//! segmentation for every frame.
//!
//! Notes:
//! - All caches are invalidated on width changes because wrapping and layout
//! depend on the viewport width.
//! - Rasterization is cached for base transcript content only; selection
//! highlight and copy affordances are applied after the rows are drawn, so
//! they do not pollute the cache.
//!
//! ## Algorithm overview
//!
//! At a high level, transcript rendering is a two-stage pipeline:
//!
//! 1. **Build wrapped transcript lines**: flatten the logical `HistoryCell` list into a single
//! vector of visual [`Line`]s and a parallel `meta` vector (`TranscriptLineMeta`) that maps each
//! visual line back to `(cell_index, line_in_cell)` or `Spacer`.
//! 2. **Render visible lines into the frame buffer**: draw the subset of wrapped lines that are
//! currently visible in the viewport.
//!
//! The cache mirrors that pipeline:
//!
//! - [`WrappedTranscriptCache`] memoizes stage (1) for the current `width` and supports incremental
//! append when new cells are pushed during streaming.
//! - [`TranscriptRasterCache`] memoizes stage (2) per line by caching the final rendered row
//! (`Vec<Cell>`) for a given `(line_index, is_user_row)` at the current `width`.
//!
//! ### Per draw tick
//!
//! Callers typically do the following during a draw tick:
//!
//! 1. Call [`TranscriptViewCache::ensure_wrapped`] with the current `cells` and viewport `width`.
//! This may append new cells or rebuild from scratch (on width change/truncation/replacement).
//! 2. Use [`TranscriptViewCache::lines`] and [`TranscriptViewCache::line_meta`] for scroll math and
//! to resolve the visible `line_index` range.
//! 3. Configure row caching via [`TranscriptViewCache::set_raster_capacity`] (usually a few
//! viewports worth).
//! 4. For each visible `line_index`, call [`TranscriptViewCache::render_row_index_into`] to draw a
//! single terminal row.
//!
//! ### Rasterization details
//!
//! `render_row_index_into` delegates to `TranscriptRasterCache::render_row_into`:
//!
//! - On a **cache hit**, it copies cached cells into the destination buffer (no grapheme
//! segmentation, no span layout).
//! - On a **cache miss**, it renders the wrapped [`Line`] into a scratch `Buffer` with height 1,
//! copies out the resulting cells, inserts them into the cache, and then copies them into the
//! destination buffer.
//!
//! Cached rows are invalidated when:
//! - the wrapped transcript is rebuilt (line indices shift)
//! - the width changes (layout changes)
//!
//! The raster cache is bounded by `capacity` using an approximate LRU so it does not grow without
//! bound during long sessions.
use crate::history_cell::HistoryCell;
use crate::history_cell::UserHistoryCell;
use crate::transcript_render::TranscriptLines;
use crate::tui::scrolling::TranscriptLineMeta;
use ratatui::buffer::Buffer;
use ratatui::prelude::Rect;
use ratatui::text::Line;
use ratatui::widgets::WidgetRef;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
/// Top-level cache for the inline transcript viewport.
///
/// This combines two caches that are used together during a draw tick:
///
/// - [`WrappedTranscriptCache`] produces the flattened wrapped transcript lines and metadata used
///   for rendering, scrolling, and selection/copy mapping.
/// - [`TranscriptRasterCache`] caches the expensive conversion from a wrapped [`Line`] into a row
///   of terminal [`ratatui::buffer::Cell`]s so repeated redraws can copy cells instead of redoing
///   grapheme segmentation.
///
/// The caches are intentionally coupled:
/// - width changes invalidate both layers
/// - wrapped transcript rebuilds invalidate the raster cache because line indices shift
///
/// Callers drive it via [`Self::ensure_wrapped`] once per draw tick, then render visible rows
/// through [`Self::render_row_index_into`].
pub(crate) struct TranscriptViewCache {
    /// Memoized flattened wrapped transcript content for the current width.
    wrapped: WrappedTranscriptCache,
    /// Per-line row rasterization cache for the current width.
    raster: TranscriptRasterCache,
}
impl TranscriptViewCache {
    /// Construct a cache with no wrapped transcript and no rasterized rows.
    pub(crate) fn new() -> Self {
        Self {
            wrapped: WrappedTranscriptCache::new(),
            raster: TranscriptRasterCache::new(),
        }
    }

    /// Bring the wrapped transcript up to date for `cells` at `width`.
    ///
    /// Shared entrypoint for the transcript renderer and scroll math. Delegates to
    /// [`WrappedTranscriptCache::ensure`], which rebuilds on width change, truncation (e.g. a
    /// backtrack/fork trimming `transcript_cells`), or replacement of the cell list (detected via
    /// the first cell's pointer identity), and otherwise appends only the new cells.
    ///
    /// Whenever the wrapped transcript is rebuilt — or the raster cache's width has drifted from
    /// the active width — the per-line raster cache is cleared, since cached rows are keyed by
    /// line index and rendered at a specific width.
    pub(crate) fn ensure_wrapped(&mut self, cells: &[Arc<dyn HistoryCell>], width: u16) {
        match self.wrapped.ensure(cells, width) {
            WrappedTranscriptUpdate::Rebuilt => {
                // Line indices shifted; every cached raster is stale.
                self.raster.width = width;
                self.raster.clear();
            }
            _ if self.raster.width != width => {
                // Keep the invariant that the raster cache always matches the
                // active wrapped width.
                self.raster.clear();
                self.raster.width = width;
            }
            _ => {}
        }
    }

    /// The cached flattened wrapped transcript lines.
    ///
    /// Used for `total_lines`/viewport math and read-only inspection. Avoid rendering from this
    /// slice directly in the draw hot path; prefer [`Self::render_row_index_into`] so the raster
    /// cache is used.
    pub(crate) fn lines(&self) -> &[Line<'static>] {
        &self.wrapped.transcript.lines
    }

    /// Per-line origin metadata aligned with [`Self::lines`].
    ///
    /// Maps each visible line index back to its `(cell_index, line_in_cell)` origin (or a
    /// `Spacer` row), which keeps scroll anchoring and selection/copy mapping stable as the
    /// transcript grows and reflows.
    pub(crate) fn line_meta(&self) -> &[TranscriptLineMeta] {
        &self.wrapped.transcript.meta
    }

    /// Configure how many rasterized rows may be retained.
    ///
    /// A capacity of 0 disables row caching entirely (wrapped transcript caching still applies).
    pub(crate) fn set_raster_capacity(&mut self, capacity: usize) {
        self.raster.set_capacity(capacity);
    }

    /// Whether the flattened line at `line_index` originates from a user-authored cell.
    ///
    /// User rows get a row-wide base style (background); that is a property of the originating
    /// cell, so it is resolved through the cached `line_meta` mapping.
    pub(crate) fn is_user_row(&self, line_index: usize) -> bool {
        self.wrapped
            .transcript
            .meta
            .get(line_index)
            .and_then(TranscriptLineMeta::cell_index)
            .and_then(|cell_index| self.wrapped.is_user_cell.get(cell_index).copied())
            .unwrap_or(false)
    }

    /// Render a single cached line index into the destination `buf`.
    ///
    /// Draw hot-path helper: resolves user-row styling, looks up the wrapped [`Line`], and hands
    /// off to the raster cache, which either copies cached cells or rasterizes on a miss.
    ///
    /// Callers must have invoked [`Self::ensure_wrapped`] for the current width first.
    pub(crate) fn render_row_index_into(
        &mut self,
        line_index: usize,
        row_area: Rect,
        buf: &mut Buffer,
    ) {
        let user_row = self.is_user_row(line_index);
        let wrapped_line = &self.wrapped.transcript.lines[line_index];
        self.raster
            .render_row_into(line_index, user_row, wrapped_line, row_area, buf);
    }
}
/// Outcome of [`WrappedTranscriptCache::ensure`], used by the caller to decide whether per-line
/// caches keyed by line index are still valid.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum WrappedTranscriptUpdate {
    /// The cache already represented the provided `cells` and `width`.
    Unchanged,
    /// The cache appended additional cells without rebuilding.
    ///
    /// Existing line indices are preserved, so previously rasterized rows remain valid.
    Appended,
    /// The cache rebuilt from scratch (width change, truncation, or replacement).
    ///
    /// Line indices may have shifted; index-keyed caches must be cleared.
    Rebuilt,
}
/// Incremental memoization of wrapped transcript lines for a given width.
///
/// This cache exists so callers doing tight-loop scroll math (mouse wheel, PgUp/PgDn) and render
/// ticks do not repeatedly rebuild the wrapped transcript (`HistoryCell` → flattened `Line`s).
///
/// It assumes the transcript is append-mostly: when new cells arrive, they are appended to the end
/// of `cells` and existing cells do not mutate. If the underlying cell list is replaced or
/// truncated, the cache rebuilds from scratch.
struct WrappedTranscriptCache {
    /// Width this cache was last built for.
    width: u16,
    /// Number of leading cells already incorporated into [`Self::transcript`].
    cell_count: usize,
    /// Pointer identity of the first cell at the time the cache was built.
    ///
    /// This is a cheap replacement/truncation detector: if the caller swaps the transcript list
    /// (for example, drops old cells from the front to cap history length), the length may remain
    /// the same while the content shifts. In that case, we must rebuild because `(cell_index,
    /// line_in_cell)` mappings and scroll anchors would otherwise become inconsistent.
    first_cell_ptr: Option<*const dyn HistoryCell>,
    /// Cached flattened wrapped transcript output.
    ///
    /// Invariant: `lines.len() == meta.len() == joiner_before.len()`.
    transcript: TranscriptLines,
    /// Whether the flattened transcript has emitted at least one non-spacer line.
    ///
    /// This is used to decide whether to insert a spacer line between non-continuation cells.
    has_emitted_lines: bool,
    /// Per-cell marker indicating whether a logical cell is a [`UserHistoryCell`].
    ///
    /// Invariant: one entry per cell incorporated so far (`is_user_cell.len() == cell_count`).
    /// We store this alongside the wrapped transcript so user-row styling can be derived cheaply
    /// from `TranscriptLineMeta::cell_index()` without re-inspecting the cell type every frame.
    is_user_cell: Vec<bool>,
}
impl WrappedTranscriptCache {
    /// Create an empty wrapped transcript cache.
    ///
    /// The cache is inert until the first [`Self::ensure`] call; until then it contains no
    /// rendered transcript state.
    fn new() -> Self {
        Self {
            width: 0,
            cell_count: 0,
            first_cell_ptr: None,
            transcript: TranscriptLines {
                lines: Vec::new(),
                meta: Vec::new(),
                joiner_before: Vec::new(),
            },
            has_emitted_lines: false,
            is_user_cell: Vec::new(),
        }
    }

    /// Ensure the wrapped transcript represents `cells` at `width`.
    ///
    /// This cache is intentionally single-entry and width-scoped:
    /// - when `width` is unchanged and `cells` has grown, append only the new cells
    /// - when `width` changes or the transcript is replaced/truncated, rebuild from scratch
    ///
    /// The cache assumes history cells are append-only and immutable once inserted. If existing
    /// cell contents can change without changing identity, callers must treat that as a rebuild.
    fn ensure(&mut self, cells: &[Arc<dyn HistoryCell>], width: u16) -> WrappedTranscriptUpdate {
        if width == 0 {
            // Nothing can be wrapped at zero width; record bookkeeping and drop all output.
            self.reset(cells, width);
            return WrappedTranscriptUpdate::Rebuilt;
        }
        let current_first_ptr = cells.first().map(Arc::as_ptr);
        let truncated = self.cell_count > cells.len();
        let replaced = self.cell_count > 0
            && current_first_ptr.is_some()
            && self.first_cell_ptr != current_first_ptr;
        if self.width != width || truncated || replaced {
            self.rebuild(cells, width);
            return WrappedTranscriptUpdate::Rebuilt;
        }
        if self.cell_count == cells.len() {
            return WrappedTranscriptUpdate::Unchanged;
        }
        // Append-only growth: wrap just the new suffix of cells.
        let old_cell_count = self.cell_count;
        self.cell_count = cells.len();
        self.first_cell_ptr = current_first_ptr;
        self.append_cells(cells, old_cell_count, width);
        WrappedTranscriptUpdate::Appended
    }

    /// Rebuild the wrapped transcript cache from scratch.
    ///
    /// This is used when width changes, the transcript is truncated, or the caller provides a new
    /// cell list that cannot be treated as an append to the previous one.
    fn rebuild(&mut self, cells: &[Arc<dyn HistoryCell>], width: u16) {
        self.reset(cells, width);
        self.is_user_cell.reserve(cells.len());
        self.append_cells(cells, 0, width);
    }

    /// Reset bookkeeping for `cells` at `width` and clear all cached transcript output.
    ///
    /// Shared by the zero-width path in [`Self::ensure`] and by [`Self::rebuild`] so the two
    /// cannot drift apart.
    fn reset(&mut self, cells: &[Arc<dyn HistoryCell>], width: u16) {
        self.width = width;
        self.cell_count = cells.len();
        self.first_cell_ptr = cells.first().map(Arc::as_ptr);
        self.transcript.lines.clear();
        self.transcript.meta.clear();
        self.transcript.joiner_before.clear();
        self.has_emitted_lines = false;
        self.is_user_cell.clear();
    }

    /// Wrap and append cells `start..cells.len()` onto the cached transcript.
    ///
    /// Also records the per-cell `is_user_cell` marker for each appended cell.
    fn append_cells(&mut self, cells: &[Arc<dyn HistoryCell>], start: usize, width: u16) {
        let base_opts: crate::wrapping::RtOptions<'_> =
            crate::wrapping::RtOptions::new(width.max(1) as usize);
        for (cell_index, cell) in cells.iter().enumerate().skip(start) {
            self.is_user_cell
                .push(cell.as_any().is::<UserHistoryCell>());
            crate::transcript_render::append_wrapped_transcript_cell(
                &mut self.transcript,
                &mut self.has_emitted_lines,
                cell_index,
                cell,
                width,
                &base_opts,
            );
        }
    }
}
/// Bounded cache of rasterized transcript rows.
///
/// Each cached entry stores the final rendered [`ratatui::buffer::Cell`] values for a single
/// transcript line rendered into a 1-row buffer.
///
/// Keying:
/// - The cache key includes `(line_index, is_user_row)`.
/// - Width is stored out-of-band and any width change clears the cache.
///
/// Eviction:
/// - The cache uses an approximate LRU implemented with a monotonic stamp (`clock`) and an
///   `(key, stamp)` queue.
/// - This avoids per-access list manipulation while still keeping memory bounded.
struct TranscriptRasterCache {
    /// Width this cache's rasterized rows were rendered at.
    width: u16,
    /// Maximum number of rasterized rows to retain (0 disables caching).
    capacity: usize,
    /// Monotonic counter used to stamp accesses for eviction.
    clock: u64,
    /// Version of the terminal palette used for the cached rows.
    palette_version: u64,
    /// Access log used for approximate LRU eviction.
    ///
    /// May contain stale entries (older stamps for a key that was re-accessed); eviction skips
    /// any entry whose stamp no longer matches the row's `last_used`.
    lru: VecDeque<(u64, u64)>,
    /// Cached rasterized rows by key.
    rows: HashMap<u64, RasterizedRow>,
}
/// Cached raster for a single transcript line at a particular width.
#[derive(Clone)]
struct RasterizedRow {
    /// The last access stamp recorded for this row.
    ///
    /// Eviction only removes a row when a popped `(key, stamp)` matches this value; older
    /// stamps in the access log for the same key are treated as stale and skipped.
    last_used: u64,
    /// The full row of rendered cells (length is `width` at the time of rasterization).
    cells: Vec<ratatui::buffer::Cell>,
}
impl TranscriptRasterCache {
    /// Create an empty raster cache (caching disabled until a non-zero capacity is set).
    fn new() -> Self {
        Self {
            width: 0,
            capacity: 0,
            clock: 0,
            palette_version: crate::terminal_palette::palette_version(),
            lru: VecDeque::new(),
            rows: HashMap::new(),
        }
    }

    /// Drop all cached rasterized rows and reset access tracking.
    ///
    /// This is used on width changes and when disabling caching so we don't retain stale rows or
    /// unbounded memory.
    fn clear(&mut self) {
        self.lru.clear();
        self.rows.clear();
        self.clock = 0;
    }

    /// Set the maximum number of cached rasterized rows.
    ///
    /// When set to 0, caching is disabled and any existing cached rows are dropped.
    fn set_capacity(&mut self, capacity: usize) {
        self.capacity = capacity;
        self.evict_if_needed();
    }

    /// Render a single wrapped transcript line into `buf`, using a cached raster when possible.
    ///
    /// The cache key includes `is_user_row` because user rows apply a row-wide base style, so the
    /// final raster differs even when the text spans are identical.
    fn render_row_into(
        &mut self,
        line_index: usize,
        is_user_row: bool,
        line: &Line<'static>,
        row_area: Rect,
        buf: &mut Buffer,
    ) {
        if row_area.width == 0 || row_area.height == 0 {
            return;
        }
        // A palette change invalidates the colors baked into every cached row.
        let palette_version = crate::terminal_palette::palette_version();
        if palette_version != self.palette_version {
            self.palette_version = palette_version;
            self.clear();
        }
        if self.width != row_area.width {
            self.width = row_area.width;
            self.clear();
        }
        if self.capacity == 0 {
            // Caching disabled: rasterize directly into the destination.
            let cells = rasterize_line(line, row_area.width, is_user_row);
            copy_row(row_area, buf, &cells);
            return;
        }
        let key = raster_key(line_index, is_user_row);
        let stamp = self.bump_clock();
        if let Some(row) = self.rows.get_mut(&key) {
            row.last_used = stamp;
            copy_row(row_area, buf, &row.cells);
            self.lru.push_back((key, stamp));
            // Fix: a hit-only workload (steady redraw of already-cached rows) never
            // evicts, so without compaction the access log would grow without bound
            // for the lifetime of the session.
            self.compact_lru_if_needed();
            return;
        }
        let cells = rasterize_line(line, row_area.width, is_user_row);
        copy_row(row_area, buf, &cells);
        self.rows.insert(
            key,
            RasterizedRow {
                last_used: stamp,
                cells,
            },
        );
        self.lru.push_back((key, stamp));
        self.evict_if_needed();
    }

    /// Return a new access stamp.
    ///
    /// The stamp is used only for equality checks ("is this the latest access for this key?") so a
    /// wrapping counter is sufficient; `u64` wraparound is effectively unreachable in practice for
    /// a UI cache.
    fn bump_clock(&mut self) -> u64 {
        let stamp = self.clock;
        self.clock = self.clock.wrapping_add(1);
        stamp
    }

    /// Evict old cached rows until `rows.len() <= capacity`.
    ///
    /// The cache uses an approximate LRU: we push `(key, stamp)` on every access, and only evict a
    /// row when the popped `(key, stamp)` matches the row's current `last_used` stamp.
    fn evict_if_needed(&mut self) {
        if self.capacity == 0 {
            self.clear();
            return;
        }
        while self.rows.len() > self.capacity {
            let Some((key, stamp)) = self.lru.pop_front() else {
                break;
            };
            if self
                .rows
                .get(&key)
                .is_some_and(|row| row.last_used == stamp)
            {
                self.rows.remove(&key);
            }
        }
        self.compact_lru_if_needed();
    }

    /// Bound the approximate-LRU access log.
    ///
    /// Every access pushes a `(key, stamp)` entry, but entries are only popped while evicting, so
    /// cache hits alone would let `lru` grow indefinitely during a long session. Once the log
    /// exceeds a slack multiple of `capacity`, drop every stale entry (one whose stamp is no
    /// longer the row's latest access). Eviction semantics are unchanged: stale entries are
    /// exactly the ones eviction would have skipped, and the relative order of live entries is
    /// preserved.
    fn compact_lru_if_needed(&mut self) {
        // Slack keeps compaction amortized O(1) per access.
        let limit = self.capacity.saturating_mul(8).max(64);
        if self.lru.len() <= limit {
            return;
        }
        let rows = &self.rows;
        self.lru
            .retain(|(key, stamp)| rows.get(key).is_some_and(|row| row.last_used == *stamp));
    }
}
/// Compute the cache key for a rasterized transcript row.
///
/// We key by `line_index` (not by hashing line content) because:
/// - it is effectively free in the draw loop
/// - the wrapped transcript cache defines a stable `(index → Line)` mapping until the next rebuild
/// - rebuilds clear the raster cache, so indices cannot alias across different transcripts
///
/// `is_user_row` is included because user rows apply a row-wide base style that affects every
/// cell; it occupies the low bit, with the line index shifted above it.
fn raster_key(line_index: usize, is_user_row: bool) -> u64 {
    let shifted = (line_index as u64) << 1;
    if is_user_row { shifted | 1 } else { shifted }
}
/// Rasterize a single wrapped transcript [`Line`] into a 1-row cell vector.
///
/// This is the expensive step we want to avoid repeating on every redraw: it runs Ratatui's
/// rendering for the line (including grapheme segmentation) into a scratch buffer and then copies
/// out the rendered cells.
///
/// For user rows, the row is pre-filled with the base user style so the cached raster includes
/// the full-width background, matching the viewport behavior.
fn rasterize_line(
    line: &Line<'static>,
    width: u16,
    is_user_row: bool,
) -> Vec<ratatui::buffer::Cell> {
    let scratch_area = Rect::new(0, 0, width, 1);
    let mut scratch = Buffer::empty(scratch_area);
    if is_user_row {
        let base_style = crate::style::user_message_style();
        (0..width).for_each(|x| {
            scratch[(x, 0)].set_style(base_style);
        });
    }
    line.render_ref(scratch_area, &mut scratch);
    (0..width).map(|x| scratch[(x, 0)].clone()).collect()
}
/// Copy a cached rasterized row into a destination buffer at `area`.
///
/// This is the "fast path" for redraws: once a row is cached, a redraw copies the pre-rendered
/// cells into the frame buffer without re-running span layout/grapheme segmentation.
fn copy_row(area: Rect, buf: &mut Buffer, cells: &[ratatui::buffer::Cell]) {
    let row = area.y;
    // `area.x..area.right()` enumerates exactly the in-bounds columns; `zip`
    // stops at whichever of (columns, cached cells) runs out first.
    for (col, cell) in (area.x..area.right()).zip(cells.iter()) {
        buf[(col, row)] = cell.clone();
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::history_cell::TranscriptLinesWithJoiners;
use crate::history_cell::UserHistoryCell;
use pretty_assertions::assert_eq;
use ratatui::style::Color;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::text::Span;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
// Minimal `HistoryCell` stub: returns fixed lines/joiners and counts how many
// times its transcript content is requested, so tests can assert whether the
// cache re-rendered a cell or reused the memoized output.
#[derive(Debug)]
struct FakeCell {
    // Lines returned verbatim by both display and transcript rendering.
    lines: Vec<Line<'static>>,
    // Per-line joiner strings returned with the transcript lines.
    joiner_before: Vec<Option<String>>,
    // When true, the cell is treated as a continuation (no spacer inserted).
    is_stream_continuation: bool,
    // Incremented on every `transcript_lines_with_joiners` call.
    transcript_calls: Arc<AtomicUsize>,
}
impl FakeCell {
    fn new(
        lines: Vec<Line<'static>>,
        joiner_before: Vec<Option<String>>,
        is_stream_continuation: bool,
        transcript_calls: Arc<AtomicUsize>,
    ) -> Self {
        Self {
            lines,
            joiner_before,
            is_stream_continuation,
            transcript_calls,
        }
    }
}
impl HistoryCell for FakeCell {
    fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
        self.lines.clone()
    }
    fn transcript_lines_with_joiners(&self, _width: u16) -> TranscriptLinesWithJoiners {
        // Count each render request; the cache tests use this to verify
        // append-vs-rebuild behavior.
        self.transcript_calls.fetch_add(1, Ordering::Relaxed);
        TranscriptLinesWithJoiners {
            lines: self.lines.clone(),
            joiner_before: self.joiner_before.clone(),
        }
    }
    fn is_stream_continuation(&self) -> bool {
        self.is_stream_continuation
    }
}
// The cached wrapped transcript must be identical to a from-scratch build.
#[test]
fn wrapped_cache_matches_build_wrapped_transcript_lines() {
    let calls0 = Arc::new(AtomicUsize::new(0));
    let calls1 = Arc::new(AtomicUsize::new(0));
    let calls2 = Arc::new(AtomicUsize::new(0));
    let cells: Vec<Arc<dyn HistoryCell>> = vec![
        // Wrapping case: expect a soft-wrap joiner for the continuation segment.
        Arc::new(FakeCell::new(
            vec![Line::from("• hello world")],
            vec![None],
            false,
            calls0,
        )),
        // Preformatted (cyan) lines are not wrapped by the viewport wrapper.
        Arc::new(FakeCell::new(
            vec![Line::from("  let x = 12345;").cyan()],
            vec![None],
            true,
            calls1,
        )),
        // New non-continuation cell inserts a spacer.
        Arc::new(FakeCell::new(
            vec![Line::from("• foo bar")],
            vec![None],
            false,
            calls2,
        )),
    ];
    let width = 8;
    let expected = crate::transcript_render::build_wrapped_transcript_lines(&cells, width);
    let mut cache = TranscriptViewCache::new();
    cache.ensure_wrapped(&cells, width);
    assert_eq!(cache.lines(), expected.lines.as_slice());
    assert_eq!(cache.line_meta(), expected.meta.as_slice());
    assert_eq!(
        cache.wrapped.transcript.joiner_before,
        expected.joiner_before
    );
    // Invariant: lines, meta, and joiner_before stay aligned.
    assert_eq!(cache.lines().len(), cache.line_meta().len());
    assert_eq!(
        cache.lines().len(),
        cache.wrapped.transcript.joiner_before.len()
    );
}

// When width is stable and cells grow, only the new suffix is rendered.
#[test]
fn wrapped_cache_ensure_appends_only_new_cells_when_width_is_unchanged() {
    let calls0 = Arc::new(AtomicUsize::new(0));
    let calls1 = Arc::new(AtomicUsize::new(0));
    let cells: Vec<Arc<dyn HistoryCell>> = vec![
        Arc::new(FakeCell::new(
            vec![Line::from("• hello world")],
            vec![None],
            false,
            calls0.clone(),
        )),
        Arc::new(FakeCell::new(
            vec![Line::from("• foo bar")],
            vec![None],
            false,
            calls1.clone(),
        )),
    ];
    let mut cache = TranscriptViewCache::new();
    cache.ensure_wrapped(&cells[..1], 8);
    cache.ensure_wrapped(&cells, 8);
    // Each cell was rendered exactly once: the first was not re-rendered on append.
    assert_eq!(calls0.load(Ordering::Relaxed), 1);
    assert_eq!(calls1.load(Ordering::Relaxed), 1);
    assert_eq!(
        cache.lines(),
        &[
            Line::from("• hello"),
            Line::from("world"),
            Line::from(""),
            Line::from("• foo"),
            Line::from("bar")
        ]
    );
    assert_eq!(
        cache.line_meta(),
        &[
            TranscriptLineMeta::CellLine {
                cell_index: 0,
                line_in_cell: 0
            },
            TranscriptLineMeta::CellLine {
                cell_index: 0,
                line_in_cell: 1
            },
            TranscriptLineMeta::Spacer,
            TranscriptLineMeta::CellLine {
                cell_index: 1,
                line_in_cell: 0
            },
            TranscriptLineMeta::CellLine {
                cell_index: 1,
                line_in_cell: 1
            },
        ]
    );
    // Soft-wrapped continuation rows carry a single-space joiner.
    assert_eq!(
        cache.wrapped.transcript.joiner_before.as_slice(),
        &[
            None,
            Some(" ".to_string()),
            None,
            None,
            Some(" ".to_string()),
        ]
    );
}

// A width change must rebuild everything (wrapping is width-dependent).
#[test]
fn wrapped_cache_ensure_rebuilds_on_width_change() {
    let calls0 = Arc::new(AtomicUsize::new(0));
    let calls1 = Arc::new(AtomicUsize::new(0));
    let cells: Vec<Arc<dyn HistoryCell>> = vec![
        Arc::new(FakeCell::new(
            vec![Line::from("• hello world")],
            vec![None],
            false,
            calls0.clone(),
        )),
        Arc::new(FakeCell::new(
            vec![Line::from("• foo bar")],
            vec![None],
            false,
            calls1.clone(),
        )),
    ];
    let mut cache = TranscriptViewCache::new();
    cache.ensure_wrapped(&cells, 8);
    cache.ensure_wrapped(&cells, 10);
    // Every cell rendered twice: once per width.
    assert_eq!(calls0.load(Ordering::Relaxed), 2);
    assert_eq!(calls1.load(Ordering::Relaxed), 2);
    let expected = crate::transcript_render::build_wrapped_transcript_lines(&cells, 10);
    assert_eq!(cache.lines(), expected.lines.as_slice());
    assert_eq!(cache.line_meta(), expected.meta.as_slice());
    assert_eq!(
        cache.wrapped.transcript.joiner_before,
        expected.joiner_before
    );
}

// Shrinking the cell list (backtrack/fork) must trigger a rebuild of the prefix.
#[test]
fn wrapped_cache_ensure_rebuilds_on_truncation() {
    let calls0 = Arc::new(AtomicUsize::new(0));
    let calls1 = Arc::new(AtomicUsize::new(0));
    let cells: Vec<Arc<dyn HistoryCell>> = vec![
        Arc::new(FakeCell::new(
            vec![Line::from("• hello world")],
            vec![None],
            false,
            calls0.clone(),
        )),
        Arc::new(FakeCell::new(
            vec![Line::from("• foo bar")],
            vec![None],
            false,
            calls1.clone(),
        )),
    ];
    let mut cache = TranscriptViewCache::new();
    cache.ensure_wrapped(&cells, 8);
    cache.ensure_wrapped(&cells[..1], 8);
    // The second ensure is a rebuild of the truncated prefix; only the first cell is rendered.
    assert_eq!(calls0.load(Ordering::Relaxed), 2);
    assert_eq!(calls1.load(Ordering::Relaxed), 1);
    let expected = crate::transcript_render::build_wrapped_transcript_lines(&cells[..1], 8);
    assert_eq!(cache.lines(), expected.lines.as_slice());
    assert_eq!(cache.line_meta(), expected.meta.as_slice());
}
#[test]
fn wrapped_cache_ensure_with_zero_width_clears_without_calling_cell_render() {
let calls = Arc::new(AtomicUsize::new(0));
let cells: Vec<Arc<dyn HistoryCell>> = vec![Arc::new(FakeCell::new(
vec![Line::from("• hello world")],
vec![None],
false,
calls.clone(),
))];
let mut cache = TranscriptViewCache::new();
cache.ensure_wrapped(&cells, 0);
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | true |
openai/codex | https://github.com/openai/codex/blob/279283fe02bf0ce7f93a160db34dd8cf9c8f42c8/codex-rs/tui2/src/app_event.rs | codex-rs/tui2/src/app_event.rs | use std::path::PathBuf;
use codex_common::approval_presets::ApprovalPreset;
use codex_core::protocol::ConversationPathResponseEvent;
use codex_core::protocol::Event;
use codex_core::protocol::RateLimitSnapshot;
use codex_file_search::FileMatch;
use codex_protocol::openai_models::ModelPreset;
use crate::bottom_pane::ApprovalRequest;
use crate::history_cell::HistoryCell;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::openai_models::ReasoningEffort;
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub(crate) enum AppEvent {
    /// A protocol `Event` from the core conversation, forwarded to the UI layer.
    CodexEvent(Event),
    /// Start a new session.
    NewSession,
    /// Open the resume picker inside the running TUI session.
    OpenResumePicker,
    /// Request to exit the application gracefully.
    ExitRequest,
    /// Forward an `Op` to the Agent. Using an `AppEvent` for this avoids
    /// bubbling channels through layers of widgets.
    CodexOp(codex_core::protocol::Op),
    /// Kick off an asynchronous file search for the given query (text after
    /// the `@`). Previous searches may be cancelled by the app layer so there
    /// is at most one in-flight search.
    StartFileSearch(String),
    /// Result of a completed asynchronous file search. The `query` echoes the
    /// original search term so the UI can decide whether the results are
    /// still relevant.
    FileSearchResult {
        query: String,
        matches: Vec<FileMatch>,
    },
    /// Result of refreshing rate limits.
    RateLimitSnapshotFetched(RateLimitSnapshot),
    /// Result of computing a `/diff` command.
    DiffResult(String),
    /// Insert a rendered `HistoryCell` into the conversation transcript.
    InsertHistoryCell(Box<dyn HistoryCell>),
    /// Start the commit animation (see `CommitTick` for the frame driver).
    StartCommitAnimation,
    /// Stop the commit animation started by `StartCommitAnimation`.
    StopCommitAnimation,
    /// Periodic tick that advances the commit animation by one step.
    CommitTick,
    /// Update the current reasoning effort in the running app and widget.
    UpdateReasoningEffort(Option<ReasoningEffort>),
    /// Update the current model slug in the running app and widget.
    UpdateModel(String),
    /// Persist the selected model and reasoning effort to the appropriate config.
    PersistModelSelection {
        model: String,
        effort: Option<ReasoningEffort>,
    },
    /// Open the reasoning selection popup after picking a model.
    OpenReasoningPopup {
        model: ModelPreset,
    },
    /// Open the full model picker (non-auto models).
    OpenAllModelsPopup {
        models: Vec<ModelPreset>,
    },
    /// Open the confirmation prompt before enabling full access mode.
    OpenFullAccessConfirmation {
        preset: ApprovalPreset,
    },
    /// Open the Windows world-writable directories warning.
    /// If `preset` is `Some`, the confirmation will apply the provided
    /// approval/sandbox configuration on Continue; if `None`, it performs no
    /// policy change and only acknowledges/dismisses the warning.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    OpenWorldWritableWarningConfirmation {
        preset: Option<ApprovalPreset>,
        /// Up to 3 sample world-writable directories to display in the warning.
        sample_paths: Vec<String>,
        /// If there are more than `sample_paths`, this carries the remaining count.
        extra_count: usize,
        /// True when the scan failed (e.g. ACL query error) and protections could not be verified.
        failed_scan: bool,
    },
    /// Prompt to enable the Windows sandbox feature before using Agent mode.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    OpenWindowsSandboxEnablePrompt {
        preset: ApprovalPreset,
    },
    /// Enable the Windows sandbox feature and switch to Agent mode.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    EnableWindowsSandboxForAgentMode {
        preset: ApprovalPreset,
    },
    /// Update the current approval policy in the running app and widget.
    UpdateAskForApprovalPolicy(AskForApproval),
    /// Update the current sandbox policy in the running app and widget.
    UpdateSandboxPolicy(SandboxPolicy),
    /// Update whether the full access warning prompt has been acknowledged.
    UpdateFullAccessWarningAcknowledged(bool),
    /// Update whether the world-writable directories warning has been acknowledged.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    UpdateWorldWritableWarningAcknowledged(bool),
    /// Update whether the rate limit switch prompt has been acknowledged for the session.
    UpdateRateLimitSwitchPromptHidden(bool),
    /// Persist the acknowledgement flag for the full access warning prompt.
    PersistFullAccessWarningAcknowledged,
    /// Persist the acknowledgement flag for the world-writable directories warning.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    PersistWorldWritableWarningAcknowledged,
    /// Persist the acknowledgement flag for the rate limit switch prompt.
    PersistRateLimitSwitchPromptHidden,
    /// Persist the acknowledgement flag for the model migration prompt.
    PersistModelMigrationPromptAcknowledged {
        from_model: String,
        to_model: String,
    },
    /// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    SkipNextWorldWritableScan,
    /// Re-open the approval presets popup.
    OpenApprovalsPopup,
    /// Forwarded conversation history snapshot from the current conversation.
    ConversationHistory(ConversationPathResponseEvent),
    /// Open the branch picker option from the review popup.
    OpenReviewBranchPicker(PathBuf),
    /// Open the commit picker option from the review popup.
    OpenReviewCommitPicker(PathBuf),
    /// Open the custom prompt option from the review popup.
    OpenReviewCustomPrompt,
    /// Open the approval popup.
    FullScreenApprovalRequest(ApprovalRequest),
    /// Open the feedback note entry overlay after the user selects a category.
    OpenFeedbackNote {
        category: FeedbackCategory,
        include_logs: bool,
    },
    /// Open the upload consent popup for feedback after selecting a category.
    OpenFeedbackConsent {
        category: FeedbackCategory,
    },
}
/// Category a user selects when submitting feedback from the TUI
/// (consumed by the `OpenFeedbackNote` / `OpenFeedbackConsent` app events).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum FeedbackCategory {
    // The session produced a poor or incorrect result.
    BadResult,
    // The session produced a good result (positive feedback).
    GoodResult,
    // A bug report about the application itself.
    Bug,
    // Feedback that does not fit the other categories.
    Other,
}
| rust | Apache-2.0 | 279283fe02bf0ce7f93a160db34dd8cf9c8f42c8 | 2026-01-04T15:31:59.292600Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.