repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/encryption.rs
crates/atuin-client/src/encryption.rs
// The general idea is that we NEVER send cleartext history to the server // This way the odds of anything private ending up where it should not are // very low // The server authenticates via the usual username and password. This has // nothing to do with the encryption, and is purely authentication! The client // generates its own secret key, and encrypts all shell history with libsodium's // secretbox. The data is then sent to the server, where it is stored. All // clients must share the secret in order to be able to sync, as it is needed // to decrypt use std::{io::prelude::*, path::PathBuf}; use base64::prelude::{BASE64_STANDARD, Engine}; pub use crypto_secretbox::Key; use crypto_secretbox::{ AeadCore, AeadInPlace, KeyInit, XSalsa20Poly1305, aead::{Nonce, OsRng}, }; use eyre::{Context, Result, bail, ensure, eyre}; use fs_err as fs; use rmp::{Marker, decode::Bytes}; use serde::{Deserialize, Serialize}; use time::{OffsetDateTime, format_description::well_known::Rfc3339, macros::format_description}; use crate::{history::History, settings::Settings}; #[derive(Debug, Serialize, Deserialize)] pub struct EncryptedHistory { pub ciphertext: Vec<u8>, pub nonce: Nonce<XSalsa20Poly1305>, } pub fn generate_encoded_key() -> Result<(Key, String)> { let key = XSalsa20Poly1305::generate_key(&mut OsRng); let encoded = encode_key(&key)?; Ok((key, encoded)) } pub fn new_key(settings: &Settings) -> Result<Key> { let path = settings.key_path.as_str(); let path = PathBuf::from(path); if path.exists() { bail!("key already exists! cannot overwrite"); } let (key, encoded) = generate_encoded_key()?; let mut file = fs::File::create(path)?; file.write_all(encoded.as_bytes())?; Ok(key) } // Loads the secret key, will create + save if it doesn't exist pub fn load_key(settings: &Settings) -> Result<Key> { let path = settings.key_path.as_str(); let key = if PathBuf::from(path).exists() { let key = fs_err::read_to_string(path)?; decode_key(key)? } else { new_key(settings)? 
}; Ok(key) } pub fn encode_key(key: &Key) -> Result<String> { let mut buf = vec![]; rmp::encode::write_array_len(&mut buf, key.len() as u32) .wrap_err("could not encode key to message pack")?; for b in key { rmp::encode::write_uint(&mut buf, *b as u64) .wrap_err("could not encode key to message pack")?; } let buf = BASE64_STANDARD.encode(buf); Ok(buf) } pub fn decode_key(key: String) -> Result<Key> { use rmp::decode; let buf = BASE64_STANDARD .decode(key.trim_end()) .wrap_err("encryption key is not a valid base64 encoding")?; // old code wrote the key as a fixed length array of 32 bytes // new code writes the key with a length prefix match <[u8; 32]>::try_from(&*buf) { Ok(key) => Ok(key.into()), Err(_) => { let mut bytes = rmp::decode::Bytes::new(&buf); match Marker::from_u8(buf[0]) { Marker::Bin8 => { let len = decode::read_bin_len(&mut bytes).map_err(|err| eyre!("{err:?}"))?; ensure!(len == 32, "encryption key is not the correct size"); let key = <[u8; 32]>::try_from(bytes.remaining_slice()) .context("could not decode encryption key")?; Ok(key.into()) } Marker::Array16 => { let len = decode::read_array_len(&mut bytes).map_err(|err| eyre!("{err:?}"))?; ensure!(len == 32, "encryption key is not the correct size"); let mut key = Key::default(); for i in &mut key { *i = rmp::decode::read_int(&mut bytes).map_err(|err| eyre!("{err:?}"))?; } Ok(key) } _ => bail!("could not decode encryption key"), } } } } pub fn encrypt(history: &History, key: &Key) -> Result<EncryptedHistory> { // serialize with msgpack let mut buf = encode(history)?; let nonce = XSalsa20Poly1305::generate_nonce(&mut OsRng); XSalsa20Poly1305::new(key) .encrypt_in_place(&nonce, &[], &mut buf) .map_err(|_| eyre!("could not encrypt"))?; Ok(EncryptedHistory { ciphertext: buf, nonce, }) } pub fn decrypt(mut encrypted_history: EncryptedHistory, key: &Key) -> Result<History> { XSalsa20Poly1305::new(key) .decrypt_in_place( &encrypted_history.nonce, &[], &mut encrypted_history.ciphertext, ) .map_err(|_| 
eyre!("could not decrypt history"))?; let plaintext = encrypted_history.ciphertext; let history = decode(&plaintext)?; Ok(history) } fn format_rfc3339(ts: OffsetDateTime) -> Result<String> { // horrible hack. chrono AutoSI limits to 0, 3, 6, or 9 decimal places for nanoseconds. // time does not have this functionality. static PARTIAL_RFC3339_0: &[time::format_description::FormatItem<'static>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second]Z"); static PARTIAL_RFC3339_3: &[time::format_description::FormatItem<'static>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:3]Z"); static PARTIAL_RFC3339_6: &[time::format_description::FormatItem<'static>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:6]Z"); static PARTIAL_RFC3339_9: &[time::format_description::FormatItem<'static>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:9]Z"); let fmt = match ts.nanosecond() { 0 => PARTIAL_RFC3339_0, ns if ns % 1_000_000 == 0 => PARTIAL_RFC3339_3, ns if ns % 1_000 == 0 => PARTIAL_RFC3339_6, _ => PARTIAL_RFC3339_9, }; Ok(ts.format(fmt)?) 
} fn encode(h: &History) -> Result<Vec<u8>> { use rmp::encode; let mut output = vec![]; // INFO: ensure this is updated when adding new fields encode::write_array_len(&mut output, 9)?; encode::write_str(&mut output, &h.id.0)?; encode::write_str(&mut output, &(format_rfc3339(h.timestamp)?))?; encode::write_sint(&mut output, h.duration)?; encode::write_sint(&mut output, h.exit)?; encode::write_str(&mut output, &h.command)?; encode::write_str(&mut output, &h.cwd)?; encode::write_str(&mut output, &h.session)?; encode::write_str(&mut output, &h.hostname)?; match h.deleted_at { Some(d) => encode::write_str(&mut output, &format_rfc3339(d)?)?, None => encode::write_nil(&mut output)?, } Ok(output) } fn decode(bytes: &[u8]) -> Result<History> { use rmp::decode::{self, DecodeStringError}; let mut bytes = Bytes::new(bytes); let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?; if nfields < 8 { bail!("malformed decrypted history") } if nfields > 9 { bail!("cannot decrypt history from a newer version of atuin"); } let bytes = bytes.remaining_slice(); let (id, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (timestamp, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let mut bytes = Bytes::new(bytes); let duration = decode::read_int(&mut bytes).map_err(error_report)?; let exit = decode::read_int(&mut bytes).map_err(error_report)?; let bytes = bytes.remaining_slice(); let (command, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (cwd, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (session, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (hostname, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; // if we have more fields, try and get the deleted_at let mut deleted_at = None; let mut bytes = bytes; if nfields > 8 { bytes = match decode::read_str_from_slice(bytes) { Ok((d, b)) => { deleted_at = Some(d); b } // we accept null here 
Err(DecodeStringError::TypeMismatch(Marker::Null)) => { // consume the null marker let mut c = Bytes::new(bytes); decode::read_nil(&mut c).map_err(error_report)?; c.remaining_slice() } Err(err) => return Err(error_report(err)), }; } if !bytes.is_empty() { bail!("trailing bytes in encoded history. malformed") } Ok(History { id: id.to_owned().into(), timestamp: OffsetDateTime::parse(timestamp, &Rfc3339)?, duration, exit, command: command.to_owned(), cwd: cwd.to_owned(), session: session.to_owned(), hostname: hostname.to_owned(), deleted_at: deleted_at .map(|t| OffsetDateTime::parse(t, &Rfc3339)) .transpose()?, }) } fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } #[cfg(test)] mod test { use crypto_secretbox::{KeyInit, XSalsa20Poly1305, aead::OsRng}; use pretty_assertions::assert_eq; use time::{OffsetDateTime, macros::datetime}; use crate::history::History; use super::{decode, decrypt, encode, encrypt}; #[test] fn test_encrypt_decrypt() { let key1 = XSalsa20Poly1305::generate_key(&mut OsRng); let key2 = XSalsa20Poly1305::generate_key(&mut OsRng); let history = History::from_db() .id("1".into()) .timestamp(OffsetDateTime::now_utc()) .command("ls".into()) .cwd("/home/ellie".into()) .exit(0) .duration(1) .session("beep boop".into()) .hostname("booop".into()) .deleted_at(None) .build() .into(); let e1 = encrypt(&history, &key1).unwrap(); let e2 = encrypt(&history, &key2).unwrap(); assert_ne!(e1.ciphertext, e2.ciphertext); assert_ne!(e1.nonce, e2.nonce); // test decryption works // this should pass match decrypt(e1, &key1) { Err(e) => panic!("failed to decrypt, got {e}"), Ok(h) => assert_eq!(h, history), }; // this should err let _ = decrypt(e2, &key1).expect_err("expected an error decrypting with invalid key"); } #[test] fn test_decode() { let bytes = [ 0x99, 0xD9, 32, 54, 54, 100, 49, 54, 99, 98, 101, 101, 55, 99, 100, 52, 55, 53, 51, 56, 101, 53, 99, 53, 98, 56, 98, 52, 52, 101, 57, 48, 48, 54, 101, 187, 50, 48, 50, 51, 45, 48, 53, 45, 
50, 56, 84, 49, 56, 58, 51, 53, 58, 52, 48, 46, 54, 51, 51, 56, 55, 50, 90, 206, 2, 238, 210, 240, 0, 170, 103, 105, 116, 32, 115, 116, 97, 116, 117, 115, 217, 42, 47, 85, 115, 101, 114, 115, 47, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 47, 68, 111, 99, 117, 109, 101, 110, 116, 115, 47, 99, 111, 100, 101, 47, 97, 116, 117, 105, 110, 217, 32, 98, 57, 55, 100, 57, 97, 51, 48, 54, 102, 50, 55, 52, 52, 55, 51, 97, 50, 48, 51, 100, 50, 101, 98, 97, 52, 49, 102, 57, 52, 53, 55, 187, 102, 118, 102, 103, 57, 51, 54, 99, 48, 107, 112, 102, 58, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 192, ]; let history = History { id: "66d16cbee7cd47538e5c5b8b44e9006e".to_owned().into(), timestamp: datetime!(2023-05-28 18:35:40.633872 +00:00), duration: 49206000, exit: 0, command: "git status".to_owned(), cwd: "/Users/conrad.ludgate/Documents/code/atuin".to_owned(), session: "b97d9a306f274473a203d2eba41f9457".to_owned(), hostname: "fvfg936c0kpf:conrad.ludgate".to_owned(), deleted_at: None, }; let h = decode(&bytes).unwrap(); assert_eq!(history, h); let b = encode(&h).unwrap(); assert_eq!(&bytes, &*b); } #[test] fn test_decode_deleted() { let history = History { id: "66d16cbee7cd47538e5c5b8b44e9006e".to_owned().into(), timestamp: datetime!(2023-05-28 18:35:40.633872 +00:00), duration: 49206000, exit: 0, command: "git status".to_owned(), cwd: "/Users/conrad.ludgate/Documents/code/atuin".to_owned(), session: "b97d9a306f274473a203d2eba41f9457".to_owned(), hostname: "fvfg936c0kpf:conrad.ludgate".to_owned(), deleted_at: Some(datetime!(2023-05-28 18:35:40.633872 +00:00)), }; let b = encode(&history).unwrap(); let h = decode(&b).unwrap(); assert_eq!(history, h); } #[test] fn test_decode_old() { let bytes = [ 0x98, 0xD9, 32, 54, 54, 100, 49, 54, 99, 98, 101, 101, 55, 99, 100, 52, 55, 53, 51, 56, 101, 53, 99, 53, 98, 56, 98, 52, 52, 101, 57, 48, 48, 54, 101, 187, 50, 48, 50, 51, 45, 48, 53, 45, 50, 56, 84, 49, 56, 58, 51, 53, 58, 52, 48, 46, 54, 51, 
51, 56, 55, 50, 90, 206, 2, 238, 210, 240, 0, 170, 103, 105, 116, 32, 115, 116, 97, 116, 117, 115, 217, 42, 47, 85, 115, 101, 114, 115, 47, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 47, 68, 111, 99, 117, 109, 101, 110, 116, 115, 47, 99, 111, 100, 101, 47, 97, 116, 117, 105, 110, 217, 32, 98, 57, 55, 100, 57, 97, 51, 48, 54, 102, 50, 55, 52, 52, 55, 51, 97, 50, 48, 51, 100, 50, 101, 98, 97, 52, 49, 102, 57, 52, 53, 55, 187, 102, 118, 102, 103, 57, 51, 54, 99, 48, 107, 112, 102, 58, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, ]; let history = History { id: "66d16cbee7cd47538e5c5b8b44e9006e".to_owned().into(), timestamp: datetime!(2023-05-28 18:35:40.633872 +00:00), duration: 49206000, exit: 0, command: "git status".to_owned(), cwd: "/Users/conrad.ludgate/Documents/code/atuin".to_owned(), session: "b97d9a306f274473a203d2eba41f9457".to_owned(), hostname: "fvfg936c0kpf:conrad.ludgate".to_owned(), deleted_at: None, }; let h = decode(&bytes).unwrap(); assert_eq!(history, h); } #[test] fn key_encodings() { use super::{Key, decode_key, encode_key}; // a history of our key encodings. 
// v11.0.0 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // v12.0.0 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // v13.0.0 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // v13.0.1 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // v14.0.0 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // v14.0.1 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== // c7d89c1 3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q== (https://github.com/ellie/atuin/pull/805) // b53ca35 3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q== (https://github.com/ellie/atuin/pull/974) // v15.0.0 3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q== // b8b57c8 xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q== (https://github.com/ellie/atuin/pull/1057) // 8c94d79 3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q== (https://github.com/ellie/atuin/pull/1089) let key = Key::from([ 27, 91, 42, 91, 210, 107, 9, 216, 170, 190, 242, 62, 6, 84, 69, 148, 148, 53, 251, 117, 226, 167, 173, 52, 82, 34, 138, 110, 169, 124, 92, 229, ]); assert_eq!( encode_key(&key).unwrap(), "3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q==" ); // key encodings we have to support let valid_encodings = [ "xCAbWypb0msJ2Kq+8j4GVEWUlDX7deKnrTRSIopuqXxc5Q==", "3AAgG1sqW8zSawnM2MyqzL7M8j4GVEXMlMyUNcz7dczizKfMrTRSIsyKbsypfFzM5Q==", ]; for k in valid_encodings { assert_eq!(decode_key(k.to_owned()).expect(k), key); } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/database.rs
crates/atuin-client/src/database.rs
use std::{ borrow::Cow, env, path::{Path, PathBuf}, str::FromStr, time::Duration, }; use async_trait::async_trait; use atuin_common::utils; use fs_err as fs; use itertools::Itertools; use rand::{Rng, distributions::Alphanumeric}; use sql_builder::{SqlBuilder, SqlName, bind::Bind, esc, quote}; use sqlx::{ Result, Row, sqlite::{ SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions, SqliteRow, SqliteSynchronous, }, }; use time::OffsetDateTime; use uuid::Uuid; use crate::{ history::{HistoryId, HistoryStats}, utils::get_host_user, }; use super::{ history::History, ordering, settings::{FilterMode, SearchMode, Settings}, }; pub struct Context { pub session: String, pub cwd: String, pub hostname: String, pub host_id: String, pub git_root: Option<PathBuf>, } #[derive(Default, Clone)] pub struct OptFilters { pub exit: Option<i64>, pub exclude_exit: Option<i64>, pub cwd: Option<String>, pub exclude_cwd: Option<String>, pub before: Option<String>, pub after: Option<String>, pub limit: Option<i64>, pub offset: Option<i64>, pub reverse: bool, pub include_duplicates: bool, } pub fn current_context() -> Context { let Ok(session) = env::var("ATUIN_SESSION") else { eprintln!( "ERROR: Failed to find $ATUIN_SESSION in the environment. Check that you have correctly set up your shell." 
); std::process::exit(1); }; let hostname = get_host_user(); let cwd = utils::get_current_dir(); let host_id = Settings::host_id().expect("failed to load host ID"); let git_root = utils::in_git_repo(cwd.as_str()); Context { session, hostname, cwd, git_root, host_id: host_id.0.as_simple().to_string(), } } fn get_session_start_time(session_id: &str) -> Option<i64> { if let Ok(uuid) = Uuid::parse_str(session_id) && let Some(timestamp) = uuid.get_timestamp() { let (seconds, nanos) = timestamp.to_unix(); return Some(seconds as i64 * 1_000_000_000 + nanos as i64); } None } #[async_trait] pub trait Database: Send + Sync + 'static { async fn save(&self, h: &History) -> Result<()>; async fn save_bulk(&self, h: &[History]) -> Result<()>; async fn load(&self, id: &str) -> Result<Option<History>>; async fn list( &self, filters: &[FilterMode], context: &Context, max: Option<usize>, unique: bool, include_deleted: bool, ) -> Result<Vec<History>>; async fn range(&self, from: OffsetDateTime, to: OffsetDateTime) -> Result<Vec<History>>; async fn update(&self, h: &History) -> Result<()>; async fn history_count(&self, include_deleted: bool) -> Result<i64>; async fn last(&self) -> Result<Option<History>>; async fn before(&self, timestamp: OffsetDateTime, count: i64) -> Result<Vec<History>>; async fn delete(&self, h: History) -> Result<()>; async fn delete_rows(&self, ids: &[HistoryId]) -> Result<()>; async fn deleted(&self) -> Result<Vec<History>>; // Yes I know, it's a lot. // Could maybe break it down to a searchparams struct or smth but that feels a little... pointless. // Been debating maybe a DSL for search? 
eg "before:time limit:1 the query" #[allow(clippy::too_many_arguments)] async fn search( &self, search_mode: SearchMode, filter: FilterMode, context: &Context, query: &str, filter_options: OptFilters, ) -> Result<Vec<History>>; async fn query_history(&self, query: &str) -> Result<Vec<History>>; async fn all_with_count(&self) -> Result<Vec<(History, i32)>>; async fn stats(&self, h: &History) -> Result<HistoryStats>; async fn get_dups(&self, before: i64, dupkeep: u32) -> Result<Vec<History>>; } // Intended for use on a developer machine and not a sync server. // TODO: implement IntoIterator #[derive(Debug, Clone)] pub struct Sqlite { pub pool: SqlitePool, } impl Sqlite { pub async fn new(path: impl AsRef<Path>, timeout: f64) -> Result<Self> { let path = path.as_ref(); debug!("opening sqlite database at {path:?}"); if utils::broken_symlink(path) { eprintln!( "Atuin: Sqlite db path ({path:?}) is a broken symlink. Unable to read or create replacement." ); std::process::exit(1); } if !path.exists() && let Some(dir) = path.parent() { fs::create_dir_all(dir)?; } let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())? 
.journal_mode(SqliteJournalMode::Wal) .optimize_on_close(true, None) .synchronous(SqliteSynchronous::Normal) .with_regexp() .create_if_missing(true); let pool = SqlitePoolOptions::new() .acquire_timeout(Duration::from_secs_f64(timeout)) .connect_with(opts) .await?; Self::setup_db(&pool).await?; Ok(Self { pool }) } pub async fn sqlite_version(&self) -> Result<String> { sqlx::query_scalar("SELECT sqlite_version()") .fetch_one(&self.pool) .await } async fn setup_db(pool: &SqlitePool) -> Result<()> { debug!("running sqlite database setup"); sqlx::migrate!("./migrations").run(pool).await?; Ok(()) } async fn save_raw(tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, h: &History) -> Result<()> { sqlx::query( "insert or ignore into history(id, timestamp, duration, exit, command, cwd, session, hostname, deleted_at) values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", ) .bind(h.id.0.as_str()) .bind(h.timestamp.unix_timestamp_nanos() as i64) .bind(h.duration) .bind(h.exit) .bind(h.command.as_str()) .bind(h.cwd.as_str()) .bind(h.session.as_str()) .bind(h.hostname.as_str()) .bind(h.deleted_at.map(|t|t.unix_timestamp_nanos() as i64)) .execute(&mut **tx) .await?; Ok(()) } async fn delete_row_raw( tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, id: HistoryId, ) -> Result<()> { sqlx::query("delete from history where id = ?1") .bind(id.0.as_str()) .execute(&mut **tx) .await?; Ok(()) } fn query_history(row: SqliteRow) -> History { let deleted_at: Option<i64> = row.get("deleted_at"); History::from_db() .id(row.get("id")) .timestamp( OffsetDateTime::from_unix_timestamp_nanos(row.get::<i64, _>("timestamp") as i128) .unwrap(), ) .duration(row.get("duration")) .exit(row.get("exit")) .command(row.get("command")) .cwd(row.get("cwd")) .session(row.get("session")) .hostname(row.get("hostname")) .deleted_at( deleted_at.and_then(|t| OffsetDateTime::from_unix_timestamp_nanos(t as i128).ok()), ) .build() .into() } } #[async_trait] impl Database for Sqlite { async fn save(&self, h: &History) -> Result<()> { 
debug!("saving history to sqlite"); let mut tx = self.pool.begin().await?; Self::save_raw(&mut tx, h).await?; tx.commit().await?; Ok(()) } async fn save_bulk(&self, h: &[History]) -> Result<()> { debug!("saving history to sqlite"); let mut tx = self.pool.begin().await?; for i in h { Self::save_raw(&mut tx, i).await?; } tx.commit().await?; Ok(()) } async fn load(&self, id: &str) -> Result<Option<History>> { debug!("loading history item {}", id); let res = sqlx::query("select * from history where id = ?1") .bind(id) .map(Self::query_history) .fetch_optional(&self.pool) .await?; Ok(res) } async fn update(&self, h: &History) -> Result<()> { debug!("updating sqlite history"); sqlx::query( "update history set timestamp = ?2, duration = ?3, exit = ?4, command = ?5, cwd = ?6, session = ?7, hostname = ?8, deleted_at = ?9 where id = ?1", ) .bind(h.id.0.as_str()) .bind(h.timestamp.unix_timestamp_nanos() as i64) .bind(h.duration) .bind(h.exit) .bind(h.command.as_str()) .bind(h.cwd.as_str()) .bind(h.session.as_str()) .bind(h.hostname.as_str()) .bind(h.deleted_at.map(|t|t.unix_timestamp_nanos() as i64)) .execute(&self.pool) .await?; Ok(()) } // make a unique list, that only shows the *newest* version of things async fn list( &self, filters: &[FilterMode], context: &Context, max: Option<usize>, unique: bool, include_deleted: bool, ) -> Result<Vec<History>> { debug!("listing history"); let mut query = SqlBuilder::select_from(SqlName::new("history").alias("h").baquoted()); query.field("*").order_desc("timestamp"); if !include_deleted { query.and_where_is_null("deleted_at"); } let git_root = if let Some(git_root) = context.git_root.clone() { git_root.to_str().unwrap_or("/").to_string() } else { context.cwd.clone() }; let session_start = get_session_start_time(&context.session); for filter in filters { match filter { FilterMode::Global => &mut query, FilterMode::Host => query.and_where_eq("hostname", quote(&context.hostname)), FilterMode::Session => query.and_where_eq("session", 
quote(&context.session)), FilterMode::SessionPreload => { query.and_where_eq("session", quote(&context.session)); if let Some(session_start) = session_start { query.or_where_lt("timestamp", session_start); } &mut query } FilterMode::Directory => query.and_where_eq("cwd", quote(&context.cwd)), FilterMode::Workspace => query.and_where_like_left("cwd", &git_root), }; } if unique { query.group_by("command").having("max(timestamp)"); } if let Some(max) = max { query.limit(max); } let query = query.sql().expect("bug in list query. please report"); let res = sqlx::query(&query) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } async fn range(&self, from: OffsetDateTime, to: OffsetDateTime) -> Result<Vec<History>> { debug!("listing history from {:?} to {:?}", from, to); let res = sqlx::query( "select * from history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc", ) .bind(from.unix_timestamp_nanos() as i64) .bind(to.unix_timestamp_nanos() as i64) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } async fn last(&self) -> Result<Option<History>> { let res = sqlx::query( "select * from history where duration >= 0 order by timestamp desc limit 1", ) .map(Self::query_history) .fetch_optional(&self.pool) .await?; Ok(res) } async fn before(&self, timestamp: OffsetDateTime, count: i64) -> Result<Vec<History>> { let res = sqlx::query( "select * from history where timestamp < ?1 order by timestamp desc limit ?2", ) .bind(timestamp.unix_timestamp_nanos() as i64) .bind(count) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } async fn deleted(&self) -> Result<Vec<History>> { let res = sqlx::query("select * from history where deleted_at is not null") .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } async fn history_count(&self, include_deleted: bool) -> Result<i64> { let query = if include_deleted { "select count(1) from history" } else { "select count(1) from history where deleted_at is null" }; 
let res: (i64,) = sqlx::query_as(query).fetch_one(&self.pool).await?; Ok(res.0) } async fn search( &self, search_mode: SearchMode, filter: FilterMode, context: &Context, query: &str, filter_options: OptFilters, ) -> Result<Vec<History>> { let mut sql = SqlBuilder::select_from("history"); if !filter_options.include_duplicates { sql.group_by("command").having("max(timestamp)"); } if let Some(limit) = filter_options.limit { sql.limit(limit); } if let Some(offset) = filter_options.offset { sql.offset(offset); } if filter_options.reverse { sql.order_asc("timestamp"); } else { sql.order_desc("timestamp"); } let git_root = if let Some(git_root) = context.git_root.clone() { git_root.to_str().unwrap_or("/").to_string() } else { context.cwd.clone() }; let session_start = get_session_start_time(&context.session); match filter { FilterMode::Global => &mut sql, FilterMode::Host => { sql.and_where_eq("lower(hostname)", quote(context.hostname.to_lowercase())) } FilterMode::Session => sql.and_where_eq("session", quote(&context.session)), FilterMode::SessionPreload => { sql.and_where_eq("session", quote(&context.session)); if let Some(session_start) = session_start { sql.or_where_lt("timestamp", session_start); } &mut sql } FilterMode::Directory => sql.and_where_eq("cwd", quote(&context.cwd)), FilterMode::Workspace => sql.and_where_like_left("cwd", git_root), }; let orig_query = query; let mut regexes = Vec::new(); match search_mode { SearchMode::Prefix => sql.and_where_like_left("command", query.replace('*', "%")), _ => { let mut is_or = false; let mut regex = None; for part in query.split_inclusive(' ') { let query_part: Cow<str> = match (&mut regex, part.starts_with("r/")) { (None, false) => { if part.trim_end().is_empty() { continue; } Cow::Owned(part.trim_end().replace('*', "%")) // allow wildcard char } (None, true) => { if part[2..].trim_end().ends_with('/') { let end_pos = part.trim_end().len() - 1; regexes.push(String::from(&part[2..end_pos])); } else { regex = 
Some(String::from(&part[2..])); } continue; } (Some(r), _) => { if part.trim_end().ends_with('/') { let end_pos = part.trim_end().len() - 1; r.push_str(&part.trim_end()[..end_pos]); regexes.push(regex.take().unwrap()); } else { r.push_str(part); } continue; } }; // TODO smart case mode could be made configurable like in fzf let (is_glob, glob) = if query_part.contains(char::is_uppercase) { (true, "*") } else { (false, "%") }; let (is_inverse, query_part) = match query_part.strip_prefix('!') { Some(stripped) => (true, Cow::Borrowed(stripped)), None => (false, query_part), }; #[allow(clippy::if_same_then_else)] let param = if query_part == "|" { if !is_or { is_or = true; continue; } else { format!("{glob}|{glob}") } } else if let Some(term) = query_part.strip_prefix('^') { format!("{term}{glob}") } else if let Some(term) = query_part.strip_suffix('$') { format!("{glob}{term}") } else if let Some(term) = query_part.strip_prefix('\'') { format!("{glob}{term}{glob}") } else if is_inverse { format!("{glob}{query_part}{glob}") } else if search_mode == SearchMode::FullText { format!("{glob}{query_part}{glob}") } else { query_part.split("").join(glob) }; sql.fuzzy_condition("command", param, is_inverse, is_glob, is_or); is_or = false; } if let Some(r) = regex { regexes.push(r); } &mut sql } }; for regex in regexes { sql.and_where("command regexp ?".bind(&regex)); } filter_options .exit .map(|exit| sql.and_where_eq("exit", exit)); filter_options .exclude_exit .map(|exclude_exit| sql.and_where_ne("exit", exclude_exit)); filter_options .cwd .map(|cwd| sql.and_where_eq("cwd", quote(cwd))); filter_options .exclude_cwd .map(|exclude_cwd| sql.and_where_ne("cwd", quote(exclude_cwd))); filter_options.before.map(|before| { interim::parse_date_string( before.as_str(), OffsetDateTime::now_utc(), interim::Dialect::Uk, ) .map(|before| { sql.and_where_lt("timestamp", quote(before.unix_timestamp_nanos() as i64)) }) }); filter_options.after.map(|after| { interim::parse_date_string( 
after.as_str(), OffsetDateTime::now_utc(), interim::Dialect::Uk, ) .map(|after| sql.and_where_gt("timestamp", quote(after.unix_timestamp_nanos() as i64))) }); sql.and_where_is_null("deleted_at"); let query = sql.sql().expect("bug in search query. please report"); let res = sqlx::query(&query) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(ordering::reorder_fuzzy(search_mode, orig_query, res)) } async fn query_history(&self, query: &str) -> Result<Vec<History>> { let res = sqlx::query(query) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } async fn all_with_count(&self) -> Result<Vec<(History, i32)>> { debug!("listing history"); let mut query = SqlBuilder::select_from(SqlName::new("history").alias("h").baquoted()); query .fields(&[ "id", "max(timestamp) as timestamp", "max(duration) as duration", "exit", "command", "deleted_at", "group_concat(cwd, ':') as cwd", "group_concat(session) as session", "group_concat(hostname, ',') as hostname", "count(*) as count", ]) .group_by("command") .group_by("exit") .and_where("deleted_at is null") .order_desc("timestamp"); let query = query.sql().expect("bug in list query. 
please report"); let res = sqlx::query(&query) .map(|row: SqliteRow| { let count: i32 = row.get("count"); (Self::query_history(row), count) }) .fetch_all(&self.pool) .await?; Ok(res) } // deleted_at doesn't mean the actual time that the user deleted it, // but the time that the system marks it as deleted async fn delete(&self, mut h: History) -> Result<()> { let now = OffsetDateTime::now_utc(); h.command = rand::thread_rng() .sample_iter(&Alphanumeric) .take(32) .map(char::from) .collect(); // overwrite with random string h.deleted_at = Some(now); // delete it self.update(&h).await?; // save it Ok(()) } async fn delete_rows(&self, ids: &[HistoryId]) -> Result<()> { let mut tx = self.pool.begin().await?; for id in ids { Self::delete_row_raw(&mut tx, id.clone()).await?; } tx.commit().await?; Ok(()) } async fn stats(&self, h: &History) -> Result<HistoryStats> { // We select the previous in the session by time let mut prev = SqlBuilder::select_from("history"); prev.field("*") .and_where("timestamp < ?1") .and_where("session = ?2") .order_by("timestamp", true) .limit(1); let mut next = SqlBuilder::select_from("history"); next.field("*") .and_where("timestamp > ?1") .and_where("session = ?2") .order_by("timestamp", false) .limit(1); let mut total = SqlBuilder::select_from("history"); total.field("count(1)").and_where("command = ?1"); let mut average = SqlBuilder::select_from("history"); average.field("avg(duration)").and_where("command = ?1"); let mut exits = SqlBuilder::select_from("history"); exits .fields(&["exit", "count(1) as count"]) .and_where("command = ?1") .group_by("exit"); // rewrite the following with sqlbuilder let mut day_of_week = SqlBuilder::select_from("history"); day_of_week .fields(&[ "strftime('%w', ROUND(timestamp / 1000000000), 'unixepoch') AS day_of_week", "count(1) as count", ]) .and_where("command = ?1") .group_by("day_of_week"); // Intentionally format the string with 01 hardcoded. 
We want the average runtime for the // _entire month_, but will later parse it as a datetime for sorting // Sqlite has no datetime so we cannot do it there, and otherwise sorting will just be a // string sort, which won't be correct. let mut duration_over_time = SqlBuilder::select_from("history"); duration_over_time .fields(&[ "strftime('01-%m-%Y', ROUND(timestamp / 1000000000), 'unixepoch') AS month_year", "avg(duration) as duration", ]) .and_where("command = ?1") .group_by("month_year") .having("duration > 0"); let prev = prev.sql().expect("issue in stats previous query"); let next = next.sql().expect("issue in stats next query"); let total = total.sql().expect("issue in stats average query"); let average = average.sql().expect("issue in stats previous query"); let exits = exits.sql().expect("issue in stats exits query"); let day_of_week = day_of_week.sql().expect("issue in stats day of week query"); let duration_over_time = duration_over_time .sql() .expect("issue in stats duration over time query"); let prev = sqlx::query(&prev) .bind(h.timestamp.unix_timestamp_nanos() as i64) .bind(&h.session) .map(Self::query_history) .fetch_optional(&self.pool) .await?; let next = sqlx::query(&next) .bind(h.timestamp.unix_timestamp_nanos() as i64) .bind(&h.session) .map(Self::query_history) .fetch_optional(&self.pool) .await?; let total: (i64,) = sqlx::query_as(&total) .bind(&h.command) .fetch_one(&self.pool) .await?; let average: (f64,) = sqlx::query_as(&average) .bind(&h.command) .fetch_one(&self.pool) .await?; let exits: Vec<(i64, i64)> = sqlx::query_as(&exits) .bind(&h.command) .fetch_all(&self.pool) .await?; let day_of_week: Vec<(String, i64)> = sqlx::query_as(&day_of_week) .bind(&h.command) .fetch_all(&self.pool) .await?; let duration_over_time: Vec<(String, f64)> = sqlx::query_as(&duration_over_time) .bind(&h.command) .fetch_all(&self.pool) .await?; let duration_over_time = duration_over_time .iter() .map(|f| (f.0.clone(), f.1.round() as i64)) .collect(); 
Ok(HistoryStats { next, previous: prev, total: total.0 as u64, average_duration: average.0 as u64, exits, day_of_week, duration_over_time, }) } async fn get_dups(&self, before: i64, dupkeep: u32) -> Result<Vec<History>> { let res = sqlx::query( "SELECT * FROM ( SELECT *, ROW_NUMBER() OVER (PARTITION BY command, cwd, hostname ORDER BY timestamp DESC) AS rn FROM history ) sub WHERE rn > ?1 and timestamp < ?2; ", ) .bind(dupkeep) .bind(before) .map(Self::query_history) .fetch_all(&self.pool) .await?; Ok(res) } } trait SqlBuilderExt { fn fuzzy_condition<S: ToString, T: ToString>( &mut self, field: S, mask: T, inverse: bool, glob: bool, is_or: bool, ) -> &mut Self; } impl SqlBuilderExt for SqlBuilder { /// adapted from the sql-builder *like functions fn fuzzy_condition<S: ToString, T: ToString>( &mut self, field: S, mask: T, inverse: bool, glob: bool, is_or: bool, ) -> &mut Self { let mut cond = field.to_string(); if inverse { cond.push_str(" NOT"); } if glob { cond.push_str(" GLOB '"); } else { cond.push_str(" LIKE '"); } cond.push_str(&esc(mask.to_string())); cond.push('\''); if is_or { self.or_where(cond) } else { self.and_where(cond) } } } #[cfg(test)] mod test { use crate::settings::test_local_timeout; use super::*; use std::time::{Duration, Instant}; async fn assert_search_eq( db: &impl Database, mode: SearchMode, filter_mode: FilterMode, query: &str, expected: usize, ) -> Result<Vec<History>> { let context = Context { hostname: "test:host".to_string(), session: "beepboopiamasession".to_string(), cwd: "/home/ellie".to_string(), host_id: "test-host".to_string(), git_root: None, }; let results = db .search( mode, filter_mode, &context, query, OptFilters { ..Default::default() }, ) .await?; assert_eq!( results.len(), expected, "query \"{}\", commands: {:?}", query, results.iter().map(|a| &a.command).collect::<Vec<&String>>() ); Ok(results) } async fn assert_search_commands( db: &impl Database, mode: SearchMode, filter_mode: FilterMode, query: &str, expected_commands: 
Vec<&str>, ) { let results = assert_search_eq(db, mode, filter_mode, query, expected_commands.len()) .await .unwrap(); let commands: Vec<&str> = results.iter().map(|a| a.command.as_str()).collect(); assert_eq!(commands, expected_commands); } async fn new_history_item(db: &mut impl Database, cmd: &str) -> Result<()> { let mut captured: History = History::capture() .timestamp(OffsetDateTime::now_utc()) .command(cmd) .cwd("/home/ellie") .build() .into(); captured.exit = 0; captured.duration = 1; captured.session = "beep boop".to_string(); captured.hostname = "booop".to_string(); db.save(&captured).await } #[tokio::test(flavor = "multi_thread")] async fn test_search_prefix() { let mut db = Sqlite::new("sqlite::memory:", test_local_timeout()) .await .unwrap(); new_history_item(&mut db, "ls /home/ellie").await.unwrap(); assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "ls", 1) .await .unwrap(); assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "/home", 0) .await .unwrap(); assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "ls ", 0) .await .unwrap(); } #[tokio::test(flavor = "multi_thread")] async fn test_search_fulltext() { let mut db = Sqlite::new("sqlite::memory:", test_local_timeout()) .await .unwrap(); new_history_item(&mut db, "ls /home/ellie").await.unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "ls", 1) .await .unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "/home", 1) .await .unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "ls ho", 1) .await .unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "hm", 0) .await .unwrap(); // regex assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "r/^ls ", 1) .await .unwrap(); assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "r/ls / ie$", 1, ) .await .unwrap(); assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "r/ls / !ie", 0, ) .await .unwrap(); 
assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "meow r/ls/", 0, ) .await .unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "r//hom/", 1) .await .unwrap(); assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "r//home//", 1, ) .await .unwrap(); assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "r//home///", 0, ) .await .unwrap(); assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "/home.*e", 0) .await .unwrap(); assert_search_eq( &db, SearchMode::FullText, FilterMode::Global, "r/home.*e", 1, ) .await .unwrap(); } #[tokio::test(flavor = "multi_thread")]
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
true
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/history.rs
crates/atuin-client/src/history.rs
use core::fmt::Formatter; use rmp::decode::ValueReadError; use rmp::{Marker, decode::Bytes}; use std::env; use std::fmt::Display; use atuin_common::record::DecryptedData; use atuin_common::utils::uuid_v7; use eyre::{Result, bail, eyre}; use crate::secrets::SECRET_PATTERNS_RE; use crate::settings::Settings; use crate::utils::get_host_user; use time::OffsetDateTime; mod builder; pub mod store; const HISTORY_VERSION: &str = "v0"; pub const HISTORY_TAG: &str = "history"; #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct HistoryId(pub String); impl Display for HistoryId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } impl From<String> for HistoryId { fn from(s: String) -> Self { Self(s) } } /// Client-side history entry. /// /// Client stores data unencrypted, and only encrypts it before sending to the server. /// /// To create a new history entry, use one of the builders: /// - [`History::import()`] to import an entry from the shell history file /// - [`History::capture()`] to capture an entry via hook /// - [`History::from_db()`] to create an instance from the database entry // // ## Implementation Notes // // New fields must should be added to `encryption::{encode, decode}` in a backwards // compatible way. (eg sensible defaults and updating the nfields parameter) #[derive(Debug, Clone, PartialEq, Eq, sqlx::FromRow)] pub struct History { /// A client-generated ID, used to identify the entry when syncing. /// /// Stored as `client_id` in the database. pub id: HistoryId, /// When the command was run. pub timestamp: OffsetDateTime, /// How long the command took to run. pub duration: i64, /// The exit code of the command. pub exit: i64, /// The command that was run. pub command: String, /// The current working directory when the command was run. pub cwd: String, /// The session ID, associated with a terminal session. pub session: String, /// The hostname of the machine the command was run on. 
pub hostname: String, /// Timestamp, which is set when the entry is deleted, allowing a soft delete. pub deleted_at: Option<OffsetDateTime>, } #[derive(Debug, Clone, PartialEq, Eq, sqlx::FromRow)] pub struct HistoryStats { /// The command that was ran after this one in the session pub next: Option<History>, /// /// The command that was ran before this one in the session pub previous: Option<History>, /// How many times has this command been ran? pub total: u64, pub average_duration: u64, pub exits: Vec<(i64, i64)>, pub day_of_week: Vec<(String, i64)>, pub duration_over_time: Vec<(String, i64)>, } impl History { #[allow(clippy::too_many_arguments)] fn new( timestamp: OffsetDateTime, command: String, cwd: String, exit: i64, duration: i64, session: Option<String>, hostname: Option<String>, deleted_at: Option<OffsetDateTime>, ) -> Self { let session = session .or_else(|| env::var("ATUIN_SESSION").ok()) .unwrap_or_else(|| uuid_v7().as_simple().to_string()); let hostname = hostname.unwrap_or_else(get_host_user); Self { id: uuid_v7().as_simple().to_string().into(), timestamp, command, cwd, exit, duration, session, hostname, deleted_at, } } pub fn serialize(&self) -> Result<DecryptedData> { // This is pretty much the same as what we used for the old history, with one difference - // it uses integers for timestamps rather than a string format. 
use rmp::encode; let mut output = vec![]; // write the version encode::write_u16(&mut output, 0)?; // INFO: ensure this is updated when adding new fields encode::write_array_len(&mut output, 9)?; encode::write_str(&mut output, &self.id.0)?; encode::write_u64(&mut output, self.timestamp.unix_timestamp_nanos() as u64)?; encode::write_sint(&mut output, self.duration)?; encode::write_sint(&mut output, self.exit)?; encode::write_str(&mut output, &self.command)?; encode::write_str(&mut output, &self.cwd)?; encode::write_str(&mut output, &self.session)?; encode::write_str(&mut output, &self.hostname)?; match self.deleted_at { Some(d) => encode::write_u64(&mut output, d.unix_timestamp_nanos() as u64)?, None => encode::write_nil(&mut output)?, } Ok(DecryptedData(output)) } fn deserialize_v0(bytes: &[u8]) -> Result<History> { use rmp::decode; fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } let mut bytes = Bytes::new(bytes); let version = decode::read_u16(&mut bytes).map_err(error_report)?; if version != 0 { bail!("expected decoding v0 record, found v{version}"); } let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?; if nfields != 9 { bail!("cannot decrypt history from a different version of Atuin"); } let bytes = bytes.remaining_slice(); let (id, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let mut bytes = Bytes::new(bytes); let timestamp = decode::read_u64(&mut bytes).map_err(error_report)?; let duration = decode::read_int(&mut bytes).map_err(error_report)?; let exit = decode::read_int(&mut bytes).map_err(error_report)?; let bytes = bytes.remaining_slice(); let (command, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (cwd, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (session, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (hostname, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; // if we have more 
fields, try and get the deleted_at let mut bytes = Bytes::new(bytes); let (deleted_at, bytes) = match decode::read_u64(&mut bytes) { Ok(unix) => (Some(unix), bytes.remaining_slice()), // we accept null here Err(ValueReadError::TypeMismatch(Marker::Null)) => (None, bytes.remaining_slice()), Err(err) => return Err(error_report(err)), }; if !bytes.is_empty() { bail!("trailing bytes in encoded history. malformed") } Ok(History { id: id.to_owned().into(), timestamp: OffsetDateTime::from_unix_timestamp_nanos(timestamp as i128)?, duration, exit, command: command.to_owned(), cwd: cwd.to_owned(), session: session.to_owned(), hostname: hostname.to_owned(), deleted_at: deleted_at .map(|t| OffsetDateTime::from_unix_timestamp_nanos(t as i128)) .transpose()?, }) } pub fn deserialize(bytes: &[u8], version: &str) -> Result<History> { match version { HISTORY_VERSION => Self::deserialize_v0(bytes), _ => bail!("unknown version {version:?}"), } } /// Builder for a history entry that is imported from shell history. /// /// The only two required fields are `timestamp` and `command`. 
/// /// ## Examples /// ``` /// use atuin_client::history::History; /// /// let history: History = History::import() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .build() /// .into(); /// ``` /// /// If shell history contains more information, it can be added to the builder: /// ``` /// use atuin_client::history::History; /// /// let history: History = History::import() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .cwd("/home/user") /// .exit(0) /// .duration(100) /// .build() /// .into(); /// ``` /// /// Unknown command or command without timestamp cannot be imported, which /// is forced at compile time: /// /// ```compile_fail /// use atuin_client::history::History; /// /// // this will not compile because timestamp is missing /// let history: History = History::import() /// .command("ls -la") /// .build() /// .into(); /// ``` pub fn import() -> builder::HistoryImportedBuilder { builder::HistoryImported::builder() } /// Builder for a history entry that is captured via hook. /// /// This builder is used only at the `start` step of the hook, /// so it doesn't have any fields which are known only after /// the command is finished, such as `exit` or `duration`. 
/// /// ## Examples /// ```rust /// use atuin_client::history::History; /// /// let history: History = History::capture() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .cwd("/home/user") /// .build() /// .into(); /// ``` /// /// Command without any required info cannot be captured, which is forced at compile time: /// /// ```compile_fail /// use atuin_client::history::History; /// /// // this will not compile because `cwd` is missing /// let history: History = History::capture() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .build() /// .into(); /// ``` pub fn capture() -> builder::HistoryCapturedBuilder { builder::HistoryCaptured::builder() } /// Builder for a history entry that is captured via hook, and sent to the daemon. /// /// This builder is used only at the `start` step of the hook, /// so it doesn't have any fields which are known only after /// the command is finished, such as `exit` or `duration`. /// /// It does, however, include information that can usually be inferred. 
/// /// This is because the daemon we are sending a request to lacks the context of the command /// /// ## Examples /// ```rust /// use atuin_client::history::History; /// /// let history: History = History::daemon() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .cwd("/home/user") /// .session("018deb6e8287781f9973ef40e0fde76b") /// .hostname("computer:ellie") /// .build() /// .into(); /// ``` /// /// Command without any required info cannot be captured, which is forced at compile time: /// /// ```compile_fail /// use atuin_client::history::History; /// /// // this will not compile because `hostname` is missing /// let history: History = History::daemon() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la") /// .cwd("/home/user") /// .session("018deb6e8287781f9973ef40e0fde76b") /// .build() /// .into(); /// ``` pub fn daemon() -> builder::HistoryDaemonCaptureBuilder { builder::HistoryDaemonCapture::builder() } /// Builder for a history entry that is imported from the database. /// /// All fields are required, as they are all present in the database. 
/// /// ```compile_fail /// use atuin_client::history::History; /// /// // this will not compile because `id` field is missing /// let history: History = History::from_db() /// .timestamp(time::OffsetDateTime::now_utc()) /// .command("ls -la".to_string()) /// .cwd("/home/user".to_string()) /// .exit(0) /// .duration(100) /// .session("somesession".to_string()) /// .hostname("localhost".to_string()) /// .deleted_at(None) /// .build() /// .into(); /// ``` pub fn from_db() -> builder::HistoryFromDbBuilder { builder::HistoryFromDb::builder() } pub fn success(&self) -> bool { self.exit == 0 || self.duration == -1 } pub fn should_save(&self, settings: &Settings) -> bool { !(self.command.starts_with(' ') || self.command.is_empty() || settings.history_filter.is_match(&self.command) || settings.cwd_filter.is_match(&self.cwd) || (settings.secrets_filter && SECRET_PATTERNS_RE.is_match(&self.command))) } } #[cfg(test)] mod tests { use regex::RegexSet; use time::macros::datetime; use crate::{history::HISTORY_VERSION, settings::Settings}; use super::History; // Test that we don't save history where necessary #[test] fn privacy_test() { let settings = Settings { cwd_filter: RegexSet::new(["^/supasecret"]).unwrap(), history_filter: RegexSet::new(["^psql"]).unwrap(), ..Settings::utc() }; let normal_command: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("echo foo") .cwd("/") .build() .into(); let with_space: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command(" echo bar") .cwd("/") .build() .into(); let empty: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("") .cwd("/") .build() .into(); let stripe_key: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("curl foo.com/bar?key=sk_test_1234567890abcdefghijklmnop") .cwd("/") .build() .into(); let secret_dir: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("echo ohno") 
.cwd("/supasecret") .build() .into(); let with_psql: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("psql") .cwd("/supasecret") .build() .into(); assert!(normal_command.should_save(&settings)); assert!(!with_space.should_save(&settings)); assert!(!empty.should_save(&settings)); assert!(!stripe_key.should_save(&settings)); assert!(!secret_dir.should_save(&settings)); assert!(!with_psql.should_save(&settings)); } #[test] fn disable_secrets() { let settings = Settings { secrets_filter: false, ..Settings::utc() }; let stripe_key: History = History::capture() .timestamp(time::OffsetDateTime::now_utc()) .command("curl foo.com/bar?key=sk_test_1234567890abcdefghijklmnop") .cwd("/") .build() .into(); assert!(stripe_key.should_save(&settings)); } #[test] fn test_serialize_deserialize() { let bytes = [ 205, 0, 0, 153, 217, 32, 54, 54, 100, 49, 54, 99, 98, 101, 101, 55, 99, 100, 52, 55, 53, 51, 56, 101, 53, 99, 53, 98, 56, 98, 52, 52, 101, 57, 48, 48, 54, 101, 207, 23, 99, 98, 117, 24, 210, 246, 128, 206, 2, 238, 210, 240, 0, 170, 103, 105, 116, 32, 115, 116, 97, 116, 117, 115, 217, 42, 47, 85, 115, 101, 114, 115, 47, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 47, 68, 111, 99, 117, 109, 101, 110, 116, 115, 47, 99, 111, 100, 101, 47, 97, 116, 117, 105, 110, 217, 32, 98, 57, 55, 100, 57, 97, 51, 48, 54, 102, 50, 55, 52, 52, 55, 51, 97, 50, 48, 51, 100, 50, 101, 98, 97, 52, 49, 102, 57, 52, 53, 55, 187, 102, 118, 102, 103, 57, 51, 54, 99, 48, 107, 112, 102, 58, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 192, ]; let history = History { id: "66d16cbee7cd47538e5c5b8b44e9006e".to_owned().into(), timestamp: datetime!(2023-05-28 18:35:40.633872 +00:00), duration: 49206000, exit: 0, command: "git status".to_owned(), cwd: "/Users/conrad.ludgate/Documents/code/atuin".to_owned(), session: "b97d9a306f274473a203d2eba41f9457".to_owned(), hostname: "fvfg936c0kpf:conrad.ludgate".to_owned(), deleted_at: None, }; let 
serialized = history.serialize().expect("failed to serialize history"); assert_eq!(serialized.0, bytes); let deserialized = History::deserialize(&serialized.0, HISTORY_VERSION) .expect("failed to deserialize history"); assert_eq!(history, deserialized); // test the snapshot too let deserialized = History::deserialize(&bytes, HISTORY_VERSION).expect("failed to deserialize history"); assert_eq!(history, deserialized); } #[test] fn test_serialize_deserialize_deleted() { let history = History { id: "66d16cbee7cd47538e5c5b8b44e9006e".to_owned().into(), timestamp: datetime!(2023-05-28 18:35:40.633872 +00:00), duration: 49206000, exit: 0, command: "git status".to_owned(), cwd: "/Users/conrad.ludgate/Documents/code/atuin".to_owned(), session: "b97d9a306f274473a203d2eba41f9457".to_owned(), hostname: "fvfg936c0kpf:conrad.ludgate".to_owned(), deleted_at: Some(datetime!(2023-11-19 20:18 +00:00)), }; let serialized = history.serialize().expect("failed to serialize history"); let deserialized = History::deserialize(&serialized.0, HISTORY_VERSION) .expect("failed to deserialize history"); assert_eq!(history, deserialized); } #[test] fn test_serialize_deserialize_version() { // v0 let bytes_v0 = [ 205, 0, 0, 153, 217, 32, 54, 54, 100, 49, 54, 99, 98, 101, 101, 55, 99, 100, 52, 55, 53, 51, 56, 101, 53, 99, 53, 98, 56, 98, 52, 52, 101, 57, 48, 48, 54, 101, 207, 23, 99, 98, 117, 24, 210, 246, 128, 206, 2, 238, 210, 240, 0, 170, 103, 105, 116, 32, 115, 116, 97, 116, 117, 115, 217, 42, 47, 85, 115, 101, 114, 115, 47, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 47, 68, 111, 99, 117, 109, 101, 110, 116, 115, 47, 99, 111, 100, 101, 47, 97, 116, 117, 105, 110, 217, 32, 98, 57, 55, 100, 57, 97, 51, 48, 54, 102, 50, 55, 52, 52, 55, 51, 97, 50, 48, 51, 100, 50, 101, 98, 97, 52, 49, 102, 57, 52, 53, 55, 187, 102, 118, 102, 103, 57, 51, 54, 99, 48, 107, 112, 102, 58, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 192, ]; // some other version let 
bytes_v1 = [ 205, 1, 0, 153, 217, 32, 54, 54, 100, 49, 54, 99, 98, 101, 101, 55, 99, 100, 52, 55, 53, 51, 56, 101, 53, 99, 53, 98, 56, 98, 52, 52, 101, 57, 48, 48, 54, 101, 207, 23, 99, 98, 117, 24, 210, 246, 128, 206, 2, 238, 210, 240, 0, 170, 103, 105, 116, 32, 115, 116, 97, 116, 117, 115, 217, 42, 47, 85, 115, 101, 114, 115, 47, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 47, 68, 111, 99, 117, 109, 101, 110, 116, 115, 47, 99, 111, 100, 101, 47, 97, 116, 117, 105, 110, 217, 32, 98, 57, 55, 100, 57, 97, 51, 48, 54, 102, 50, 55, 52, 52, 55, 51, 97, 50, 48, 51, 100, 50, 101, 98, 97, 52, 49, 102, 57, 52, 53, 55, 187, 102, 118, 102, 103, 57, 51, 54, 99, 48, 107, 112, 102, 58, 99, 111, 110, 114, 97, 100, 46, 108, 117, 100, 103, 97, 116, 101, 192, ]; let deserialized = History::deserialize(&bytes_v0, HISTORY_VERSION); assert!(deserialized.is_ok()); let deserialized = History::deserialize(&bytes_v1, HISTORY_VERSION); assert!(deserialized.is_err()); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/utils.rs
crates/atuin-client/src/utils.rs
pub(crate) fn get_hostname() -> String { std::env::var("ATUIN_HOST_NAME").unwrap_or_else(|_| { whoami::fallible::hostname().unwrap_or_else(|_| "unknown-host".to_string()) }) } pub(crate) fn get_username() -> String { std::env::var("ATUIN_HOST_USER").unwrap_or_else(|_| whoami::username()) } /// Returns a pair of the hostname and username, separated by a colon. pub(crate) fn get_host_user() -> String { format!("{}:{}", get_hostname(), get_username()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/secrets.rs
crates/atuin-client/src/secrets.rs
// This file will probably trigger a lot of scanners. Sorry. use regex::RegexSet; use std::sync::LazyLock; pub enum TestValue<'a> { Single(&'a str), Multiple(&'a [&'a str]), } /// A list of `(name, regex, test)`, where `test` should match against `regex`. pub static SECRET_PATTERNS: &[(&str, &str, TestValue)] = &[ ( "AWS Access Key ID", "AKIA[0-9A-Z]{16}", TestValue::Single("AKIAIOSFODNN7EXAMPLE"), ), ( "AWS Secret Access Key env var", "AWS_SECRET_ACCESS_KEY", TestValue::Single("AWS_SECRET_ACCESS_KEY=KEYDATA"), ), ( "AWS Session Token env var", "AWS_SESSION_TOKEN", TestValue::Single("AWS_SESSION_TOKEN=KEYDATA"), ), ( "Microsoft Azure secret access key env var", "AZURE_.*_KEY", TestValue::Single("export AZURE_STORAGE_ACCOUNT_KEY=KEYDATA"), ), ( "Google cloud platform key env var", "GOOGLE_SERVICE_ACCOUNT_KEY", TestValue::Single("export GOOGLE_SERVICE_ACCOUNT_KEY=KEYDATA"), ), ( "Atuin login", r"atuin\s+login", TestValue::Single( "atuin login -u mycoolusername -p mycoolpassword -k \"lots of random words\"", ), ), ( "GitHub PAT (old)", "ghp_[a-zA-Z0-9]{36}", TestValue::Single("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"), // legit, I expired it ), ( "GitHub PAT (new)", "gh1_[A-Za-z0-9]{21}_[A-Za-z0-9]{59}|github_pat_[0-9][A-Za-z0-9]{21}_[A-Za-z0-9]{59}", TestValue::Multiple(&[ "gh1_1234567890abcdefghijk_1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklm", "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", // also legit, also expired ]), ), ( "GitHub OAuth Access Token", "gho_[A-Za-z0-9]{36}", TestValue::Single("gho_1234567890abcdefghijklmnopqrstuvwx000"), // not a real token ), ( "GitHub OAuth Access Token (user)", "ghu_[A-Za-z0-9]{36}", TestValue::Single("ghu_1234567890abcdefghijklmnopqrstuvwx000"), // not a real token ), ( "GitHub App Installation Access Token", "ghs_[A-Za-z0-9]{36}", TestValue::Single("ghs_1234567890abcdefghijklmnopqrstuvwx000"), // not a real token ), ( "GitHub Refresh Token", 
"ghr_[A-Za-z0-9]{76}", TestValue::Single( "ghr_1234567890abcdefghijklmnopqrstuvwx1234567890abcdefghijklmnopqrstuvwx1234567890abcdefghijklmnopqrstuvwx", ), // not a real token ), ( "GitHub App Installation Access Token v1", "v1\\.[0-9A-Fa-f]{40}", TestValue::Single("v1.1234567890abcdef1234567890abcdef12345678"), // not a real token ), ( "GitLab PAT", "glpat-[a-zA-Z0-9_]{20}", TestValue::Single("glpat-RkE_BG5p_bbjML21WSfy"), ), ( "Slack OAuth v2 bot", "xoxb-[0-9]{11}-[0-9]{11}-[0-9a-zA-Z]{24}", TestValue::Single("xoxb-17653672481-19874698323-pdFZKVeTuE8sk7oOcBrzbqgy"), ), ( "Slack OAuth v2 user token", "xoxp-[0-9]{11}-[0-9]{11}-[0-9a-zA-Z]{24}", TestValue::Single("xoxp-17653672481-19874698323-pdFZKVeTuE8sk7oOcBrzbqgy"), ), ( "Slack webhook", "T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}", TestValue::Single( "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX", ), ), ( "Stripe test key", "sk_test_[0-9a-zA-Z]{24}", TestValue::Single("sk_test_1234567890abcdefghijklmnop"), ), ( "Stripe live key", "sk_live_[0-9a-zA-Z]{24}", TestValue::Single("sk_live_1234567890abcdefghijklmnop"), ), ( "Netlify authentication token", "nf[pcoub]_[0-9a-zA-Z]{36}", TestValue::Single("nfp_nBh7BdJxUwyaBBwFzpyD29MMFT6pZ9wq5634"), ), ( "npm token", "npm_[A-Za-z0-9]{36}", TestValue::Single("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"), ), ( "Pulumi personal access token", "pul-[0-9a-f]{40}", TestValue::Single("pul-683c2770662c51d960d72ec27613be7653c5cb26"), ), ]; /// The `regex` expressions from [`SECRET_PATTERNS`] compiled into a `RegexSet`. 
pub static SECRET_PATTERNS_RE: LazyLock<RegexSet> = LazyLock::new(|| { let exprs = SECRET_PATTERNS.iter().map(|f| f.1); RegexSet::new(exprs).expect("Failed to build secrets regex") }); #[cfg(test)] mod tests { use regex::Regex; use crate::secrets::{SECRET_PATTERNS, TestValue}; #[test] fn test_secrets() { for (name, regex, test) in SECRET_PATTERNS { let re = Regex::new(regex).unwrap_or_else(|_| panic!("Failed to compile regex for {name}")); match test { TestValue::Single(test) => { assert!(re.is_match(test), "{name} test failed!"); } TestValue::Multiple(tests) => { for test_str in tests.iter() { assert!( re.is_match(test_str), "{name} test with value \"{test_str}\" failed!" ); } } } } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/api_client.rs
crates/atuin-client/src/api_client.rs
use std::collections::HashMap; use std::env; use std::time::Duration; use eyre::{Result, bail, eyre}; use reqwest::{ Response, StatusCode, Url, header::{AUTHORIZATION, HeaderMap, USER_AGENT}, }; use atuin_common::{ api::{ATUIN_CARGO_VERSION, ATUIN_HEADER_VERSION, ATUIN_VERSION}, record::{EncryptedData, HostId, Record, RecordIdx}, }; use atuin_common::{ api::{ AddHistoryRequest, ChangePasswordRequest, CountResponse, DeleteHistoryRequest, ErrorResponse, LoginRequest, LoginResponse, MeResponse, RegisterResponse, SendVerificationResponse, StatusResponse, SyncHistoryResponse, VerificationTokenRequest, VerificationTokenResponse, }, record::RecordStatus, }; use semver::Version; use time::OffsetDateTime; use time::format_description::well_known::Rfc3339; use crate::{history::History, sync::hash_str, utils::get_host_user}; static APP_USER_AGENT: &str = concat!("atuin/", env!("CARGO_PKG_VERSION"),); pub struct Client<'a> { sync_addr: &'a str, client: reqwest::Client, } fn make_url(address: &str, path: &str) -> Result<String> { // `join()` expects a trailing `/` in order to join paths // e.g. it treats `http://host:port/subdir` as a file called `subdir` let address = if address.ends_with("/") { address } else { &format!("{address}/") }; // passing a path with a leading `/` will cause `join()` to replace the entire URL path let path = path.strip_prefix("/").unwrap_or(path); let url = Url::parse(address) .map(|url| url.join(path))? 
.map_err(|_| eyre!("invalid address"))?; Ok(url.to_string()) } pub async fn register( address: &str, username: &str, email: &str, password: &str, ) -> Result<RegisterResponse> { let mut map = HashMap::new(); map.insert("username", username); map.insert("email", email); map.insert("password", password); let url = make_url(address, &format!("/user/{username}"))?; let resp = reqwest::get(url).await?; if resp.status().is_success() { bail!("username already in use"); } let url = make_url(address, "/register")?; let client = reqwest::Client::new(); let resp = client .post(url) .header(USER_AGENT, APP_USER_AGENT) .header(ATUIN_HEADER_VERSION, ATUIN_CARGO_VERSION) .json(&map) .send() .await?; let resp = handle_resp_error(resp).await?; if !ensure_version(&resp)? { bail!("could not register user due to version mismatch"); } let session = resp.json::<RegisterResponse>().await?; Ok(session) } pub async fn login(address: &str, req: LoginRequest) -> Result<LoginResponse> { let url = make_url(address, "/login")?; let client = reqwest::Client::new(); let resp = client .post(url) .header(USER_AGENT, APP_USER_AGENT) .json(&req) .send() .await?; let resp = handle_resp_error(resp).await?; if !ensure_version(&resp)? 
{ bail!("Could not login due to version mismatch"); } let session = resp.json::<LoginResponse>().await?; Ok(session) } #[cfg(feature = "check-update")] pub async fn latest_version() -> Result<Version> { use atuin_common::api::IndexResponse; let url = "https://api.atuin.sh"; let client = reqwest::Client::new(); let resp = client .get(url) .header(USER_AGENT, APP_USER_AGENT) .send() .await?; let resp = handle_resp_error(resp).await?; let index = resp.json::<IndexResponse>().await?; let version = Version::parse(index.version.as_str())?; Ok(version) } pub fn ensure_version(response: &Response) -> Result<bool> { let version = response.headers().get(ATUIN_HEADER_VERSION); let version = if let Some(version) = version { match version.to_str() { Ok(v) => Version::parse(v), Err(e) => bail!("failed to parse server version: {:?}", e), } } else { bail!("Server not reporting its version: it is either too old or unhealthy"); }?; // If the client is newer than the server if version.major < ATUIN_VERSION.major { println!( "Atuin version mismatch! 
In order to successfully sync, the server needs to run a newer version of Atuin" ); println!("Client: {ATUIN_CARGO_VERSION}"); println!("Server: {version}"); return Ok(false); } Ok(true) } async fn handle_resp_error(resp: Response) -> Result<Response> { let status = resp.status(); if status == StatusCode::SERVICE_UNAVAILABLE { bail!( "Service unavailable: check https://status.atuin.sh (or get in touch with your host)" ); } if status == StatusCode::TOO_MANY_REQUESTS { bail!("Rate limited; please wait before doing that again"); } if !status.is_success() { if let Ok(error) = resp.json::<ErrorResponse>().await { let reason = error.reason; if status.is_client_error() { bail!("Invalid request to the service: {status} - {reason}.") } bail!( "There was an error with the atuin sync service, server error {status}: {reason}.\nIf the problem persists, contact the host" ) } bail!( "There was an error with the atuin sync service: Status {status:?}.\nIf the problem persists, contact the host" ) } Ok(resp) } impl<'a> Client<'a> { pub fn new( sync_addr: &'a str, session_token: &str, connect_timeout: u64, timeout: u64, ) -> Result<Self> { let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, format!("Token {session_token}").parse()?); // used for semver server check headers.insert(ATUIN_HEADER_VERSION, ATUIN_CARGO_VERSION.parse()?); Ok(Client { sync_addr, client: reqwest::Client::builder() .user_agent(APP_USER_AGENT) .default_headers(headers) .connect_timeout(Duration::new(connect_timeout, 0)) .timeout(Duration::new(timeout, 0)) .build()?, }) } pub async fn count(&self) -> Result<i64> { let url = make_url(self.sync_addr, "/sync/count")?; let url = Url::parse(url.as_str())?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; if !ensure_version(&resp)? 
{ bail!("could not sync due to version mismatch"); } if resp.status() != StatusCode::OK { bail!("failed to get count (are you logged in?)"); } let count = resp.json::<CountResponse>().await?; Ok(count.count) } pub async fn status(&self) -> Result<StatusResponse> { let url = make_url(self.sync_addr, "/sync/status")?; let url = Url::parse(url.as_str())?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; if !ensure_version(&resp)? { bail!("could not sync due to version mismatch"); } let status = resp.json::<StatusResponse>().await?; Ok(status) } pub async fn me(&self) -> Result<MeResponse> { let url = make_url(self.sync_addr, "/api/v0/me")?; let url = Url::parse(url.as_str())?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; let status = resp.json::<MeResponse>().await?; Ok(status) } pub async fn get_history( &self, sync_ts: OffsetDateTime, history_ts: OffsetDateTime, host: Option<String>, ) -> Result<SyncHistoryResponse> { let host = host.unwrap_or_else(|| hash_str(&get_host_user())); let url = make_url( self.sync_addr, &format!( "/sync/history?sync_ts={}&history_ts={}&host={}", urlencoding::encode(sync_ts.format(&Rfc3339)?.as_str()), urlencoding::encode(history_ts.format(&Rfc3339)?.as_str()), host, ), )?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; let history = resp.json::<SyncHistoryResponse>().await?; Ok(history) } pub async fn post_history(&self, history: &[AddHistoryRequest]) -> Result<()> { let url = make_url(self.sync_addr, "/history")?; let url = Url::parse(url.as_str())?; let resp = self.client.post(url).json(history).send().await?; handle_resp_error(resp).await?; Ok(()) } pub async fn delete_history(&self, h: History) -> Result<()> { let url = make_url(self.sync_addr, "/history")?; let url = Url::parse(url.as_str())?; let resp = self .client .delete(url) .json(&DeleteHistoryRequest { client_id: h.id.to_string(), }) .send() 
.await?; handle_resp_error(resp).await?; Ok(()) } pub async fn delete_store(&self) -> Result<()> { let url = make_url(self.sync_addr, "/api/v0/store")?; let url = Url::parse(url.as_str())?; let resp = self.client.delete(url).send().await?; handle_resp_error(resp).await?; Ok(()) } pub async fn post_records(&self, records: &[Record<EncryptedData>]) -> Result<()> { let url = make_url(self.sync_addr, "/api/v0/record")?; let url = Url::parse(url.as_str())?; debug!("uploading {} records to {url}", records.len()); let resp = self.client.post(url).json(records).send().await?; handle_resp_error(resp).await?; Ok(()) } pub async fn next_records( &self, host: HostId, tag: String, start: RecordIdx, count: u64, ) -> Result<Vec<Record<EncryptedData>>> { debug!("fetching record/s from host {}/{}/{}", host.0, tag, start); let url = make_url( self.sync_addr, &format!( "/api/v0/record/next?host={}&tag={}&count={}&start={}", host.0, tag, count, start ), )?; let url = Url::parse(url.as_str())?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; let records = resp.json::<Vec<Record<EncryptedData>>>().await?; Ok(records) } pub async fn record_status(&self) -> Result<RecordStatus> { let url = make_url(self.sync_addr, "/api/v0/record")?; let url = Url::parse(url.as_str())?; let resp = self.client.get(url).send().await?; let resp = handle_resp_error(resp).await?; if !ensure_version(&resp)? 
{ bail!("could not sync records due to version mismatch"); } let index = resp.json().await?; debug!("got remote index {index:?}"); Ok(index) } pub async fn delete(&self) -> Result<()> { let url = make_url(self.sync_addr, "/account")?; let url = Url::parse(url.as_str())?; let resp = self.client.delete(url).send().await?; if resp.status() == 403 { bail!("invalid login details"); } else if resp.status() == 200 { Ok(()) } else { bail!("Unknown error"); } } pub async fn change_password( &self, current_password: String, new_password: String, ) -> Result<()> { let url = make_url(self.sync_addr, "/account/password")?; let url = Url::parse(url.as_str())?; let resp = self .client .patch(url) .json(&ChangePasswordRequest { current_password, new_password, }) .send() .await?; if resp.status() == 401 { bail!("current password is incorrect") } else if resp.status() == 403 { bail!("invalid login details"); } else if resp.status() == 200 { Ok(()) } else { bail!("Unknown error"); } } // Either request a verification email if token is null, or validate a token pub async fn verify(&self, token: Option<String>) -> Result<(bool, bool)> { // could dedupe this a bit, but it's simple at the moment let (email_sent, verified) = if let Some(token) = token { let url = make_url(self.sync_addr, "/api/v0/account/verify")?; let url = Url::parse(url.as_str())?; let resp = self .client .post(url) .json(&VerificationTokenRequest { token }) .send() .await?; let resp = handle_resp_error(resp).await?; let resp = resp.json::<VerificationTokenResponse>().await?; (false, resp.verified) } else { let url = make_url(self.sync_addr, "/api/v0/account/send-verification")?; let url = Url::parse(url.as_str())?; let resp = self.client.post(url).send().await?; let resp = handle_resp_error(resp).await?; let resp = resp.json::<SendVerificationResponse>().await?; (resp.email_sent, resp.verified) }; Ok((email_sent, verified)) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/logout.rs
crates/atuin-client/src/logout.rs
use eyre::{Context, Result}; use fs_err::remove_file; use crate::settings::Settings; pub fn logout(settings: &Settings) -> Result<()> { let session_path = settings.session_path.as_str(); if settings.logged_in() { remove_file(session_path).context("Failed to remove session file")?; println!("You have logged out!"); } else { println!("You are not logged in"); } Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/plugin.rs
crates/atuin-client/src/plugin.rs
use std::collections::HashMap; #[derive(Debug, Clone)] pub struct OfficialPlugin { pub name: String, pub description: String, pub install_message: String, } impl OfficialPlugin { pub fn new(name: &str, description: &str, install_message: &str) -> Self { Self { name: name.to_string(), description: description.to_string(), install_message: install_message.to_string(), } } } pub struct OfficialPluginRegistry { plugins: HashMap<String, OfficialPlugin>, } impl OfficialPluginRegistry { pub fn new() -> Self { let mut registry = Self { plugins: HashMap::new(), }; // Register official plugins registry.register_official_plugins(); registry } fn register_official_plugins(&mut self) { // atuin-update plugin self.plugins.insert( "update".to_string(), OfficialPlugin::new( "update", "Update atuin to the latest version", "The 'atuin update' command is provided by the atuin-update plugin.\n\ It is only installed if you used the install script\n \ If you used a package manager (brew, apt, etc), please continue to use it for updates" ), ); } pub fn get_plugin(&self, name: &str) -> Option<&OfficialPlugin> { self.plugins.get(name) } pub fn is_official_plugin(&self, name: &str) -> bool { self.plugins.contains_key(name) } pub fn get_install_message(&self, name: &str) -> Option<&str> { self.plugins .get(name) .map(|plugin| plugin.install_message.as_str()) } } impl Default for OfficialPluginRegistry { fn default() -> Self { Self::new() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_registry_creation() { let registry = OfficialPluginRegistry::new(); assert!(registry.is_official_plugin("update")); assert!(!registry.is_official_plugin("nonexistent")); } #[test] fn test_get_plugin() { let registry = OfficialPluginRegistry::new(); let plugin = registry.get_plugin("update"); assert!(plugin.is_some()); assert_eq!(plugin.unwrap().name, "update"); } #[test] fn test_get_install_message() { let registry = OfficialPluginRegistry::new(); let message = registry.get_install_message("update"); 
assert!(message.is_some()); assert!(message.unwrap().contains("atuin-update")); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/settings/scripts.rs
crates/atuin-client/src/settings/scripts.rs
use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Settings { pub db_path: String, } impl Default for Settings { fn default() -> Self { let dir = atuin_common::utils::data_dir(); let path = dir.join("scripts.db"); Self { db_path: path.to_string_lossy().to_string(), } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/settings/kv.rs
crates/atuin-client/src/settings/kv.rs
use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Settings { pub db_path: String, } impl Default for Settings { fn default() -> Self { let dir = atuin_common::utils::data_dir(); let path = dir.join("kv.db"); Self { db_path: path.to_string_lossy().to_string(), } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/settings/dotfiles.rs
crates/atuin-client/src/settings/dotfiles.rs
use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct Settings { #[serde(alias = "enable")] pub enabled: bool, }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/history/builder.rs
crates/atuin-client/src/history/builder.rs
use typed_builder::TypedBuilder; use super::History; /// Builder for a history entry that is imported from shell history. /// /// The only two required fields are `timestamp` and `command`. #[derive(Debug, Clone, TypedBuilder)] pub struct HistoryImported { timestamp: time::OffsetDateTime, #[builder(setter(into))] command: String, #[builder(default = "unknown".into(), setter(into))] cwd: String, #[builder(default = -1)] exit: i64, #[builder(default = -1)] duration: i64, #[builder(default, setter(strip_option, into))] session: Option<String>, #[builder(default, setter(strip_option, into))] hostname: Option<String>, } impl From<HistoryImported> for History { fn from(imported: HistoryImported) -> Self { History::new( imported.timestamp, imported.command, imported.cwd, imported.exit, imported.duration, imported.session, imported.hostname, None, ) } } /// Builder for a history entry that is captured via hook. /// /// This builder is used only at the `start` step of the hook, /// so it doesn't have any fields which are known only after /// the command is finished, such as `exit` or `duration`. #[derive(Debug, Clone, TypedBuilder)] pub struct HistoryCaptured { timestamp: time::OffsetDateTime, #[builder(setter(into))] command: String, #[builder(setter(into))] cwd: String, } impl From<HistoryCaptured> for History { fn from(captured: HistoryCaptured) -> Self { History::new( captured.timestamp, captured.command, captured.cwd, -1, -1, None, None, None, ) } } /// Builder for a history entry that is loaded from the database. /// /// All fields are required, as they are all present in the database. 
#[derive(Debug, Clone, TypedBuilder)] pub struct HistoryFromDb { id: String, timestamp: time::OffsetDateTime, command: String, cwd: String, exit: i64, duration: i64, session: String, hostname: String, deleted_at: Option<time::OffsetDateTime>, } impl From<HistoryFromDb> for History { fn from(from_db: HistoryFromDb) -> Self { History { id: from_db.id.into(), timestamp: from_db.timestamp, exit: from_db.exit, command: from_db.command, cwd: from_db.cwd, duration: from_db.duration, session: from_db.session, hostname: from_db.hostname, deleted_at: from_db.deleted_at, } } } /// Builder for a history entry that is captured via hook and sent to the daemon /// /// This builder is similar to Capture, but we just require more information up front. /// For the old setup, we could just rely on History::new to read some of the missing /// data. This is no longer the case. #[derive(Debug, Clone, TypedBuilder)] pub struct HistoryDaemonCapture { timestamp: time::OffsetDateTime, #[builder(setter(into))] command: String, #[builder(setter(into))] cwd: String, #[builder(setter(into))] session: String, #[builder(setter(into))] hostname: String, } impl From<HistoryDaemonCapture> for History { fn from(captured: HistoryDaemonCapture) -> Self { History::new( captured.timestamp, captured.command, captured.cwd, -1, -1, Some(captured.session), Some(captured.hostname), None, ) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/history/store.rs
crates/atuin-client/src/history/store.rs
use std::{collections::HashSet, fmt::Write, time::Duration}; use eyre::{Result, bail, eyre}; use indicatif::{ProgressBar, ProgressState, ProgressStyle}; use rmp::decode::Bytes; use crate::{ database::{Database, current_context}, record::{encryption::PASETO_V4, sqlite_store::SqliteStore, store::Store}, }; use atuin_common::record::{DecryptedData, Host, HostId, Record, RecordId, RecordIdx}; use super::{HISTORY_TAG, HISTORY_VERSION, History, HistoryId}; #[derive(Debug, Clone)] pub struct HistoryStore { pub store: SqliteStore, pub host_id: HostId, pub encryption_key: [u8; 32], } #[derive(Debug, Eq, PartialEq, Clone)] pub enum HistoryRecord { Create(History), // Create a history record Delete(HistoryId), // Delete a history record, identified by ID } impl HistoryRecord { /// Serialize a history record, returning DecryptedData /// The record will be of a certain type /// We map those like so: /// /// HistoryRecord::Create -> 0 /// HistoryRecord::Delete-> 1 /// /// This numeric identifier is then written as the first byte to the buffer. For history, we /// append the serialized history right afterwards, to avoid having to handle serialization /// twice. 
/// /// Deletion simply refers to the history by ID pub fn serialize(&self) -> Result<DecryptedData> { // probably don't actually need to use rmp here, but if we ever need to extend it, it's a // nice wrapper around raw byte stuff use rmp::encode; let mut output = vec![]; match self { HistoryRecord::Create(history) => { // 0 -> a history create encode::write_u8(&mut output, 0)?; let bytes = history.serialize()?; encode::write_bin(&mut output, &bytes.0)?; } HistoryRecord::Delete(id) => { // 1 -> a history delete encode::write_u8(&mut output, 1)?; encode::write_str(&mut output, id.0.as_str())?; } }; Ok(DecryptedData(output)) } pub fn deserialize(bytes: &DecryptedData, version: &str) -> Result<Self> { use rmp::decode; fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } let mut bytes = Bytes::new(&bytes.0); let record_type = decode::read_u8(&mut bytes).map_err(error_report)?; match record_type { // 0 -> HistoryRecord::Create 0 => { // not super useful to us atm, but perhaps in the future // written by write_bin above let _ = decode::read_bin_len(&mut bytes).map_err(error_report)?; let record = History::deserialize(bytes.remaining_slice(), version)?; Ok(HistoryRecord::Create(record)) } // 1 -> HistoryRecord::Delete 1 => { let bytes = bytes.remaining_slice(); let (id, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; if !bytes.is_empty() { bail!( "trailing bytes decoding HistoryRecord::Delete - malformed? got {bytes:?}" ); } Ok(HistoryRecord::Delete(id.to_string().into())) } n => { bail!("unknown HistoryRecord type {n}") } } } } impl HistoryStore { pub fn new(store: SqliteStore, host_id: HostId, encryption_key: [u8; 32]) -> Self { HistoryStore { store, host_id, encryption_key, } } async fn push_record(&self, record: HistoryRecord) -> Result<(RecordId, RecordIdx)> { let bytes = record.serialize()?; let idx = self .store .last(self.host_id, HISTORY_TAG) .await? 
.map_or(0, |p| p.idx + 1); let record = Record::builder() .host(Host::new(self.host_id)) .version(HISTORY_VERSION.to_string()) .tag(HISTORY_TAG.to_string()) .idx(idx) .data(bytes) .build(); let id = record.id; self.store .push(&record.encrypt::<PASETO_V4>(&self.encryption_key)) .await?; Ok((id, idx)) } async fn push_batch(&self, records: impl Iterator<Item = HistoryRecord>) -> Result<()> { let mut ret = Vec::new(); let idx = self .store .last(self.host_id, HISTORY_TAG) .await? .map_or(0, |p| p.idx + 1); // Could probably _also_ do this as an iterator, but let's see how this is for now. // optimizing for minimal sqlite transactions, this code can be optimised later for (n, record) in records.enumerate() { let bytes = record.serialize()?; let record = Record::builder() .host(Host::new(self.host_id)) .version(HISTORY_VERSION.to_string()) .tag(HISTORY_TAG.to_string()) .idx(idx + n as u64) .data(bytes) .build(); let record = record.encrypt::<PASETO_V4>(&self.encryption_key); ret.push(record); } self.store.push_batch(ret.iter()).await?; Ok(()) } pub async fn delete(&self, id: HistoryId) -> Result<(RecordId, RecordIdx)> { let record = HistoryRecord::Delete(id); self.push_record(record).await } pub async fn push(&self, history: History) -> Result<(RecordId, RecordIdx)> { // TODO(ellie): move the history store to its own file // it's tiny rn so fine as is let record = HistoryRecord::Create(history); self.push_record(record).await } pub async fn history(&self) -> Result<Vec<HistoryRecord>> { // Atm this loads all history into memory // Not ideal as that is potentially quite a lot, although history will be small. 
let records = self.store.all_tagged(HISTORY_TAG).await?; let mut ret = Vec::with_capacity(records.len()); for record in records.into_iter() { let hist = match record.version.as_str() { HISTORY_VERSION => { let decrypted = record.decrypt::<PASETO_V4>(&self.encryption_key)?; HistoryRecord::deserialize(&decrypted.data, HISTORY_VERSION) } version => bail!("unknown history version {version:?}"), }?; ret.push(hist); } Ok(ret) } pub async fn build(&self, database: &dyn Database) -> Result<()> { // I'd like to change how we rebuild and not couple this with the database, but need to // consider the structure more deeply. This will be easy to change. // TODO(ellie): page or iterate this let history = self.history().await?; // In theory we could flatten this here // The current issue is that the database may have history in it already, from the old sync // This didn't actually delete old history // If we're sure we have a DB only maintained by the new store, we can flatten // create/delete before we even get to sqlite let mut creates = Vec::new(); let mut deletes = Vec::new(); for i in history { match i { HistoryRecord::Create(h) => { creates.push(h); } HistoryRecord::Delete(id) => { deletes.push(id); } } } database.save_bulk(&creates).await?; database.delete_rows(&deletes).await?; Ok(()) } pub async fn incremental_build(&self, database: &dyn Database, ids: &[RecordId]) -> Result<()> { for id in ids { let record = self.store.get(*id).await; let record = match record { Ok(record) => record, _ => { continue; } }; if record.tag != HISTORY_TAG { continue; } let decrypted = record.decrypt::<PASETO_V4>(&self.encryption_key)?; let record = HistoryRecord::deserialize(&decrypted.data, HISTORY_VERSION)?; match record { HistoryRecord::Create(h) => { // TODO: benchmark CPU time/memory tradeoff of batch commit vs one at a time database.save(&h).await?; } HistoryRecord::Delete(id) => { database.delete_rows(&[id]).await?; } } } Ok(()) } /// Get a list of history IDs that exist in the store 
/// Note: This currently involves loading all history into memory. This is not going to be a /// large amount in absolute terms, but do not all it in a hot loop. pub async fn history_ids(&self) -> Result<HashSet<HistoryId>> { let history = self.history().await?; let ret = HashSet::from_iter(history.iter().map(|h| match h { HistoryRecord::Create(h) => h.id.clone(), HistoryRecord::Delete(id) => id.clone(), })); Ok(ret) } pub async fn init_store(&self, db: &impl Database) -> Result<()> { let pb = ProgressBar::new_spinner(); pb.set_style( ProgressStyle::with_template("{spinner:.blue} {msg}") .unwrap() .with_key("eta", |state: &ProgressState, w: &mut dyn Write| { write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap() }) .progress_chars("#>-"), ); pb.enable_steady_tick(Duration::from_millis(500)); pb.set_message("Fetching history from old database"); let context = current_context(); let history = db.list(&[], &context, None, false, true).await?; pb.set_message("Fetching history already in store"); let store_ids = self.history_ids().await?; pb.set_message("Converting old history to new store"); let mut records = Vec::new(); for i in history { debug!("loaded {}", i.id); if store_ids.contains(&i.id) { debug!("skipping {} - already exists", i.id); continue; } if i.deleted_at.is_some() { records.push(HistoryRecord::Delete(i.id)); } else { records.push(HistoryRecord::Create(i)); } } pb.set_message("Writing to db"); if !records.is_empty() { self.push_batch(records.into_iter()).await?; } pb.finish_with_message("Import complete"); Ok(()) } } #[cfg(test)] mod tests { use atuin_common::record::DecryptedData; use time::macros::datetime; use crate::history::{HISTORY_VERSION, store::HistoryRecord}; use super::History; #[test] fn test_serialize_deserialize_create() { let bytes = [ 204, 0, 196, 141, 205, 0, 0, 153, 217, 32, 48, 49, 56, 99, 100, 52, 102, 101, 56, 49, 55, 53, 55, 99, 100, 50, 97, 101, 101, 54, 53, 99, 100, 55, 56, 54, 49, 102, 57, 99, 56, 49, 207, 23, 166, 251, 212, 181, 
82, 0, 0, 100, 0, 162, 108, 115, 217, 41, 47, 85, 115, 101, 114, 115, 47, 101, 108, 108, 105, 101, 47, 115, 114, 99, 47, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 97, 116, 117, 105, 110, 115, 104, 47, 97, 116, 117, 105, 110, 217, 32, 48, 49, 56, 99, 100, 52, 102, 101, 97, 100, 56, 57, 55, 53, 57, 55, 56, 53, 50, 53, 50, 55, 97, 51, 49, 99, 57, 57, 56, 48, 53, 57, 170, 98, 111, 111, 112, 58, 101, 108, 108, 105, 101, 192, ]; let history = History { id: "018cd4fe81757cd2aee65cd7861f9c81".to_owned().into(), timestamp: datetime!(2024-01-04 00:00:00.000000 +00:00), duration: 100, exit: 0, command: "ls".to_owned(), cwd: "/Users/ellie/src/github.com/atuinsh/atuin".to_owned(), session: "018cd4fead897597852527a31c998059".to_owned(), hostname: "boop:ellie".to_owned(), deleted_at: None, }; let record = HistoryRecord::Create(history); let serialized = record.serialize().expect("failed to serialize history"); assert_eq!(serialized.0, bytes); let deserialized = HistoryRecord::deserialize(&serialized, HISTORY_VERSION) .expect("failed to deserialize HistoryRecord"); assert_eq!(deserialized, record); // check the snapshot too let deserialized = HistoryRecord::deserialize(&DecryptedData(Vec::from(bytes)), HISTORY_VERSION) .expect("failed to deserialize HistoryRecord"); assert_eq!(deserialized, record); } #[test] fn test_serialize_deserialize_delete() { let bytes = [ 204, 1, 217, 32, 48, 49, 56, 99, 100, 52, 102, 101, 56, 49, 55, 53, 55, 99, 100, 50, 97, 101, 101, 54, 53, 99, 100, 55, 56, 54, 49, 102, 57, 99, 56, 49, ]; let record = HistoryRecord::Delete("018cd4fe81757cd2aee65cd7861f9c81".to_string().into()); let serialized = record.serialize().expect("failed to serialize history"); assert_eq!(serialized.0, bytes); let deserialized = HistoryRecord::deserialize(&serialized, HISTORY_VERSION) .expect("failed to deserialize HistoryRecord"); assert_eq!(deserialized, record); let deserialized = HistoryRecord::deserialize(&DecryptedData(Vec::from(bytes)), HISTORY_VERSION) 
.expect("failed to deserialize HistoryRecord"); assert_eq!(deserialized, record); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/record/store.rs
crates/atuin-client/src/record/store.rs
use async_trait::async_trait; use eyre::Result; use atuin_common::record::{EncryptedData, HostId, Record, RecordId, RecordIdx, RecordStatus}; /// A record store stores records /// In more detail - we tend to need to process this into _another_ format to actually query it. /// As is, the record store is intended as the source of truth for arbitrary data, which could /// be shell history, kvs, etc. #[async_trait] pub trait Store { // Push a record async fn push(&self, record: &Record<EncryptedData>) -> Result<()> { self.push_batch(std::iter::once(record)).await } // Push a batch of records, all in one transaction async fn push_batch( &self, records: impl Iterator<Item = &Record<EncryptedData>> + Send + Sync, ) -> Result<()>; async fn get(&self, id: RecordId) -> Result<Record<EncryptedData>>; async fn delete(&self, id: RecordId) -> Result<()>; async fn delete_all(&self) -> Result<()>; async fn len_all(&self) -> Result<u64>; async fn len(&self, host: HostId, tag: &str) -> Result<u64>; async fn len_tag(&self, tag: &str) -> Result<u64>; async fn last(&self, host: HostId, tag: &str) -> Result<Option<Record<EncryptedData>>>; async fn first(&self, host: HostId, tag: &str) -> Result<Option<Record<EncryptedData>>>; async fn re_encrypt(&self, old_key: &[u8; 32], new_key: &[u8; 32]) -> Result<()>; async fn verify(&self, key: &[u8; 32]) -> Result<()>; async fn purge(&self, key: &[u8; 32]) -> Result<()>; /// Get the next `limit` records, after and including the given index async fn next( &self, host: HostId, tag: &str, idx: RecordIdx, limit: u64, ) -> Result<Vec<Record<EncryptedData>>>; /// Get the first record for a given host and tag async fn idx( &self, host: HostId, tag: &str, idx: RecordIdx, ) -> Result<Option<Record<EncryptedData>>>; async fn status(&self) -> Result<RecordStatus>; /// Get all records for a given tag async fn all_tagged(&self, tag: &str) -> Result<Vec<Record<EncryptedData>>>; }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/record/sync.rs
crates/atuin-client/src/record/sync.rs
// do a sync :O use std::{cmp::Ordering, fmt::Write}; use eyre::Result; use thiserror::Error; use super::store::Store; use crate::{api_client::Client, settings::Settings}; use atuin_common::record::{Diff, HostId, RecordId, RecordIdx, RecordStatus}; use indicatif::{ProgressBar, ProgressState, ProgressStyle}; #[derive(Error, Debug)] pub enum SyncError { #[error("the local store is ahead of the remote, but for another host. has remote lost data?")] LocalAheadOtherHost, #[error("an issue with the local database occurred: {msg:?}")] LocalStoreError { msg: String }, #[error("something has gone wrong with the sync logic: {msg:?}")] SyncLogicError { msg: String }, #[error("operational error: {msg:?}")] OperationalError { msg: String }, #[error("a request to the sync server failed: {msg:?}")] RemoteRequestError { msg: String }, } #[derive(Debug, Eq, PartialEq)] pub enum Operation { // Either upload or download until the states matches the below Upload { local: RecordIdx, remote: Option<RecordIdx>, host: HostId, tag: String, }, Download { local: Option<RecordIdx>, remote: RecordIdx, host: HostId, tag: String, }, Noop { host: HostId, tag: String, }, } pub async fn diff( settings: &Settings, store: &impl Store, ) -> Result<(Vec<Diff>, RecordStatus), SyncError> { let client = Client::new( &settings.sync_address, settings .session_token() .map_err(|e| SyncError::RemoteRequestError { msg: e.to_string() })? .as_str(), settings.network_connect_timeout, settings.network_timeout, ) .map_err(|e| SyncError::OperationalError { msg: e.to_string() })?; let local_index = store .status() .await .map_err(|e| SyncError::LocalStoreError { msg: e.to_string() })?; let remote_index = client .record_status() .await .map_err(|e| SyncError::RemoteRequestError { msg: e.to_string() })?; let diff = local_index.diff(&remote_index); Ok((diff, remote_index)) } // Take a diff, along with a local store, and resolve it into a set of operations. 
// With the store as context, we can determine if a tail exists locally or not and therefore if it needs uploading or download. // In theory this could be done as a part of the diffing stage, but it's easier to reason // about and test this way pub async fn operations( diffs: Vec<Diff>, _store: &impl Store, ) -> Result<Vec<Operation>, SyncError> { let mut operations = Vec::with_capacity(diffs.len()); for diff in diffs { let op = match (diff.local, diff.remote) { // We both have it! Could be either. Compare. (Some(local), Some(remote)) => match local.cmp(&remote) { Ordering::Equal => Operation::Noop { host: diff.host, tag: diff.tag, }, Ordering::Greater => Operation::Upload { local, remote: Some(remote), host: diff.host, tag: diff.tag, }, Ordering::Less => Operation::Download { local: Some(local), remote, host: diff.host, tag: diff.tag, }, }, // Remote has it, we don't. Gotta be download (None, Some(remote)) => Operation::Download { local: None, remote, host: diff.host, tag: diff.tag, }, // We have it, remote doesn't. Gotta be upload. (Some(local), None) => Operation::Upload { local, remote: None, host: diff.host, tag: diff.tag, }, // something is pretty fucked. (None, None) => { return Err(SyncError::SyncLogicError { msg: String::from( "diff has nothing for local or remote - (host, tag) does not exist", ), }); } }; operations.push(op); } // sort them - purely so we have a stable testing order, and can rely on // same input = same output // We can sort by ID so long as we continue to use UUIDv7 or something // with the same properties operations.sort_by_key(|op| match op { Operation::Noop { host, tag } => (0, *host, tag.clone()), Operation::Upload { host, tag, .. } => (1, *host, tag.clone()), Operation::Download { host, tag, .. 
} => (2, *host, tag.clone()), }); Ok(operations) } async fn sync_upload( store: &impl Store, client: &Client<'_>, host: HostId, tag: String, local: RecordIdx, remote: Option<RecordIdx>, ) -> Result<i64, SyncError> { let remote = remote.unwrap_or(0); let expected = local - remote; let upload_page_size = 100; let mut progress = 0; let pb = ProgressBar::new(expected); pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {human_pos}/{human_len} ({eta})") .unwrap() .with_key("eta", |state: &ProgressState, w: &mut dyn Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()) .progress_chars("#>-")); println!( "Uploading {} records to {}/{}", expected, host.0.as_simple(), tag ); // preload with the first entry if remote does not know of this store loop { let page = store .next(host, tag.as_str(), remote + progress, upload_page_size) .await .map_err(|e| { error!("failed to read upload page: {e:?}"); SyncError::LocalStoreError { msg: e.to_string() } })?; client.post_records(&page).await.map_err(|e| { error!("failed to post records: {e:?}"); SyncError::RemoteRequestError { msg: e.to_string() } })?; pb.set_position(progress); progress += page.len() as u64; if progress >= expected { break; } } pb.finish_with_message("Uploaded records"); Ok(progress as i64) } async fn sync_download( store: &impl Store, client: &Client<'_>, host: HostId, tag: String, local: Option<RecordIdx>, remote: RecordIdx, ) -> Result<Vec<RecordId>, SyncError> { let local = local.unwrap_or(0); let expected = remote - local; let download_page_size = 100; let mut progress = 0; let mut ret = Vec::new(); println!( "Downloading {} records from {}/{}", expected, host.0.as_simple(), tag ); let pb = ProgressBar::new(expected); pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {human_pos}/{human_len} ({eta})") .unwrap() .with_key("eta", |state: &ProgressState, w: &mut dyn Write| write!(w, "{:.1}s", 
state.eta().as_secs_f64()).unwrap()) .progress_chars("#>-")); // preload with the first entry if remote does not know of this store loop { let page = client .next_records(host, tag.clone(), local + progress, download_page_size) .await .map_err(|e| SyncError::RemoteRequestError { msg: e.to_string() })?; store .push_batch(page.iter()) .await .map_err(|e| SyncError::LocalStoreError { msg: e.to_string() })?; ret.extend(page.iter().map(|f| f.id)); pb.set_position(progress); progress += page.len() as u64; if progress >= expected { break; } } pb.finish_with_message("Downloaded records"); Ok(ret) } pub async fn sync_remote( operations: Vec<Operation>, local_store: &impl Store, settings: &Settings, ) -> Result<(i64, Vec<RecordId>), SyncError> { let client = Client::new( &settings.sync_address, settings .session_token() .map_err(|e| SyncError::RemoteRequestError { msg: e.to_string() })? .as_str(), settings.network_connect_timeout, settings.network_timeout, ) .expect("failed to create client"); let mut uploaded = 0; let mut downloaded = Vec::new(); // this can totally run in parallel, but lets get it working first for i in operations { match i { Operation::Upload { host, tag, local, remote, } => uploaded += sync_upload(local_store, &client, host, tag, local, remote).await?, Operation::Download { host, tag, local, remote, } => { let mut d = sync_download(local_store, &client, host, tag, local, remote).await?; downloaded.append(&mut d) } Operation::Noop { .. 
} => continue, } } Ok((uploaded, downloaded)) } pub async fn sync( settings: &Settings, store: &impl Store, ) -> Result<(i64, Vec<RecordId>), SyncError> { let (diff, _) = diff(settings, store).await?; let operations = operations(diff, store).await?; let (uploaded, downloaded) = sync_remote(operations, store, settings).await?; Ok((uploaded, downloaded)) } #[cfg(test)] mod tests { use atuin_common::record::{Diff, EncryptedData, HostId, Record}; use pretty_assertions::assert_eq; use crate::{ record::{ encryption::PASETO_V4, sqlite_store::SqliteStore, store::Store, sync::{self, Operation}, }, settings::test_local_timeout, }; fn test_record() -> Record<EncryptedData> { Record::builder() .host(atuin_common::record::Host::new(HostId( atuin_common::utils::uuid_v7(), ))) .version("v1".into()) .tag(atuin_common::utils::uuid_v7().simple().to_string()) .data(EncryptedData { data: String::new(), content_encryption_key: String::new(), }) .idx(0) .build() } // Take a list of local records, and a list of remote records. // Return the local database, and a diff of local/remote, ready to build // ops async fn build_test_diff( local_records: Vec<Record<EncryptedData>>, remote_records: Vec<Record<EncryptedData>>, ) -> (SqliteStore, Vec<Diff>) { let local_store = SqliteStore::new(":memory:", test_local_timeout()) .await .expect("failed to open in memory sqlite"); let remote_store = SqliteStore::new(":memory:", test_local_timeout()) .await .expect("failed to open in memory sqlite"); // "remote" for i in local_records { local_store.push(&i).await.unwrap(); } for i in remote_records { remote_store.push(&i).await.unwrap(); } let local_index = local_store.status().await.unwrap(); let remote_index = remote_store.status().await.unwrap(); let diff = local_index.diff(&remote_index); (local_store, diff) } #[tokio::test] async fn test_basic_diff() { // a diff where local is ahead of remote. nothing else. 
let record = test_record(); let (store, diff) = build_test_diff(vec![record.clone()], vec![]).await; assert_eq!(diff.len(), 1); let operations = sync::operations(diff, &store).await.unwrap(); assert_eq!(operations.len(), 1); assert_eq!( operations[0], Operation::Upload { host: record.host.id, tag: record.tag, local: record.idx, remote: None, } ); } #[tokio::test] async fn build_two_way_diff() { // a diff where local is ahead of remote for one, and remote for // another. One upload, one download let shared_record = test_record(); let remote_ahead = test_record(); let local_ahead = shared_record .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); assert_eq!(local_ahead.idx, 1); let local = vec![shared_record.clone(), local_ahead.clone()]; // local knows about the already synced, and something newer in the same store let remote = vec![shared_record.clone(), remote_ahead.clone()]; // remote knows about the already-synced, and one new record in a new store let (store, diff) = build_test_diff(local, remote).await; let operations = sync::operations(diff, &store).await.unwrap(); assert_eq!(operations.len(), 2); assert_eq!( operations, vec![ // Or in otherwords, local is ahead by one Operation::Upload { host: local_ahead.host.id, tag: local_ahead.tag, local: 1, remote: Some(0), }, // Or in other words, remote knows of a record in an entirely new store (tag) Operation::Download { host: remote_ahead.host.id, tag: remote_ahead.tag, local: None, remote: 0, }, ] ); } #[tokio::test] async fn build_complex_diff() { // One shared, ahead but known only by remote // One known only by local // One known only by remote let shared_record = test_record(); let local_only = test_record(); let local_only_20 = test_record(); let local_only_21 = local_only_20 .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let local_only_22 = local_only_21 .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let local_only_23 = local_only_22 .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 
32]); let remote_only = test_record(); let remote_only_20 = test_record(); let remote_only_21 = remote_only_20 .append(vec![2, 3, 2]) .encrypt::<PASETO_V4>(&[0; 32]); let remote_only_22 = remote_only_21 .append(vec![2, 3, 2]) .encrypt::<PASETO_V4>(&[0; 32]); let remote_only_23 = remote_only_22 .append(vec![2, 3, 2]) .encrypt::<PASETO_V4>(&[0; 32]); let remote_only_24 = remote_only_23 .append(vec![2, 3, 2]) .encrypt::<PASETO_V4>(&[0; 32]); let second_shared = test_record(); let second_shared_remote_ahead = second_shared .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let second_shared_remote_ahead2 = second_shared_remote_ahead .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let third_shared = test_record(); let third_shared_local_ahead = third_shared .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let third_shared_local_ahead2 = third_shared_local_ahead .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let fourth_shared = test_record(); let fourth_shared_remote_ahead = fourth_shared .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let fourth_shared_remote_ahead2 = fourth_shared_remote_ahead .append(vec![1, 2, 3]) .encrypt::<PASETO_V4>(&[0; 32]); let local = vec![ shared_record.clone(), second_shared.clone(), third_shared.clone(), fourth_shared.clone(), fourth_shared_remote_ahead.clone(), // single store, only local has it local_only.clone(), // bigger store, also only known by local local_only_20.clone(), local_only_21.clone(), local_only_22.clone(), local_only_23.clone(), // another shared store, but local is ahead on this one third_shared_local_ahead.clone(), third_shared_local_ahead2.clone(), ]; let remote = vec![ remote_only.clone(), remote_only_20.clone(), remote_only_21.clone(), remote_only_22.clone(), remote_only_23.clone(), remote_only_24.clone(), shared_record.clone(), second_shared.clone(), third_shared.clone(), second_shared_remote_ahead.clone(), second_shared_remote_ahead2.clone(), fourth_shared.clone(), 
fourth_shared_remote_ahead.clone(), fourth_shared_remote_ahead2.clone(), ]; // remote knows about the already-synced, and one new record in a new store let (store, diff) = build_test_diff(local, remote).await; let operations = sync::operations(diff, &store).await.unwrap(); assert_eq!(operations.len(), 7); let mut result_ops = vec![ // We started with a shared record, but the remote knows of two newer records in the // same store Operation::Download { local: Some(0), remote: 2, host: second_shared_remote_ahead.host.id, tag: second_shared_remote_ahead.tag, }, // We have a shared record, local knows of the first two but not the last Operation::Download { local: Some(1), remote: 2, host: fourth_shared_remote_ahead2.host.id, tag: fourth_shared_remote_ahead2.tag, }, // Remote knows of a store with a single record that local does not have Operation::Download { local: None, remote: 0, host: remote_only.host.id, tag: remote_only.tag, }, // Remote knows of a store with a bunch of records that local does not have Operation::Download { local: None, remote: 4, host: remote_only_20.host.id, tag: remote_only_20.tag, }, // Local knows of a record in a store that remote does not have Operation::Upload { local: 0, remote: None, host: local_only.host.id, tag: local_only.tag, }, // Local knows of 4 records in a store that remote does not have Operation::Upload { local: 3, remote: None, host: local_only_20.host.id, tag: local_only_20.tag, }, // Local knows of 2 more records in a shared store that remote only has one of Operation::Upload { local: 2, remote: Some(0), host: third_shared.host.id, tag: third_shared.tag, }, ]; result_ops.sort_by_key(|op| match op { Operation::Noop { host, tag } => (0, *host, tag.clone()), Operation::Upload { host, tag, .. } => (1, *host, tag.clone()), Operation::Download { host, tag, .. } => (2, *host, tag.clone()), }); assert_eq!(result_ops, operations); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/record/encryption.rs
crates/atuin-client/src/record/encryption.rs
use atuin_common::record::{ AdditionalData, DecryptedData, EncryptedData, Encryption, HostId, RecordId, RecordIdx, }; use base64::{Engine, engine::general_purpose}; use eyre::{Context, Result, ensure}; use rusty_paserk::{Key, KeyId, Local, PieWrappedKey}; use rusty_paseto::core::{ ImplicitAssertion, Key as DataKey, Local as LocalPurpose, Paseto, PasetoNonce, Payload, V4, }; use serde::{Deserialize, Serialize}; /// Use PASETO V4 Local encryption using the additional data as an implicit assertion. #[allow(non_camel_case_types)] pub struct PASETO_V4; /* Why do we use a random content-encryption key? Originally I was planning on using a derived key for encryption based on additional data. This would be a lot more secure than using the master key directly. However, there's an established norm of using a random key. This scheme might be otherwise known as - client-side encryption - envelope encryption - key wrapping A HSM (Hardware Security Module) provider, eg: AWS, Azure, GCP, or even a physical device like a YubiKey will have some keys that they keep to themselves. These keys never leave their physical hardware. If they never leave the hardware, then encrypting large amounts of data means giving them the data and waiting. This is not a practical solution. Instead, generate a unique key for your data, encrypt that using your HSM and then store that with your data. See - <https://docs.aws.amazon.com/wellarchitected/latest/financial-services-industry-lens/use-envelope-encryption-with-customer-master-keys.html> - <https://cloud.google.com/kms/docs/envelope-encryption> - <https://learn.microsoft.com/en-us/azure/storage/blobs/client-side-encryption?tabs=dotnet#encryption-and-decryption-via-the-envelope-technique> - <https://www.yubico.com/gb/product/yubihsm-2-fips/> - <https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#encrypting-stored-keys> Why would we care? In the past we have received some requests for company solutions. 
If in future we can configure a KMS service with little effort, then that would solve a lot of issues for their security team. Even for personal use, if a user is not comfortable with sharing keys between hosts, GCP HSM costs $1/month and $0.03 per 10,000 key operations. Assuming an active user runs 1000 atuin records a day, that would only cost them $1 and 10 cent a month. Additionally, key rotations are much simpler using this scheme. Rotating a key is as simple as re-encrypting the CEK, and not the message contents. This makes it very fast to rotate a key in bulk. For future reference, with asymmetric encryption, you can encrypt the CEK without the HSM's involvement, but decrypting will need the HSM. This allows the encryption path to still be extremely fast (no network calls) but downloads/decryption that happens in the background can make the network calls to the HSM */ impl Encryption for PASETO_V4 { fn re_encrypt( mut data: EncryptedData, _ad: AdditionalData, old_key: &[u8; 32], new_key: &[u8; 32], ) -> Result<EncryptedData> { let cek = Self::decrypt_cek(data.content_encryption_key, old_key)?; data.content_encryption_key = Self::encrypt_cek(cek, new_key); Ok(data) } fn encrypt(data: DecryptedData, ad: AdditionalData, key: &[u8; 32]) -> EncryptedData { // generate a random key for this entry // aka content-encryption-key (CEK) let random_key = Key::<V4, Local>::new_os_random(); // encode the implicit assertions let assertions = Assertions::from(ad).encode(); // build the payload and encrypt the token let payload = serde_json::to_string(&AtuinPayload { data: general_purpose::URL_SAFE_NO_PAD.encode(data.0), }) .expect("json encoding can't fail"); let nonce = DataKey::<32>::try_new_random().expect("could not source from random"); let nonce = PasetoNonce::<V4, LocalPurpose>::from(&nonce); let token = Paseto::<V4, LocalPurpose>::builder() .set_payload(Payload::from(payload.as_str())) .set_implicit_assertion(ImplicitAssertion::from(assertions.as_str())) 
.try_encrypt(&random_key.into(), &nonce) .expect("error encrypting atuin data"); EncryptedData { data: token, content_encryption_key: Self::encrypt_cek(random_key, key), } } fn decrypt(data: EncryptedData, ad: AdditionalData, key: &[u8; 32]) -> Result<DecryptedData> { let token = data.data; let cek = Self::decrypt_cek(data.content_encryption_key, key)?; // encode the implicit assertions let assertions = Assertions::from(ad).encode(); // decrypt the payload with the footer and implicit assertions let payload = Paseto::<V4, LocalPurpose>::try_decrypt( &token, &cek.into(), None, ImplicitAssertion::from(&*assertions), ) .context("could not decrypt entry")?; let payload: AtuinPayload = serde_json::from_str(&payload)?; let data = general_purpose::URL_SAFE_NO_PAD.decode(payload.data)?; Ok(DecryptedData(data)) } } impl PASETO_V4 { fn decrypt_cek(wrapped_cek: String, key: &[u8; 32]) -> Result<Key<V4, Local>> { let wrapping_key = Key::<V4, Local>::from_bytes(*key); // let wrapping_key = PasetoSymmetricKey::from(Key::from(key)); let AtuinFooter { kid, wpk } = serde_json::from_str(&wrapped_cek) .context("wrapped cek did not contain the correct contents")?; // check that the wrapping key matches the required key to decrypt. // In future, we could support multiple keys and use this key to // look up the key rather than only allow one key. // For now though we will only support the one key and key rotation will // have to be a hard reset let current_kid = wrapping_key.to_id(); ensure!( current_kid == kid, "attempting to decrypt with incorrect key. currently using {current_kid}, expecting {kid}" ); // decrypt the random key Ok(wpk.unwrap_key(&wrapping_key)?) 
} fn encrypt_cek(cek: Key<V4, Local>, key: &[u8; 32]) -> String { // aka key-encryption-key (KEK) let wrapping_key = Key::<V4, Local>::from_bytes(*key); // wrap the random key so we can decrypt it later let wrapped_cek = AtuinFooter { wpk: cek.wrap_pie(&wrapping_key), kid: wrapping_key.to_id(), }; serde_json::to_string(&wrapped_cek).expect("could not serialize wrapped cek") } } #[derive(Serialize, Deserialize)] struct AtuinPayload { data: String, } #[derive(Serialize, Deserialize)] /// Well-known footer claims for decrypting. This is not encrypted but is stored in the record. /// <https://github.com/paseto-standard/paseto-spec/blob/master/docs/02-Implementation-Guide/04-Claims.md#optional-footer-claims> struct AtuinFooter { /// Wrapped key wpk: PieWrappedKey<V4, Local>, /// ID of the key which was used to wrap kid: KeyId<V4, Local>, } /// Used in the implicit assertions. This is not encrypted and not stored in the data blob. // This cannot be changed, otherwise it breaks the authenticated encryption. 
#[derive(Debug, Copy, Clone, Serialize)] struct Assertions<'a> { id: &'a RecordId, idx: &'a RecordIdx, version: &'a str, tag: &'a str, host: &'a HostId, } impl<'a> From<AdditionalData<'a>> for Assertions<'a> { fn from(ad: AdditionalData<'a>) -> Self { Self { id: ad.id, version: ad.version, tag: ad.tag, host: ad.host, idx: ad.idx, } } } impl Assertions<'_> { fn encode(&self) -> String { serde_json::to_string(self).expect("could not serialize implicit assertions") } } #[cfg(test)] mod tests { use atuin_common::{ record::{Host, Record}, utils::uuid_v7, }; use super::*; #[test] fn round_trip() { let key = Key::<V4, Local>::new_os_random(); let ad = AdditionalData { id: &RecordId(uuid_v7()), version: "v0", tag: "kv", host: &HostId(uuid_v7()), idx: &0, }; let data = DecryptedData(vec![1, 2, 3, 4]); let encrypted = PASETO_V4::encrypt(data.clone(), ad, &key.to_bytes()); let decrypted = PASETO_V4::decrypt(encrypted, ad, &key.to_bytes()).unwrap(); assert_eq!(decrypted, data); } #[test] fn same_entry_different_output() { let key = Key::<V4, Local>::new_os_random(); let ad = AdditionalData { id: &RecordId(uuid_v7()), version: "v0", tag: "kv", host: &HostId(uuid_v7()), idx: &0, }; let data = DecryptedData(vec![1, 2, 3, 4]); let encrypted = PASETO_V4::encrypt(data.clone(), ad, &key.to_bytes()); let encrypted2 = PASETO_V4::encrypt(data, ad, &key.to_bytes()); assert_ne!( encrypted.data, encrypted2.data, "re-encrypting the same contents should have different output due to key randomization" ); } #[test] fn cannot_decrypt_different_key() { let key = Key::<V4, Local>::new_os_random(); let fake_key = Key::<V4, Local>::new_os_random(); let ad = AdditionalData { id: &RecordId(uuid_v7()), version: "v0", tag: "kv", host: &HostId(uuid_v7()), idx: &0, }; let data = DecryptedData(vec![1, 2, 3, 4]); let encrypted = PASETO_V4::encrypt(data, ad, &key.to_bytes()); let _ = PASETO_V4::decrypt(encrypted, ad, &fake_key.to_bytes()).unwrap_err(); } #[test] fn cannot_decrypt_different_id() { let key = 
Key::<V4, Local>::new_os_random(); let ad = AdditionalData { id: &RecordId(uuid_v7()), version: "v0", tag: "kv", host: &HostId(uuid_v7()), idx: &0, }; let data = DecryptedData(vec![1, 2, 3, 4]); let encrypted = PASETO_V4::encrypt(data, ad, &key.to_bytes()); let ad = AdditionalData { id: &RecordId(uuid_v7()), ..ad }; let _ = PASETO_V4::decrypt(encrypted, ad, &key.to_bytes()).unwrap_err(); } #[test] fn re_encrypt_round_trip() { let key1 = Key::<V4, Local>::new_os_random(); let key2 = Key::<V4, Local>::new_os_random(); let ad = AdditionalData { id: &RecordId(uuid_v7()), version: "v0", tag: "kv", host: &HostId(uuid_v7()), idx: &0, }; let data = DecryptedData(vec![1, 2, 3, 4]); let encrypted1 = PASETO_V4::encrypt(data.clone(), ad, &key1.to_bytes()); let encrypted2 = PASETO_V4::re_encrypt(encrypted1.clone(), ad, &key1.to_bytes(), &key2.to_bytes()) .unwrap(); // we only re-encrypt the content keys assert_eq!(encrypted1.data, encrypted2.data); assert_ne!( encrypted1.content_encryption_key, encrypted2.content_encryption_key ); let decrypted = PASETO_V4::decrypt(encrypted2, ad, &key2.to_bytes()).unwrap(); assert_eq!(decrypted, data); } #[test] fn full_record_round_trip() { let key = [0x55; 32]; let record = Record::builder() .id(RecordId(uuid_v7())) .version("v0".to_owned()) .tag("kv".to_owned()) .host(Host::new(HostId(uuid_v7()))) .timestamp(1687244806000000) .data(DecryptedData(vec![1, 2, 3, 4])) .idx(0) .build(); let encrypted = record.encrypt::<PASETO_V4>(&key); assert!(!encrypted.data.data.is_empty()); assert!(!encrypted.data.content_encryption_key.is_empty()); let decrypted = encrypted.decrypt::<PASETO_V4>(&key).unwrap(); assert_eq!(decrypted.data.0, [1, 2, 3, 4]); } #[test] fn full_record_round_trip_fail() { let key = [0x55; 32]; let record = Record::builder() .id(RecordId(uuid_v7())) .version("v0".to_owned()) .tag("kv".to_owned()) .host(Host::new(HostId(uuid_v7()))) .timestamp(1687244806000000) .data(DecryptedData(vec![1, 2, 3, 4])) .idx(0) .build(); let encrypted = 
record.encrypt::<PASETO_V4>(&key); let mut enc1 = encrypted.clone(); enc1.host = Host::new(HostId(uuid_v7())); let _ = enc1 .decrypt::<PASETO_V4>(&key) .expect_err("tampering with the host should result in auth failure"); let mut enc2 = encrypted; enc2.id = RecordId(uuid_v7()); let _ = enc2 .decrypt::<PASETO_V4>(&key) .expect_err("tampering with the id should result in auth failure"); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/record/mod.rs
crates/atuin-client/src/record/mod.rs
pub mod encryption; pub mod sqlite_store; pub mod store; #[cfg(feature = "sync")] pub mod sync;
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/record/sqlite_store.rs
crates/atuin-client/src/record/sqlite_store.rs
// Here we are using sqlite as a pretty dumb store, and will not be running any complex queries. // Multiple stores of multiple types are all stored in one chonky table (for now), and we just index // by tag/host use std::str::FromStr; use std::{path::Path, time::Duration}; use async_trait::async_trait; use eyre::{Result, eyre}; use fs_err as fs; use sqlx::{ Row, sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions, SqliteRow}, }; use atuin_common::record::{ EncryptedData, Host, HostId, Record, RecordId, RecordIdx, RecordStatus, }; use atuin_common::utils; use uuid::Uuid; use super::encryption::PASETO_V4; use super::store::Store; #[derive(Debug, Clone)] pub struct SqliteStore { pool: SqlitePool, } impl SqliteStore { pub async fn new(path: impl AsRef<Path>, timeout: f64) -> Result<Self> { let path = path.as_ref(); debug!("opening sqlite database at {path:?}"); if utils::broken_symlink(path) { eprintln!( "Atuin: Sqlite db path ({path:?}) is a broken symlink. Unable to read or create replacement." ); std::process::exit(1); } if !path.exists() && let Some(dir) = path.parent() { fs::create_dir_all(dir)?; } let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())? .journal_mode(SqliteJournalMode::Wal) .foreign_keys(true) .create_if_missing(true); let pool = SqlitePoolOptions::new() .acquire_timeout(Duration::from_secs_f64(timeout)) .connect_with(opts) .await?; Self::setup_db(&pool).await?; Ok(Self { pool }) } async fn setup_db(pool: &SqlitePool) -> Result<()> { debug!("running sqlite database setup"); sqlx::migrate!("./record-migrations").run(pool).await?; Ok(()) } async fn save_raw( tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, r: &Record<EncryptedData>, ) -> Result<()> { // In sqlite, we are "limited" to i64. But that is still fine, until 2262. 
sqlx::query( "insert or ignore into store(id, idx, host, tag, timestamp, version, data, cek) values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", ) .bind(r.id.0.as_hyphenated().to_string()) .bind(r.idx as i64) .bind(r.host.id.0.as_hyphenated().to_string()) .bind(r.tag.as_str()) .bind(r.timestamp as i64) .bind(r.version.as_str()) .bind(r.data.data.as_str()) .bind(r.data.content_encryption_key.as_str()) .execute(&mut **tx) .await?; Ok(()) } fn query_row(row: SqliteRow) -> Record<EncryptedData> { let idx: i64 = row.get("idx"); let timestamp: i64 = row.get("timestamp"); // tbh at this point things are pretty fucked so just panic let id = Uuid::from_str(row.get("id")).expect("invalid id UUID format in sqlite DB"); let host = Uuid::from_str(row.get("host")).expect("invalid host UUID format in sqlite DB"); Record { id: RecordId(id), idx: idx as u64, host: Host::new(HostId(host)), timestamp: timestamp as u64, tag: row.get("tag"), version: row.get("version"), data: EncryptedData { data: row.get("data"), content_encryption_key: row.get("cek"), }, } } async fn load_all(&self) -> Result<Vec<Record<EncryptedData>>> { let res = sqlx::query("select * from store ") .map(Self::query_row) .fetch_all(&self.pool) .await?; Ok(res) } } #[async_trait] impl Store for SqliteStore { async fn push_batch( &self, records: impl Iterator<Item = &Record<EncryptedData>> + Send + Sync, ) -> Result<()> { let mut tx = self.pool.begin().await?; for record in records { Self::save_raw(&mut tx, record).await?; } tx.commit().await?; Ok(()) } async fn get(&self, id: RecordId) -> Result<Record<EncryptedData>> { let res = sqlx::query("select * from store where store.id = ?1") .bind(id.0.as_hyphenated().to_string()) .map(Self::query_row) .fetch_one(&self.pool) .await?; Ok(res) } async fn delete(&self, id: RecordId) -> Result<()> { sqlx::query("delete from store where id = ?1") .bind(id.0.as_hyphenated().to_string()) .execute(&self.pool) .await?; Ok(()) } async fn delete_all(&self) -> Result<()> { sqlx::query("delete from 
store").execute(&self.pool).await?; Ok(()) } async fn last(&self, host: HostId, tag: &str) -> Result<Option<Record<EncryptedData>>> { let res = sqlx::query("select * from store where host=?1 and tag=?2 order by idx desc limit 1") .bind(host.0.as_hyphenated().to_string()) .bind(tag) .map(Self::query_row) .fetch_one(&self.pool) .await; match res { Err(sqlx::Error::RowNotFound) => Ok(None), Err(e) => Err(eyre!("an error occurred: {}", e)), Ok(record) => Ok(Some(record)), } } async fn first(&self, host: HostId, tag: &str) -> Result<Option<Record<EncryptedData>>> { self.idx(host, tag, 0).await } async fn len_all(&self) -> Result<u64> { let res: Result<(i64,), sqlx::Error> = sqlx::query_as("select count(*) from store") .fetch_one(&self.pool) .await; match res { Err(e) => Err(eyre!("failed to fetch local store len: {}", e)), Ok(v) => Ok(v.0 as u64), } } async fn len_tag(&self, tag: &str) -> Result<u64> { let res: Result<(i64,), sqlx::Error> = sqlx::query_as("select count(*) from store where tag=?1") .bind(tag) .fetch_one(&self.pool) .await; match res { Err(e) => Err(eyre!("failed to fetch local store len: {}", e)), Ok(v) => Ok(v.0 as u64), } } async fn len(&self, host: HostId, tag: &str) -> Result<u64> { let last = self.last(host, tag).await?; if let Some(last) = last { return Ok(last.idx + 1); } return Ok(0); } async fn next( &self, host: HostId, tag: &str, idx: RecordIdx, limit: u64, ) -> Result<Vec<Record<EncryptedData>>> { let res = sqlx::query( "select * from store where idx >= ?1 and host = ?2 and tag = ?3 order by idx asc limit ?4", ) .bind(idx as i64) .bind(host.0.as_hyphenated().to_string()) .bind(tag) .bind(limit as i64) .map(Self::query_row) .fetch_all(&self.pool) .await?; Ok(res) } async fn idx( &self, host: HostId, tag: &str, idx: RecordIdx, ) -> Result<Option<Record<EncryptedData>>> { let res = sqlx::query("select * from store where idx = ?1 and host = ?2 and tag = ?3") .bind(idx as i64) .bind(host.0.as_hyphenated().to_string()) .bind(tag) 
.map(Self::query_row) .fetch_one(&self.pool) .await; match res { Err(sqlx::Error::RowNotFound) => Ok(None), Err(e) => Err(eyre!("an error occurred: {}", e)), Ok(v) => Ok(Some(v)), } } async fn status(&self) -> Result<RecordStatus> { let mut status = RecordStatus::new(); let res: Result<Vec<(String, String, i64)>, sqlx::Error> = sqlx::query_as("select host, tag, max(idx) from store group by host, tag") .fetch_all(&self.pool) .await; let res = match res { Err(e) => return Err(eyre!("failed to fetch local store status: {}", e)), Ok(v) => v, }; for i in res { let host = HostId( Uuid::from_str(i.0.as_str()).expect("failed to parse uuid for local store status"), ); status.set_raw(host, i.1, i.2 as u64); } Ok(status) } async fn all_tagged(&self, tag: &str) -> Result<Vec<Record<EncryptedData>>> { let res = sqlx::query("select * from store where tag = ?1 order by timestamp asc") .bind(tag) .map(Self::query_row) .fetch_all(&self.pool) .await?; Ok(res) } /// Reencrypt every single item in this store with a new key /// Be careful - this may mess with sync. async fn re_encrypt(&self, old_key: &[u8; 32], new_key: &[u8; 32]) -> Result<()> { // Load all the records // In memory like some of the other code here // This will never be called in a hot loop, and only under the following circumstances // 1. The user has logged into a new account, with a new key. They are unlikely to have a // lot of data // 2. 
The user has encountered some sort of issue, and runs a maintenance command that // invokes this let all = self.load_all().await?; let re_encrypted = all .into_iter() .map(|record| record.re_encrypt::<PASETO_V4>(old_key, new_key)) .collect::<Result<Vec<_>>>()?; // next up, we delete all the old data and reinsert the new stuff // do it in one transaction, so if anything fails we rollback OK let mut tx = self.pool.begin().await?; let res = sqlx::query("delete from store").execute(&mut *tx).await?; let rows = res.rows_affected(); debug!("deleted {rows} rows"); // don't call push_batch, as it will start its own transaction // call the underlying save_raw for record in re_encrypted { Self::save_raw(&mut tx, &record).await?; } tx.commit().await?; Ok(()) } /// Verify that every record in this store can be decrypted with the current key /// Someday maybe also check each tag/record can be deserialized, but not for now. async fn verify(&self, key: &[u8; 32]) -> Result<()> { let all = self.load_all().await?; all.into_iter() .map(|record| record.decrypt::<PASETO_V4>(key)) .collect::<Result<Vec<_>>>()?; Ok(()) } /// Verify that every record in this store can be decrypted with the current key /// Someday maybe also check each tag/record can be deserialized, but not for now. 
async fn purge(&self, key: &[u8; 32]) -> Result<()> { let all = self.load_all().await?; for record in all.iter() { match record.clone().decrypt::<PASETO_V4>(key) { Ok(_) => continue, Err(_) => { println!( "Failed to decrypt {}, deleting", record.id.0.as_hyphenated() ); self.delete(record.id).await?; } } } Ok(()) } } #[cfg(test)] mod tests { use atuin_common::{ record::{DecryptedData, EncryptedData, Host, HostId, Record}, utils::uuid_v7, }; use crate::{ encryption::generate_encoded_key, record::{encryption::PASETO_V4, store::Store}, settings::test_local_timeout, }; use super::SqliteStore; fn test_record() -> Record<EncryptedData> { Record::builder() .host(Host::new(HostId(atuin_common::utils::uuid_v7()))) .version("v1".into()) .tag(atuin_common::utils::uuid_v7().simple().to_string()) .data(EncryptedData { data: "1234".into(), content_encryption_key: "1234".into(), }) .idx(0) .build() } #[tokio::test] async fn create_db() { let db = SqliteStore::new(":memory:", test_local_timeout()).await; assert!( db.is_ok(), "db could not be created, {:?}", db.err().unwrap() ); } #[tokio::test] async fn push_record() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.expect("failed to insert record"); } #[tokio::test] async fn get_record() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.unwrap(); let new_record = db.get(record.id).await.expect("failed to fetch record"); assert_eq!(record, new_record, "records are not equal"); } #[tokio::test] async fn last() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.unwrap(); let last = db .last(record.host.id, record.tag.as_str()) .await .expect("failed to get store len"); assert_eq!( last.unwrap().id, record.id, "expected to get back the same record that was inserted" ); } #[tokio::test] async fn 
first() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.unwrap(); let first = db .first(record.host.id, record.tag.as_str()) .await .expect("failed to get store len"); assert_eq!( first.unwrap().id, record.id, "expected to get back the same record that was inserted" ); } #[tokio::test] async fn len() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.unwrap(); let len = db .len(record.host.id, record.tag.as_str()) .await .expect("failed to get store len"); assert_eq!(len, 1, "expected length of 1 after insert"); } #[tokio::test] async fn len_tag() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let record = test_record(); db.push(&record).await.unwrap(); let len = db .len_tag(record.tag.as_str()) .await .expect("failed to get store len"); assert_eq!(len, 1, "expected length of 1 after insert"); } #[tokio::test] async fn len_different_tags() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); // these have different tags, so the len should be the same // we model multiple stores within one database // new store = new tag = independent length let first = test_record(); let second = test_record(); db.push(&first).await.unwrap(); db.push(&second).await.unwrap(); let first_len = db.len(first.host.id, first.tag.as_str()).await.unwrap(); let second_len = db.len(second.host.id, second.tag.as_str()).await.unwrap(); assert_eq!(first_len, 1, "expected length of 1 after insert"); assert_eq!(second_len, 1, "expected length of 1 after insert"); } #[tokio::test] async fn append_a_bunch() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let mut tail = test_record(); db.push(&tail).await.expect("failed to push record"); for _ in 1..100 { tail = tail.append(vec![1, 2, 3, 4]).encrypt::<PASETO_V4>(&[0; 32]); 
db.push(&tail).await.unwrap(); } assert_eq!( db.len(tail.host.id, tail.tag.as_str()).await.unwrap(), 100, "failed to insert 100 records" ); assert_eq!( db.len_tag(tail.tag.as_str()).await.unwrap(), 100, "failed to insert 100 records" ); } #[tokio::test] async fn append_a_big_bunch() { let db = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let mut records: Vec<Record<EncryptedData>> = Vec::with_capacity(10000); let mut tail = test_record(); records.push(tail.clone()); for _ in 1..10000 { tail = tail.append(vec![1, 2, 3]).encrypt::<PASETO_V4>(&[0; 32]); records.push(tail.clone()); } db.push_batch(records.iter()).await.unwrap(); assert_eq!( db.len(tail.host.id, tail.tag.as_str()).await.unwrap(), 10000, "failed to insert 10k records" ); } #[tokio::test] async fn re_encrypt() { let store = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let (key, _) = generate_encoded_key().unwrap(); let data = vec![0u8, 1u8, 2u8, 3u8]; let host_id = HostId(uuid_v7()); for i in 0..10 { let record = Record::builder() .host(Host::new(host_id)) .version(String::from("test")) .tag(String::from("test")) .idx(i) .data(DecryptedData(data.clone())) .build(); let record = record.encrypt::<PASETO_V4>(&key.into()); store .push(&record) .await .expect("failed to push encrypted record"); } // first, check that we can decrypt the data with the current key let all = store.all_tagged("test").await.unwrap(); assert_eq!(all.len(), 10, "failed to fetch all records"); for record in all { let decrypted = record.decrypt::<PASETO_V4>(&key.into()).unwrap(); assert_eq!(decrypted.data.0, data); } // reencrypt the store, then check if // 1) it cannot be decrypted with the old key // 2) it can be decrypted with the new key let (new_key, _) = generate_encoded_key().unwrap(); store .re_encrypt(&key.into(), &new_key.into()) .await .expect("failed to re-encrypt store"); let all = store.all_tagged("test").await.unwrap(); for record in all.iter() { let decrypted = 
record.clone().decrypt::<PASETO_V4>(&key.into()); assert!( decrypted.is_err(), "did not get error decrypting with old key after re-encrypt" ) } for record in all { let decrypted = record.decrypt::<PASETO_V4>(&new_key.into()).unwrap(); assert_eq!(decrypted.data.0, data); } assert_eq!(store.len(host_id, "test").await.unwrap(), 10); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/zsh_histdb.rs
crates/atuin-client/src/import/zsh_histdb.rs
// import old shell history from zsh-histdb! // automatically hoover up all that we can find // As far as i can tell there are no version numbers in the histdb sqlite DB, so we're going based // on the schema from 2022-05-01 // // I have run into some histories that will not import b/c of non UTF-8 characters. // // // An Example sqlite query for hsitdb data: // //id|session|command_id|place_id|exit_status|start_time|duration|id|argv|id|host|dir // // // select // history.id, // history.start_time, // places.host, // places.dir, // commands.argv // from history // left join commands on history.command_id = commands.id // left join places on history.place_id = places.id ; // // CREATE TABLE history (id integer primary key autoincrement, // session int, // command_id int references commands (id), // place_id int references places (id), // exit_status int, // start_time int, // duration int); // use std::collections::HashMap; use std::path::{Path, PathBuf}; use async_trait::async_trait; use atuin_common::utils::uuid_v7; use directories::UserDirs; use eyre::{Result, eyre}; use sqlx::{Pool, sqlite::SqlitePool}; use time::PrimitiveDateTime; use super::Importer; use crate::history::History; use crate::import::Loader; use crate::utils::{get_hostname, get_username}; #[derive(sqlx::FromRow, Debug)] pub struct HistDbEntryCount { pub count: usize, } #[derive(sqlx::FromRow, Debug)] pub struct HistDbEntry { pub id: i64, pub start_time: PrimitiveDateTime, pub host: Vec<u8>, pub dir: Vec<u8>, pub argv: Vec<u8>, pub duration: i64, pub exit_status: i64, pub session: i64, } #[derive(Debug)] pub struct ZshHistDb { histdb: Vec<HistDbEntry>, username: String, } /// Read db at given file, return vector of entries. 
async fn hist_from_db(dbpath: PathBuf) -> Result<Vec<HistDbEntry>> { let pool = SqlitePool::connect(dbpath.to_str().unwrap()).await?; hist_from_db_conn(pool).await } async fn hist_from_db_conn(pool: Pool<sqlx::Sqlite>) -> Result<Vec<HistDbEntry>> { let query = r#" SELECT history.id, history.start_time, history.duration, places.host, places.dir, commands.argv, history.exit_status, history.session FROM history LEFT JOIN commands ON history.command_id = commands.id LEFT JOIN places ON history.place_id = places.id ORDER BY history.start_time "#; let histdb_vec: Vec<HistDbEntry> = sqlx::query_as::<_, HistDbEntry>(query) .fetch_all(&pool) .await?; Ok(histdb_vec) } impl ZshHistDb { pub fn histpath_candidate() -> PathBuf { // By default histdb database is `${HOME}/.histdb/zsh-history.db` // This can be modified by ${HISTDB_FILE} // // if [[ -z ${HISTDB_FILE} ]]; then // typeset -g HISTDB_FILE="${HOME}/.histdb/zsh-history.db" let user_dirs = UserDirs::new().unwrap(); // should catch error here? let home_dir = user_dirs.home_dir(); std::env::var("HISTDB_FILE") .as_ref() .map(|x| Path::new(x).to_path_buf()) .unwrap_or_else(|_err| home_dir.join(".histdb/zsh-history.db")) } pub fn histpath() -> Result<PathBuf> { let histdb_path = ZshHistDb::histpath_candidate(); if histdb_path.exists() { Ok(histdb_path) } else { Err(eyre!( "Could not find history file. Try setting $HISTDB_FILE" )) } } } #[async_trait] impl Importer for ZshHistDb { // Not sure how this is used const NAME: &'static str = "zsh_histdb"; /// Creates a new ZshHistDb and populates the history based on the pre-populated data /// structure. 
async fn new() -> Result<Self> { let dbpath = ZshHistDb::histpath()?; let histdb_entry_vec = hist_from_db(dbpath).await?; Ok(Self { histdb: histdb_entry_vec, username: get_username(), }) } async fn entries(&mut self) -> Result<usize> { Ok(self.histdb.len()) } async fn load(self, h: &mut impl Loader) -> Result<()> { let mut session_map = HashMap::new(); for entry in self.histdb { let command = match std::str::from_utf8(&entry.argv) { Ok(s) => s.trim_end(), Err(_) => continue, // we can skip past things like invalid utf8 }; let cwd = match std::str::from_utf8(&entry.dir) { Ok(s) => s.trim_end(), Err(_) => continue, // we can skip past things like invalid utf8 }; let hostname = format!( "{}:{}", String::from_utf8(entry.host).unwrap_or_else(|_e| get_hostname()), self.username ); let session = session_map.entry(entry.session).or_insert_with(uuid_v7); let imported = History::import() .timestamp(entry.start_time.assume_utc()) .command(command) .cwd(cwd) .duration(entry.duration * 1_000_000_000) .exit(entry.exit_status) .session(session.as_simple().to_string()) .hostname(hostname) .build(); h.push(imported.into()).await?; } Ok(()) } } #[cfg(test)] mod test { use super::*; use sqlx::sqlite::SqlitePoolOptions; use std::env; #[tokio::test(flavor = "multi_thread")] #[allow(unsafe_code)] async fn test_env_vars() { let test_env_db = "nonstd-zsh-history.db"; let key = "HISTDB_FILE"; // TODO: Audit that the environment access only happens in single-threaded code. unsafe { env::set_var(key, test_env_db) }; // test the env got set assert_eq!(env::var(key).unwrap(), test_env_db.to_string()); // test histdb returns the proper db from previous step let histdb_path = ZshHistDb::histpath_candidate(); assert_eq!(histdb_path.to_str().unwrap(), test_env_db); } #[tokio::test(flavor = "multi_thread")] async fn test_import() { let pool: SqlitePool = SqlitePoolOptions::new() .min_connections(2) .connect(":memory:") .await .unwrap(); // sql dump directly from a test database. 
let db_sql = r#" PRAGMA foreign_keys=OFF; BEGIN TRANSACTION; CREATE TABLE commands (id integer primary key autoincrement, argv text, unique(argv) on conflict ignore); INSERT INTO commands VALUES(1,'pwd'); INSERT INTO commands VALUES(2,'curl google.com'); INSERT INTO commands VALUES(3,'bash'); CREATE TABLE places (id integer primary key autoincrement, host text, dir text, unique(host, dir) on conflict ignore); INSERT INTO places VALUES(1,'mbp16.local','/home/noyez'); CREATE TABLE history (id integer primary key autoincrement, session int, command_id int references commands (id), place_id int references places (id), exit_status int, start_time int, duration int); INSERT INTO history VALUES(1,0,1,1,0,1651497918,1); INSERT INTO history VALUES(2,0,2,1,0,1651497923,1); INSERT INTO history VALUES(3,0,3,1,NULL,1651497930,NULL); DELETE FROM sqlite_sequence; INSERT INTO sqlite_sequence VALUES('commands',3); INSERT INTO sqlite_sequence VALUES('places',3); INSERT INTO sqlite_sequence VALUES('history',3); CREATE INDEX hist_time on history(start_time); CREATE INDEX place_dir on places(dir); CREATE INDEX place_host on places(host); CREATE INDEX history_command_place on history(command_id, place_id); COMMIT; "#; sqlx::query(db_sql).execute(&pool).await.unwrap(); // test histdb iterator let histdb_vec = hist_from_db_conn(pool).await.unwrap(); let histdb = ZshHistDb { histdb: histdb_vec, username: get_username(), }; println!("h: {:#?}", histdb.histdb); println!("counter: {:?}", histdb.histdb.len()); for i in histdb.histdb { println!("{i:?}"); } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/replxx.rs
crates/atuin-client/src/import/replxx.rs
use std::{path::PathBuf, str}; use async_trait::async_trait; use directories::UserDirs; use eyre::{Result, eyre}; use time::{OffsetDateTime, PrimitiveDateTime, macros::format_description}; use super::{Importer, Loader, get_histfile_path, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct Replxx { bytes: Vec<u8>, } fn default_histpath() -> Result<PathBuf> { let user_dirs = UserDirs::new().ok_or_else(|| eyre!("could not find user directories"))?; let home_dir = user_dirs.home_dir(); // There is no default histfile for replxx. // Here we try a couple of common names. let mut candidates = ["replxx_history.txt", ".histfile"].iter(); loop { match candidates.next() { Some(candidate) => { let histpath = home_dir.join(candidate); if histpath.exists() { break Ok(histpath); } } None => { break Err(eyre!( "Could not find history file. Try setting and exporting $HISTFILE" )); } } } } #[async_trait] impl Importer for Replxx { const NAME: &'static str = "replxx"; async fn new() -> Result<Self> { let bytes = read_to_end(get_histfile_path(default_histpath)?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { Ok(super::count_lines(&self.bytes) / 2) } async fn load(self, h: &mut impl Loader) -> Result<()> { let mut timestamp = OffsetDateTime::UNIX_EPOCH; for b in unix_byte_lines(&self.bytes) { let s = std::str::from_utf8(b)?; match try_parse_line_as_timestamp(s) { Some(t) => timestamp = t, None => { // replxx uses ETB character (0x17) as line breaker let cmd = s.replace('\u{0017}', "\n"); let imported = History::import().timestamp(timestamp).command(cmd); h.push(imported.build().into()).await?; } } } Ok(()) } } fn try_parse_line_as_timestamp(line: &str) -> Option<OffsetDateTime> { // replxx history date time format: ### yyyy-mm-dd hh:mm:ss.xxx let date_time_str = line.strip_prefix("### ")?; let format = format_description!("[year]-[month]-[day] [hour]:[minute]:[second].[subsecond digits:3]"); let 
primitive_date_time = PrimitiveDateTime::parse(date_time_str, format).ok()?; // There is no safe way to get local time offset. // For simplicity let's just assume UTC. Some(primitive_date_time.assume_utc()) } #[cfg(test)] mod test { use crate::import::{Importer, tests::TestLoader}; use super::Replxx; #[tokio::test] async fn parse_complex() { let bytes = r#"### 2024-02-10 22:16:28.302 select * from remote('127.0.0.1:20222', view(select 1)) ### 2024-02-10 22:16:36.919 select * from numbers(10) ### 2024-02-10 22:16:41.710 select * from system.numbers ### 2024-02-10 22:19:28.655 select 1 ### 2024-02-22 11:15:33.046 CREATE TABLE test( stamp DateTime('UTC'))ENGINE = MergeTreePARTITION BY toDate(stamp)order by tuple() as select toDateTime('2020-01-01')+number*60 from numbers(80000); "# .as_bytes() .to_owned(); let replxx = Replxx { bytes }; let mut loader = TestLoader::default(); replxx.load(&mut loader).await.unwrap(); let mut history = loader.buf.into_iter(); // simple wrapper for replxx history entry macro_rules! history { ($timestamp:expr_2021, $command:expr_2021) => { let h = history.next().expect("missing entry in history"); assert_eq!(h.command.as_str(), $command); assert_eq!(h.timestamp.unix_timestamp(), $timestamp); }; } history!( 1707603388, "select * from remote('127.0.0.1:20222', view(select 1))" ); history!(1707603396, "select * from numbers(10)"); history!(1707603401, "select * from system.numbers"); history!(1707603568, "select 1"); history!( 1708600533, "CREATE TABLE test\n( stamp DateTime('UTC'))\nENGINE = MergeTree\nPARTITION BY toDate(stamp)\norder by tuple() as select toDateTime('2020-01-01')+number*60 from numbers(80000);" ); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/nu.rs
crates/atuin-client/src/import/nu.rs
// import old shell history! // automatically hoover up all that we can find use std::path::PathBuf; use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use time::OffsetDateTime; use super::{Importer, Loader, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct Nu { bytes: Vec<u8>, } fn get_histpath() -> Result<PathBuf> { let base = BaseDirs::new().ok_or_else(|| eyre!("could not determine data directory"))?; let config_dir = base.config_dir().join("nushell"); let histpath = config_dir.join("history.txt"); if histpath.exists() { Ok(histpath) } else { Err(eyre!("Could not find history file.")) } } #[async_trait] impl Importer for Nu { const NAME: &'static str = "nu"; async fn new() -> Result<Self> { let bytes = read_to_end(get_histpath()?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { Ok(super::count_lines(&self.bytes)) } async fn load(self, h: &mut impl Loader) -> Result<()> { let now = OffsetDateTime::now_utc(); let mut counter = 0; for b in unix_byte_lines(&self.bytes) { let s = match std::str::from_utf8(b) { Ok(s) => s, Err(_) => continue, // we can skip past things like invalid utf8 }; let cmd: String = s.replace("<\\n>", "\n"); let offset = time::Duration::nanoseconds(counter); counter += 1; let entry = History::import().timestamp(now - offset).command(cmd); h.push(entry.build().into()).await?; } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/resh.rs
crates/atuin-client/src/import/resh.rs
use std::path::PathBuf; use async_trait::async_trait; use directories::UserDirs; use eyre::{Result, eyre}; use serde::Deserialize; use atuin_common::utils::uuid_v7; use time::OffsetDateTime; use super::{Importer, Loader, get_histfile_path, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] pub struct ReshEntry { pub cmd_line: String, pub exit_code: i64, pub shell: String, pub uname: String, pub session_id: String, pub home: String, pub lang: String, pub lc_all: String, pub login: String, pub pwd: String, pub pwd_after: String, pub shell_env: String, pub term: String, pub real_pwd: String, pub real_pwd_after: String, pub pid: i64, pub session_pid: i64, pub host: String, pub hosttype: String, pub ostype: String, pub machtype: String, pub shlvl: i64, pub timezone_before: String, pub timezone_after: String, pub realtime_before: f64, pub realtime_after: f64, pub realtime_before_local: f64, pub realtime_after_local: f64, pub realtime_duration: f64, pub realtime_since_session_start: f64, pub realtime_since_boot: f64, pub git_dir: String, pub git_real_dir: String, pub git_origin_remote: String, pub git_dir_after: String, pub git_real_dir_after: String, pub git_origin_remote_after: String, pub machine_id: String, pub os_release_id: String, pub os_release_version_id: String, pub os_release_id_like: String, pub os_release_name: String, pub os_release_pretty_name: String, pub resh_uuid: String, pub resh_version: String, pub resh_revision: String, pub parts_merged: bool, pub recalled: bool, pub recall_last_cmd_line: String, pub cols: String, pub lines: String, } #[derive(Debug)] pub struct Resh { bytes: Vec<u8>, } fn default_histpath() -> Result<PathBuf> { let user_dirs = UserDirs::new().ok_or_else(|| eyre!("could not find user directories"))?; let home_dir = user_dirs.home_dir(); Ok(home_dir.join(".resh_history.json")) } #[async_trait] impl Importer for Resh { const NAME: &'static str 
= "resh"; async fn new() -> Result<Self> { let bytes = read_to_end(get_histfile_path(default_histpath)?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { Ok(super::count_lines(&self.bytes)) } async fn load(self, h: &mut impl Loader) -> Result<()> { for b in unix_byte_lines(&self.bytes) { let s = match std::str::from_utf8(b) { Ok(s) => s, Err(_) => continue, // we can skip past things like invalid utf8 }; let entry = match serde_json::from_str::<ReshEntry>(s) { Ok(e) => e, Err(_) => continue, // skip invalid json :shrug: }; #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] let timestamp = { let secs = entry.realtime_before.floor() as i64; let nanosecs = (entry.realtime_before.fract() * 1_000_000_000_f64).round() as i64; OffsetDateTime::from_unix_timestamp(secs)? + time::Duration::nanoseconds(nanosecs) }; #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] let duration = { let secs = entry.realtime_after.floor() as i64; let nanosecs = (entry.realtime_after.fract() * 1_000_000_000_f64).round() as i64; let base = OffsetDateTime::from_unix_timestamp(secs)? + time::Duration::nanoseconds(nanosecs); let difference = base - timestamp; difference.whole_nanoseconds() as i64 }; let imported = History::import() .command(entry.cmd_line) .timestamp(timestamp) .duration(duration) .exit(entry.exit_code) .cwd(entry.pwd) .hostname(entry.host) // CHECK: should we add uuid here? It's not set in the other importers .session(uuid_v7().as_simple().to_string()); h.push(imported.build().into()).await?; } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/bash.rs
crates/atuin-client/src/import/bash.rs
use std::{path::PathBuf, str}; use async_trait::async_trait; use directories::UserDirs; use eyre::{Result, eyre}; use itertools::Itertools; use time::{Duration, OffsetDateTime}; use super::{Importer, Loader, get_histfile_path, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct Bash { bytes: Vec<u8>, } fn default_histpath() -> Result<PathBuf> { let user_dirs = UserDirs::new().ok_or_else(|| eyre!("could not find user directories"))?; let home_dir = user_dirs.home_dir(); Ok(home_dir.join(".bash_history")) } #[async_trait] impl Importer for Bash { const NAME: &'static str = "bash"; async fn new() -> Result<Self> { let bytes = read_to_end(get_histfile_path(default_histpath)?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { let count = unix_byte_lines(&self.bytes) .map(LineType::from) .filter(|line| matches!(line, LineType::Command(_))) .count(); Ok(count) } async fn load(self, h: &mut impl Loader) -> Result<()> { let lines = unix_byte_lines(&self.bytes) .map(LineType::from) .filter(|line| !matches!(line, LineType::NotUtf8)) // invalid utf8 are ignored .collect_vec(); let (commands_before_first_timestamp, first_timestamp) = lines .iter() .enumerate() .find_map(|(i, line)| match line { LineType::Timestamp(t) => Some((i, *t)), _ => None, }) // if no known timestamps, use now as base .unwrap_or((lines.len(), OffsetDateTime::now_utc())); // if no timestamp is recorded, then use this increment to set an arbitrary timestamp // to preserve ordering // this increment is deliberately very small to prevent particularly fast fingers // causing ordering issues; it also helps in handling the "here document" syntax, // where several lines are recorded in succession without individual timestamps let timestamp_increment = Duration::milliseconds(1); // make sure there is a minimum amount of time before the first known timestamp // to fit all commands, given the default increment let mut next_timestamp = 
first_timestamp - timestamp_increment * commands_before_first_timestamp as i32; for line in lines.into_iter() { match line { LineType::NotUtf8 => unreachable!(), // already filtered LineType::Empty => {} // do nothing LineType::Timestamp(t) => { if t < next_timestamp { warn!( "Time reversal detected in Bash history! Commands may be ordered incorrectly." ); } next_timestamp = t; } LineType::Command(c) => { let imported = History::import().timestamp(next_timestamp).command(c); h.push(imported.build().into()).await?; next_timestamp += timestamp_increment; } } } Ok(()) } } #[derive(Debug, Clone)] enum LineType<'a> { NotUtf8, /// Can happen when using the "here document" syntax. Empty, /// A timestamp line start with a '#', followed immediately by an integer /// that represents seconds since UNIX epoch. Timestamp(OffsetDateTime), /// Anything else. Command(&'a str), } impl<'a> From<&'a [u8]> for LineType<'a> { fn from(bytes: &'a [u8]) -> Self { let Ok(line) = str::from_utf8(bytes) else { return LineType::NotUtf8; }; if line.is_empty() { return LineType::Empty; } match try_parse_line_as_timestamp(line) { Some(time) => LineType::Timestamp(time), None => LineType::Command(line), } } } fn try_parse_line_as_timestamp(line: &str) -> Option<OffsetDateTime> { let seconds = line.strip_prefix('#')?.parse().ok()?; OffsetDateTime::from_unix_timestamp(seconds).ok() } #[cfg(test)] mod test { use std::cmp::Ordering; use itertools::{Itertools, assert_equal}; use crate::import::{Importer, tests::TestLoader}; use super::Bash; #[tokio::test] async fn parse_no_timestamps() { let bytes = r"cargo install atuin cargo update cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷ " .as_bytes() .to_owned(); let mut bash = Bash { bytes }; assert_eq!(bash.entries().await.unwrap(), 3); let mut loader = TestLoader::default(); bash.load(&mut loader).await.unwrap(); assert_equal( loader.buf.iter().map(|h| h.command.as_str()), [ "cargo install atuin", "cargo update", "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷", ], ); 
assert!(is_strictly_sorted(loader.buf.iter().map(|h| h.timestamp))) } #[tokio::test] async fn parse_with_timestamps() { let bytes = b"#1672918999 git reset #1672919006 git clean -dxf #1672919020 cd ../ " .to_vec(); let mut bash = Bash { bytes }; assert_eq!(bash.entries().await.unwrap(), 3); let mut loader = TestLoader::default(); bash.load(&mut loader).await.unwrap(); assert_equal( loader.buf.iter().map(|h| h.command.as_str()), ["git reset", "git clean -dxf", "cd ../"], ); assert_equal( loader.buf.iter().map(|h| h.timestamp.unix_timestamp()), [1672918999, 1672919006, 1672919020], ) } #[tokio::test] async fn parse_with_partial_timestamps() { let bytes = b"git reset #1672919006 git clean -dxf cd ../ " .to_vec(); let mut bash = Bash { bytes }; assert_eq!(bash.entries().await.unwrap(), 3); let mut loader = TestLoader::default(); bash.load(&mut loader).await.unwrap(); assert_equal( loader.buf.iter().map(|h| h.command.as_str()), ["git reset", "git clean -dxf", "cd ../"], ); assert!(is_strictly_sorted(loader.buf.iter().map(|h| h.timestamp))) } fn is_strictly_sorted<T>(iter: impl IntoIterator<Item = T>) -> bool where T: Clone + PartialOrd, { iter.into_iter() .tuple_windows() .all(|(a, b)| matches!(a.partial_cmp(&b), Some(Ordering::Less))) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/xonsh.rs
crates/atuin-client/src/import/xonsh.rs
use std::env; use std::fs::{self, File}; use std::path::{Path, PathBuf}; use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use serde::Deserialize; use time::OffsetDateTime; use uuid::Uuid; use uuid::timestamp::{Timestamp, context::NoContext}; use super::{Importer, Loader, get_histdir_path}; use crate::history::History; use crate::utils::get_host_user; // Note: both HistoryFile and HistoryData have other keys present in the JSON, we don't // care about them so we leave them unspecified so as to avoid deserializing unnecessarily. #[derive(Debug, Deserialize)] struct HistoryFile { data: HistoryData, } #[derive(Debug, Deserialize)] struct HistoryData { sessionid: String, cmds: Vec<HistoryCmd>, } #[derive(Debug, Deserialize)] struct HistoryCmd { cwd: String, inp: String, rtn: Option<i64>, ts: (f64, f64), } #[derive(Debug)] pub struct Xonsh { // history is stored as a bunch of json files, one per session sessions: Vec<HistoryData>, hostname: String, } fn xonsh_hist_dir(xonsh_data_dir: Option<String>) -> Result<PathBuf> { // if running within xonsh, this will be available if let Some(d) = xonsh_data_dir { let mut path = PathBuf::from(d); path.push("history_json"); return Ok(path); } // otherwise, fall back to default let base = BaseDirs::new().ok_or_else(|| eyre!("Could not determine home directory"))?; let hist_dir = base.data_dir().join("xonsh/history_json"); if hist_dir.exists() || cfg!(test) { Ok(hist_dir) } else { Err(eyre!("Could not find xonsh history files")) } } fn load_sessions(hist_dir: &Path) -> Result<Vec<HistoryData>> { let mut sessions = vec![]; for entry in fs::read_dir(hist_dir)? { let p = entry?.path(); let ext = p.extension().and_then(|e| e.to_str()); if p.is_file() && ext == Some("json") && let Some(data) = load_session(&p)? 
{ sessions.push(data); } } Ok(sessions) } fn load_session(path: &Path) -> Result<Option<HistoryData>> { let file = File::open(path)?; // empty files are not valid json, so we can't deserialize them if file.metadata()?.len() == 0 { return Ok(None); } let mut hist_file: HistoryFile = serde_json::from_reader(file)?; // if there are commands in this session, replace the existing UUIDv4 // with a UUIDv7 generated from the timestamp of the first command if let Some(cmd) = hist_file.data.cmds.first() { let seconds = cmd.ts.0.trunc() as u64; let nanos = (cmd.ts.0.fract() * 1_000_000_000_f64) as u32; let ts = Timestamp::from_unix(NoContext, seconds, nanos); hist_file.data.sessionid = Uuid::new_v7(ts).to_string(); } Ok(Some(hist_file.data)) } #[async_trait] impl Importer for Xonsh { const NAME: &'static str = "xonsh"; async fn new() -> Result<Self> { // wrap xonsh-specific path resolver in general one so that it respects $HISTPATH let xonsh_data_dir = env::var("XONSH_DATA_DIR").ok(); let hist_dir = get_histdir_path(|| xonsh_hist_dir(xonsh_data_dir))?; let sessions = load_sessions(&hist_dir)?; let hostname = get_host_user(); Ok(Xonsh { sessions, hostname }) } async fn entries(&mut self) -> Result<usize> { let total = self.sessions.iter().map(|s| s.cmds.len()).sum(); Ok(total) } async fn load(self, loader: &mut impl Loader) -> Result<()> { for session in self.sessions { for cmd in session.cmds { let (start, end) = cmd.ts; let ts_nanos = (start * 1_000_000_000_f64) as i128; let timestamp = OffsetDateTime::from_unix_timestamp_nanos(ts_nanos)?; let duration = (end - start) * 1_000_000_000_f64; match cmd.rtn { Some(exit) => { let entry = History::import() .timestamp(timestamp) .duration(duration.trunc() as i64) .exit(exit) .command(cmd.inp.trim()) .cwd(cmd.cwd) .session(session.sessionid.clone()) .hostname(self.hostname.clone()); loader.push(entry.build().into()).await?; } None => { let entry = History::import() .timestamp(timestamp) .duration(duration.trunc() as i64) 
.command(cmd.inp.trim()) .cwd(cmd.cwd) .session(session.sessionid.clone()) .hostname(self.hostname.clone()); loader.push(entry.build().into()).await?; } } } } Ok(()) } } #[cfg(test)] mod tests { use time::macros::datetime; use super::*; use crate::history::History; use crate::import::tests::TestLoader; #[test] fn test_hist_dir_xonsh() { let hist_dir = xonsh_hist_dir(Some("/home/user/xonsh_data".to_string())).unwrap(); assert_eq!( hist_dir, PathBuf::from("/home/user/xonsh_data/history_json") ); } #[tokio::test] async fn test_import() { let dir = PathBuf::from("tests/data/xonsh"); let sessions = load_sessions(&dir).unwrap(); let hostname = "box:user".to_string(); let xonsh = Xonsh { sessions, hostname }; let mut loader = TestLoader::default(); xonsh.load(&mut loader).await.unwrap(); // order in buf will depend on filenames, so sort by timestamp for consistency loader.buf.sort_by_key(|h| h.timestamp); for (actual, expected) in loader.buf.iter().zip(expected_hist_entries().iter()) { assert_eq!(actual.timestamp, expected.timestamp); assert_eq!(actual.command, expected.command); assert_eq!(actual.cwd, expected.cwd); assert_eq!(actual.exit, expected.exit); assert_eq!(actual.duration, expected.duration); assert_eq!(actual.hostname, expected.hostname); } } fn expected_hist_entries() -> [History; 4] { [ History::import() .timestamp(datetime!(2024-02-6 04:17:59.478272256 +00:00:00)) .command("echo hello world!".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(0) .duration(4651069) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 04:18:01.70632832 +00:00:00)) .command("ls -l".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(0) .duration(21288633) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 17:41:31.142515968 +00:00:00)) .command("false".to_string()) .cwd("/home/user/Documents/code/atuin/atuin-client".to_string()) .exit(1) 
.duration(10269403) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 17:41:32.271584 +00:00:00)) .command("exit".to_string()) .cwd("/home/user/Documents/code/atuin/atuin-client".to_string()) .exit(0) .duration(4259347) .hostname("box:user".to_string()) .build() .into(), ] } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/zsh.rs
crates/atuin-client/src/import/zsh.rs
// import old shell history! // automatically hoover up all that we can find use std::borrow::Cow; use std::path::PathBuf; use async_trait::async_trait; use directories::UserDirs; use eyre::{Result, eyre}; use time::OffsetDateTime; use super::{Importer, Loader, get_histfile_path, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct Zsh { bytes: Vec<u8>, } fn default_histpath() -> Result<PathBuf> { // oh-my-zsh sets HISTFILE=~/.zhistory // zsh has no default value for this var, but uses ~/.zhistory. // zsh-newuser-install propose as default .histfile https://github.com/zsh-users/zsh/blob/master/Functions/Newuser/zsh-newuser-install#L794 // we could maybe be smarter about this in the future :) let user_dirs = UserDirs::new().ok_or_else(|| eyre!("could not find user directories"))?; let home_dir = user_dirs.home_dir(); let mut candidates = [".zhistory", ".zsh_history", ".histfile"].iter(); loop { match candidates.next() { Some(candidate) => { let histpath = home_dir.join(candidate); if histpath.exists() { break Ok(histpath); } } None => { break Err(eyre!( "Could not find history file. 
Try setting and exporting $HISTFILE" )); } } } } #[async_trait] impl Importer for Zsh { const NAME: &'static str = "zsh"; async fn new() -> Result<Self> { let bytes = read_to_end(get_histfile_path(default_histpath)?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { Ok(super::count_lines(&self.bytes)) } async fn load(self, h: &mut impl Loader) -> Result<()> { let now = OffsetDateTime::now_utc(); let mut line = String::new(); let mut counter = 0; for b in unix_byte_lines(&self.bytes) { let s = match unmetafy(b) { Some(s) => s, _ => continue, // we can skip past things like invalid utf8 }; if let Some(s) = s.strip_suffix('\\') { line.push_str(s); line.push_str("\\\n"); } else { line.push_str(&s); let command = std::mem::take(&mut line); if let Some(command) = command.strip_prefix(": ") { counter += 1; h.push(parse_extended(command, counter)).await?; } else { let offset = time::Duration::seconds(counter); counter += 1; let imported = History::import() // preserve ordering .timestamp(now - offset) .command(command.trim_end().to_string()); h.push(imported.build().into()).await?; } } } Ok(()) } } fn parse_extended(line: &str, counter: i64) -> History { let (time, duration) = line.split_once(':').unwrap(); let (duration, command) = duration.split_once(';').unwrap(); let time = time .parse::<i64>() .ok() .and_then(|t| OffsetDateTime::from_unix_timestamp(t).ok()) .unwrap_or_else(OffsetDateTime::now_utc) + time::Duration::milliseconds(counter); // use nanos, because why the hell not? we won't display them. 
let duration = duration.parse::<i64>().map_or(-1, |t| t * 1_000_000_000); let imported = History::import() .timestamp(time) .command(command.trim_end().to_string()) .duration(duration); imported.build().into() } fn unmetafy(line: &[u8]) -> Option<Cow<'_, str>> { if line.contains(&0x83) { let mut s = Vec::with_capacity(line.len()); let mut is_meta = false; for ch in line { if *ch == 0x83 { is_meta = true; } else if is_meta { is_meta = false; s.push(*ch ^ 32); } else { s.push(*ch) } } String::from_utf8(s).ok().map(Cow::Owned) } else { std::str::from_utf8(line).ok().map(Cow::Borrowed) } } #[cfg(test)] mod test { use itertools::assert_equal; use crate::import::tests::TestLoader; use super::*; #[test] fn test_parse_extended_simple() { let parsed = parse_extended("1613322469:0;cargo install atuin", 0); assert_eq!(parsed.command, "cargo install atuin"); assert_eq!(parsed.duration, 0); assert_eq!( parsed.timestamp, OffsetDateTime::from_unix_timestamp(1_613_322_469).unwrap() ); let parsed = parse_extended("1613322469:10;cargo install atuin;cargo update", 0); assert_eq!(parsed.command, "cargo install atuin;cargo update"); assert_eq!(parsed.duration, 10_000_000_000); assert_eq!( parsed.timestamp, OffsetDateTime::from_unix_timestamp(1_613_322_469).unwrap() ); let parsed = parse_extended("1613322469:10;cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷", 0); assert_eq!(parsed.command, "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷"); assert_eq!(parsed.duration, 10_000_000_000); assert_eq!( parsed.timestamp, OffsetDateTime::from_unix_timestamp(1_613_322_469).unwrap() ); let parsed = parse_extended("1613322469:10;cargo install \\n atuin\n", 0); assert_eq!(parsed.command, "cargo install \\n atuin"); assert_eq!(parsed.duration, 10_000_000_000); assert_eq!( parsed.timestamp, OffsetDateTime::from_unix_timestamp(1_613_322_469).unwrap() ); } #[tokio::test] async fn test_parse_file() { let bytes = r": 1613322469:0;cargo install atuin : 1613322469:10;cargo install atuin; \ cargo update : 1613322469:10;cargo 
:b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷ " .as_bytes() .to_owned(); let mut zsh = Zsh { bytes }; assert_eq!(zsh.entries().await.unwrap(), 4); let mut loader = TestLoader::default(); zsh.load(&mut loader).await.unwrap(); assert_equal( loader.buf.iter().map(|h| h.command.as_str()), [ "cargo install atuin", "cargo install atuin; \\\ncargo update", "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷", ], ); } #[tokio::test] async fn test_parse_metafied() { let bytes = b"echo \xe4\xbd\x83\x80\xe5\xa5\xbd\nls ~/\xe9\x83\xbf\xb3\xe4\xb9\x83\xb0\n".to_vec(); let mut zsh = Zsh { bytes }; assert_eq!(zsh.entries().await.unwrap(), 2); let mut loader = TestLoader::default(); zsh.load(&mut loader).await.unwrap(); assert_equal( loader.buf.iter().map(|h| h.command.as_str()), ["echo 你好", "ls ~/音乐"], ); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/mod.rs
crates/atuin-client/src/import/mod.rs
use std::fs::File; use std::io::Read; use std::path::PathBuf; use async_trait::async_trait; use eyre::{Result, bail}; use memchr::Memchr; use crate::history::History; pub mod bash; pub mod fish; pub mod nu; pub mod nu_histdb; pub mod powershell; pub mod replxx; pub mod resh; pub mod xonsh; pub mod xonsh_sqlite; pub mod zsh; pub mod zsh_histdb; #[async_trait] pub trait Importer: Sized { const NAME: &'static str; async fn new() -> Result<Self>; async fn entries(&mut self) -> Result<usize>; async fn load(self, loader: &mut impl Loader) -> Result<()>; } #[async_trait] pub trait Loader: Sync + Send { async fn push(&mut self, hist: History) -> eyre::Result<()>; } fn unix_byte_lines(input: &[u8]) -> impl Iterator<Item = &[u8]> { UnixByteLines { iter: memchr::memchr_iter(b'\n', input), bytes: input, i: 0, } } struct UnixByteLines<'a> { iter: Memchr<'a>, bytes: &'a [u8], i: usize, } impl<'a> Iterator for UnixByteLines<'a> { type Item = &'a [u8]; fn next(&mut self) -> Option<Self::Item> { let j = self.iter.next()?; let out = &self.bytes[self.i..j]; self.i = j + 1; Some(out) } fn count(self) -> usize where Self: Sized, { self.iter.count() } } fn count_lines(input: &[u8]) -> usize { unix_byte_lines(input).count() } fn get_histpath<D>(def: D) -> Result<PathBuf> where D: FnOnce() -> Result<PathBuf>, { if let Ok(p) = std::env::var("HISTFILE") { Ok(PathBuf::from(p)) } else { def() } } fn get_histfile_path<D>(def: D) -> Result<PathBuf> where D: FnOnce() -> Result<PathBuf>, { get_histpath(def).and_then(is_file) } fn get_histdir_path<D>(def: D) -> Result<PathBuf> where D: FnOnce() -> Result<PathBuf>, { get_histpath(def).and_then(is_dir) } fn read_to_end(path: PathBuf) -> Result<Vec<u8>> { let mut bytes = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut bytes)?; Ok(bytes) } fn is_file(p: PathBuf) -> Result<PathBuf> { if p.is_file() { Ok(p) } else { bail!( "Could not find history file {:?}. 
Try setting and exporting $HISTFILE", p ) } } fn is_dir(p: PathBuf) -> Result<PathBuf> { if p.is_dir() { Ok(p) } else { bail!( "Could not find history directory {:?}. Try setting and exporting $HISTFILE", p ) } } #[cfg(test)] mod tests { use super::*; #[derive(Default)] pub struct TestLoader { pub buf: Vec<History>, } #[async_trait] impl Loader for TestLoader { async fn push(&mut self, hist: History) -> Result<()> { self.buf.push(hist); Ok(()) } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/fish.rs
crates/atuin-client/src/import/fish.rs
// import old shell history! // automatically hoover up all that we can find use std::path::PathBuf; use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use time::OffsetDateTime; use super::{Importer, Loader, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct Fish { bytes: Vec<u8>, } /// see https://fishshell.com/docs/current/interactive.html#searchable-command-history fn default_histpath() -> Result<PathBuf> { let base = BaseDirs::new().ok_or_else(|| eyre!("could not determine data directory"))?; let data = std::env::var("XDG_DATA_HOME").map_or_else( |_| base.home_dir().join(".local").join("share"), PathBuf::from, ); // fish supports multiple history sessions // If `fish_history` var is missing, or set to `default`, use `fish` as the session let session = std::env::var("fish_history").unwrap_or_else(|_| String::from("fish")); let session = if session == "default" { String::from("fish") } else { session }; let mut histpath = data.join("fish"); histpath.push(format!("{session}_history")); if histpath.exists() { Ok(histpath) } else { Err(eyre!("Could not find history file.")) } } #[async_trait] impl Importer for Fish { const NAME: &'static str = "fish"; async fn new() -> Result<Self> { let bytes = read_to_end(default_histpath()?)?; Ok(Self { bytes }) } async fn entries(&mut self) -> Result<usize> { Ok(super::count_lines(&self.bytes)) } async fn load(self, loader: &mut impl Loader) -> Result<()> { let now = OffsetDateTime::now_utc(); let mut time: Option<OffsetDateTime> = None; let mut cmd: Option<String> = None; for b in unix_byte_lines(&self.bytes) { let s = match std::str::from_utf8(b) { Ok(s) => s, Err(_) => continue, // we can skip past things like invalid utf8 }; if let Some(c) = s.strip_prefix("- cmd: ") { // first, we must deal with the prev cmd if let Some(cmd) = cmd.take() { let time = time.unwrap_or(now); let entry = History::import().timestamp(time).command(cmd); 
loader.push(entry.build().into()).await?; } // using raw strings to avoid needing escaping. // replaces double backslashes with single backslashes let c = c.replace(r"\\", r"\"); // replaces escaped newlines let c = c.replace(r"\n", "\n"); // TODO: any other escape characters? cmd = Some(c); } else if let Some(t) = s.strip_prefix(" when: ") { // if t is not an int, just ignore this line if let Ok(t) = t.parse::<i64>() { time = Some(OffsetDateTime::from_unix_timestamp(t)?); } } else { // ... ignore paths lines } } // we might have a trailing cmd if let Some(cmd) = cmd.take() { let time = time.unwrap_or(now); let entry = History::import().timestamp(time).command(cmd); loader.push(entry.build().into()).await?; } Ok(()) } } #[cfg(test)] mod test { use crate::import::{Importer, tests::TestLoader}; use super::Fish; #[tokio::test] async fn parse_complex() { // complicated input with varying contents and escaped strings. let bytes = r#"- cmd: history --help when: 1639162832 - cmd: cat ~/.bash_history when: 1639162851 paths: - ~/.bash_history - cmd: ls ~/.local/share/fish/fish_history when: 1639162890 paths: - ~/.local/share/fish/fish_history - cmd: cat ~/.local/share/fish/fish_history when: 1639162893 paths: - ~/.local/share/fish/fish_history ERROR - CORRUPTED: ENTRY CONTINUE: - AS - NORMAL - cmd: echo "foo" \\\n'bar' baz when: 1639162933 - cmd: cat ~/.local/share/fish/fish_history when: 1639162939 paths: - ~/.local/share/fish/fish_history - cmd: echo "\\"" \\\\ "\\\\" when: 1639163063 - cmd: cat ~/.local/share/fish/fish_history when: 1639163066 paths: - ~/.local/share/fish/fish_history "# .as_bytes() .to_owned(); let fish = Fish { bytes }; let mut loader = TestLoader::default(); fish.load(&mut loader).await.unwrap(); let mut history = loader.buf.into_iter(); // simple wrapper for fish history entry macro_rules! 
fishtory { ($timestamp:expr_2021, $command:expr_2021) => { let h = history.next().expect("missing entry in history"); assert_eq!(h.command.as_str(), $command); assert_eq!(h.timestamp.unix_timestamp(), $timestamp); }; } fishtory!(1639162832, "history --help"); fishtory!(1639162851, "cat ~/.bash_history"); fishtory!(1639162890, "ls ~/.local/share/fish/fish_history"); fishtory!(1639162893, "cat ~/.local/share/fish/fish_history"); fishtory!(1639162933, "echo \"foo\" \\\n'bar' baz"); fishtory!(1639162939, "cat ~/.local/share/fish/fish_history"); fishtory!(1639163063, r#"echo "\"" \\ "\\""#); fishtory!(1639163066, "cat ~/.local/share/fish/fish_history"); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/powershell.rs
crates/atuin-client/src/import/powershell.rs
use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use std::path::PathBuf; use time::{Duration, OffsetDateTime}; use super::{Importer, Loader, count_lines, unix_byte_lines}; use crate::history::History; use crate::import::read_to_end; #[derive(Debug)] pub struct PowerShell { bytes: Vec<u8>, line_count: Option<usize>, } fn get_history_path() -> Result<PathBuf> { let base = BaseDirs::new().ok_or_else(|| eyre!("could not determine data directory"))?; // The command line history in PowerShell is maintained by the PSReadLine module: // https://learn.microsoft.com/en-us/powershell/module/psreadline/about/about_psreadline#command-history // // > PSReadLine maintains a history file containing all the commands and data you've entered from the command line. // > The history files are a file named `$($Host.Name)_history.txt`. // > On Windows systems the history file is stored at `$Env:APPDATA\Microsoft\Windows\PowerShell\PSReadLine`. // > On non-Windows systems, the history files are stored at `$Env:XDG_DATA_HOME/powershell/PSReadLine` // > or `$Env:HOME/.local/share/powershell/PSReadLine`. let dir = if cfg!(windows) { base.data_dir() .join("Microsoft") .join("Windows") .join("PowerShell") .join("PSReadLine") } else { std::env::var("XDG_DATA_HOME") .map_or_else( |_| base.home_dir().join(".local").join("share"), PathBuf::from, ) .join("powershell") .join("PSReadLine") }; // The history is stored in a file named `$($Host.Name)_history.txt`. 
// For the default console host shipped by Microsoft,`$Host.Name` is `ConsoleHost`: // https://learn.microsoft.com/en-us/dotnet/api/system.management.automation.host.pshost.name#remarks let file = dir.join("ConsoleHost_history.txt"); if file.is_file() { Ok(file) } else { Err(eyre!("Could not find history file: {}", file.display())) } } #[async_trait] impl Importer for PowerShell { const NAME: &'static str = "PowerShell"; async fn new() -> Result<Self> { let bytes = read_to_end(get_history_path()?)?; Ok(Self { bytes, line_count: None, }) } async fn entries(&mut self) -> Result<usize> { // Commands can be split over multiple lines, // but this is only used for a progress bar, and multi-line commands // should be quite rare, so this is not an issue in practice. if self.line_count.is_none() { self.line_count = Some(count_lines(&self.bytes)); } Ok(self.line_count.unwrap()) } async fn load(mut self, h: &mut impl Loader) -> Result<()> { let line_count = self.entries().await?; let start = OffsetDateTime::now_utc() - Duration::milliseconds(line_count as i64); let mut counter = 0; let mut iter = unix_byte_lines(&self.bytes); while let Some(s) = iter.next() { let Ok(s) = read_line(s) else { continue; // We can skip past things like invalid utf8 }; let mut cmd = s.to_string(); // Multi-line commands end with a backtick, append the following lines. while cmd.ends_with('`') { cmd.pop(); let Some(next) = iter.next() else { break; }; let Ok(next) = read_line(next) else { break; }; cmd.push('\n'); cmd.push_str(next); } if cmd.is_empty() { continue; } let offset = Duration::milliseconds(counter); counter += 1; let entry = History::import().timestamp(start + offset).command(cmd); h.push(entry.build().into()).await?; } Ok(()) } } fn read_line(s: &[u8]) -> Result<&str> { let s = str::from_utf8(s)?; // History is stored in CRLF on Windows, normalize the input to LF on all platforms. 
let s = s.strip_suffix('\r').unwrap_or(s); Ok(s) } #[cfg(test)] mod test { use super::*; use crate::import::tests::TestLoader; use itertools::assert_equal; const INPUT: &str = r#"cargo install atuin cargo update echo "first line` second line` ` last line" echo foo echo bar echo baz "#; const EXPECTED: &[&str] = &[ "cargo install atuin", "cargo update", "echo \"first line\nsecond line\n\nlast line\"", "echo foo", "echo bar", "echo baz", ]; #[tokio::test] async fn test_import() { let loader = import(INPUT).await; let actual = loader.buf.iter().map(|h| h.command.clone()); let expected = EXPECTED.iter().map(|s| s.to_string()); assert_equal(actual, expected); } #[tokio::test] async fn test_crlf() { let input = INPUT.replace("\n", "\r\n"); let loader = import(input.as_str()).await; let actual = loader.buf.iter().map(|h| h.command.clone()); let expected = EXPECTED.iter().map(|s| s.to_string()); assert_equal(actual, expected); } #[tokio::test] async fn test_timestamps() { let loader = import(INPUT).await; let mut prev = loader.buf.first().unwrap().timestamp; for current in loader.buf.iter().skip(1).map(|h| h.timestamp) { assert!(current > prev); prev = current; } } async fn import(input: &str) -> TestLoader { let powershell = PowerShell { bytes: input.as_bytes().to_vec(), line_count: None, }; let mut loader = TestLoader::default(); powershell.load(&mut loader).await.unwrap(); loader } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/nu_histdb.rs
crates/atuin-client/src/import/nu_histdb.rs
// import old shell history! // automatically hoover up all that we can find use std::path::PathBuf; use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use sqlx::{Pool, sqlite::SqlitePool}; use time::{Duration, OffsetDateTime}; use super::Importer; use crate::history::History; use crate::import::Loader; #[derive(sqlx::FromRow, Debug)] pub struct HistDbEntry { pub id: i64, pub command_line: Vec<u8>, pub start_timestamp: i64, pub session_id: i64, pub hostname: Vec<u8>, pub cwd: Vec<u8>, pub duration_ms: i64, pub exit_status: i64, pub more_info: Vec<u8>, } impl From<HistDbEntry> for History { fn from(histdb_item: HistDbEntry) -> Self { let ts_secs = histdb_item.start_timestamp / 1000; let ts_ns = (histdb_item.start_timestamp % 1000) * 1_000_000; let imported = History::import() .timestamp( OffsetDateTime::from_unix_timestamp(ts_secs).unwrap() + Duration::nanoseconds(ts_ns), ) .command(String::from_utf8(histdb_item.command_line).unwrap()) .cwd(String::from_utf8(histdb_item.cwd).unwrap()) .exit(histdb_item.exit_status) .duration(histdb_item.duration_ms) .session(format!("{:x}", histdb_item.session_id)) .hostname(String::from_utf8(histdb_item.hostname).unwrap()); imported.build().into() } } #[derive(Debug)] pub struct NuHistDb { histdb: Vec<HistDbEntry>, } /// Read db at given file, return vector of entries. 
async fn hist_from_db(dbpath: PathBuf) -> Result<Vec<HistDbEntry>> { let pool = SqlitePool::connect(dbpath.to_str().unwrap()).await?; hist_from_db_conn(pool).await } async fn hist_from_db_conn(pool: Pool<sqlx::Sqlite>) -> Result<Vec<HistDbEntry>> { let query = r#" SELECT id, command_line, start_timestamp, session_id, hostname, cwd, duration_ms, exit_status, more_info FROM history ORDER BY start_timestamp "#; let histdb_vec: Vec<HistDbEntry> = sqlx::query_as::<_, HistDbEntry>(query) .fetch_all(&pool) .await?; Ok(histdb_vec) } impl NuHistDb { pub fn histpath() -> Result<PathBuf> { let base = BaseDirs::new().ok_or_else(|| eyre!("could not determine data directory"))?; let config_dir = base.config_dir().join("nushell"); let histdb_path = config_dir.join("history.sqlite3"); if histdb_path.exists() { Ok(histdb_path) } else { Err(eyre!("Could not find history file.")) } } } #[async_trait] impl Importer for NuHistDb { // Not sure how this is used const NAME: &'static str = "nu_histdb"; /// Creates a new NuHistDb and populates the history based on the pre-populated data /// structure. async fn new() -> Result<Self> { let dbpath = NuHistDb::histpath()?; let histdb_entry_vec = hist_from_db(dbpath).await?; Ok(Self { histdb: histdb_entry_vec, }) } async fn entries(&mut self) -> Result<usize> { Ok(self.histdb.len()) } async fn load(self, h: &mut impl Loader) -> Result<()> { for i in self.histdb { h.push(i.into()).await?; } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-client/src/import/xonsh_sqlite.rs
crates/atuin-client/src/import/xonsh_sqlite.rs
use std::env; use std::path::PathBuf; use async_trait::async_trait; use directories::BaseDirs; use eyre::{Result, eyre}; use futures::TryStreamExt; use sqlx::{FromRow, Row, sqlite::SqlitePool}; use time::OffsetDateTime; use uuid::Uuid; use uuid::timestamp::{Timestamp, context::NoContext}; use super::{Importer, Loader, get_histfile_path}; use crate::history::History; use crate::utils::get_host_user; #[derive(Debug, FromRow)] struct HistDbEntry { inp: String, rtn: Option<i64>, tsb: f64, tse: f64, cwd: String, session_start: f64, } impl HistDbEntry { fn into_hist_with_hostname(self, hostname: String) -> History { let ts_nanos = (self.tsb * 1_000_000_000_f64) as i128; let timestamp = OffsetDateTime::from_unix_timestamp_nanos(ts_nanos).unwrap(); let session_ts_seconds = self.session_start.trunc() as u64; let session_ts_nanos = (self.session_start.fract() * 1_000_000_000_f64) as u32; let session_ts = Timestamp::from_unix(NoContext, session_ts_seconds, session_ts_nanos); let session_id = Uuid::new_v7(session_ts).to_string(); let duration = (self.tse - self.tsb) * 1_000_000_000_f64; if let Some(exit) = self.rtn { let imported = History::import() .timestamp(timestamp) .duration(duration.trunc() as i64) .exit(exit) .command(self.inp) .cwd(self.cwd) .session(session_id) .hostname(hostname); imported.build().into() } else { let imported = History::import() .timestamp(timestamp) .duration(duration.trunc() as i64) .command(self.inp) .cwd(self.cwd) .session(session_id) .hostname(hostname); imported.build().into() } } } fn xonsh_db_path(xonsh_data_dir: Option<String>) -> Result<PathBuf> { // if running within xonsh, this will be available if let Some(d) = xonsh_data_dir { let mut path = PathBuf::from(d); path.push("xonsh-history.sqlite"); return Ok(path); } // otherwise, fall back to default let base = BaseDirs::new().ok_or_else(|| eyre!("Could not determine home directory"))?; let hist_file = base.data_dir().join("xonsh/xonsh-history.sqlite"); if hist_file.exists() || cfg!(test) 
{ Ok(hist_file) } else { Err(eyre!( "Could not find xonsh history db at: {}", hist_file.to_string_lossy() )) } } #[derive(Debug)] pub struct XonshSqlite { pool: SqlitePool, hostname: String, } #[async_trait] impl Importer for XonshSqlite { const NAME: &'static str = "xonsh_sqlite"; async fn new() -> Result<Self> { // wrap xonsh-specific path resolver in general one so that it respects $HISTPATH let xonsh_data_dir = env::var("XONSH_DATA_DIR").ok(); let db_path = get_histfile_path(|| xonsh_db_path(xonsh_data_dir))?; let connection_str = db_path.to_str().ok_or_else(|| { eyre!( "Invalid path for SQLite database: {}", db_path.to_string_lossy() ) })?; let pool = SqlitePool::connect(connection_str).await?; let hostname = get_host_user(); Ok(XonshSqlite { pool, hostname }) } async fn entries(&mut self) -> Result<usize> { let query = "SELECT COUNT(*) FROM xonsh_history"; let row = sqlx::query(query).fetch_one(&self.pool).await?; let count: u32 = row.get(0); Ok(count as usize) } async fn load(self, loader: &mut impl Loader) -> Result<()> { let query = r#" SELECT inp, rtn, tsb, tse, cwd, MIN(tsb) OVER (PARTITION BY sessionid) AS session_start FROM xonsh_history ORDER BY rowid "#; let mut entries = sqlx::query_as::<_, HistDbEntry>(query).fetch(&self.pool); let mut count = 0; while let Some(entry) = entries.try_next().await? 
{ let hist = entry.into_hist_with_hostname(self.hostname.clone()); loader.push(hist).await?; count += 1; } println!("Loaded: {count}"); Ok(()) } } #[cfg(test)] mod tests { use time::macros::datetime; use super::*; use crate::history::History; use crate::import::tests::TestLoader; #[test] fn test_db_path_xonsh() { let db_path = xonsh_db_path(Some("/home/user/xonsh_data".to_string())).unwrap(); assert_eq!( db_path, PathBuf::from("/home/user/xonsh_data/xonsh-history.sqlite") ); } #[tokio::test] async fn test_import() { let connection_str = "tests/data/xonsh-history.sqlite"; let xonsh_sqlite = XonshSqlite { pool: SqlitePool::connect(connection_str).await.unwrap(), hostname: "box:user".to_string(), }; let mut loader = TestLoader::default(); xonsh_sqlite.load(&mut loader).await.unwrap(); for (actual, expected) in loader.buf.iter().zip(expected_hist_entries().iter()) { assert_eq!(actual.timestamp, expected.timestamp); assert_eq!(actual.command, expected.command); assert_eq!(actual.cwd, expected.cwd); assert_eq!(actual.exit, expected.exit); assert_eq!(actual.duration, expected.duration); assert_eq!(actual.hostname, expected.hostname); } } fn expected_hist_entries() -> [History; 4] { [ History::import() .timestamp(datetime!(2024-02-6 17:56:21.130956288 +00:00:00)) .command("echo hello world!".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(0) .duration(2628564) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 17:56:28.190406144 +00:00:00)) .command("ls -l".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(0) .duration(9371519) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 17:56:46.989020928 +00:00:00)) .command("false".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(1) .duration(17337560) .hostname("box:user".to_string()) .build() .into(), History::import() .timestamp(datetime!(2024-02-06 
17:56:48.218384128 +00:00:00)) .command("exit".to_string()) .cwd("/home/user/Documents/code/atuin".to_string()) .exit(0) .duration(4599094) .hostname("box:user".to_string()) .build() .into(), ] } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/build.rs
crates/atuin/build.rs
use std::process::Command; fn main() { let output = Command::new("git").args(["rev-parse", "HEAD"]).output(); let sha = match output { Ok(sha) => String::from_utf8(sha.stdout).unwrap(), Err(_) => String::from("NO_GIT"), }; println!("cargo:rustc-env=GIT_HASH={sha}"); }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/sync.rs
crates/atuin/src/sync.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore}; use atuin_scripts::store::ScriptStore; use eyre::{Context, Result}; use atuin_client::{ database::Database, history::store::HistoryStore, record::sqlite_store::SqliteStore, settings::Settings, }; use atuin_common::record::RecordId; use atuin_kv::store::KvStore; // This is the only crate that ties together all other crates. // Therefore, it's the only crate where functions tying together all stores can live /// Rebuild all stores after a sync /// Note: for history, this only does an _incremental_ sync. Hence the need to specify downloaded /// records. pub async fn build( settings: &Settings, store: &SqliteStore, db: &dyn Database, downloaded: Option<&[RecordId]>, ) -> Result<()> { let encryption_key: [u8; 32] = atuin_client::encryption::load_key(settings) .context("could not load encryption key")? .into(); let host_id = Settings::host_id().expect("failed to get host_id"); let downloaded = downloaded.unwrap_or(&[]); let kv_db = atuin_kv::database::Database::new(settings.kv.db_path.clone(), 1.0).await?; let history_store = HistoryStore::new(store.clone(), host_id, encryption_key); let alias_store = AliasStore::new(store.clone(), host_id, encryption_key); let var_store = VarStore::new(store.clone(), host_id, encryption_key); let kv_store = KvStore::new(store.clone(), kv_db, host_id, encryption_key); let script_store = ScriptStore::new(store.clone(), host_id, encryption_key); history_store.incremental_build(db, downloaded).await?; alias_store.build().await?; var_store.build().await?; kv_store.build().await?; let script_db = atuin_scripts::database::Database::new(settings.scripts.db_path.clone(), 1.0).await?; script_store.build(script_db).await?; Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/main.rs
crates/atuin/src/main.rs
#![warn(clippy::pedantic, clippy::nursery)] #![allow(clippy::use_self, clippy::missing_const_for_fn)] // not 100% reliable use clap::Parser; use clap::builder::Styles; use clap::builder::styling::{AnsiColor, Effects}; use eyre::Result; use command::AtuinCmd; mod command; #[cfg(feature = "sync")] mod sync; const VERSION: &str = env!("CARGO_PKG_VERSION"); const SHA: &str = env!("GIT_HASH"); const LONG_VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_HASH"), ")"); static HELP_TEMPLATE: &str = "\ {before-help}{name} {version} {author} {about} {usage-heading} {usage} {all-args}{after-help}"; const STYLES: Styles = Styles::styled() .header(AnsiColor::Yellow.on_default().effects(Effects::BOLD)) .usage(AnsiColor::Green.on_default().effects(Effects::BOLD)) .literal(AnsiColor::Green.on_default().effects(Effects::BOLD)) .placeholder(AnsiColor::Green.on_default()); /// Magical shell history #[derive(Parser)] #[command( author = "Ellie Huxtable <ellie@atuin.sh>", version = VERSION, long_version = LONG_VERSION, help_template(HELP_TEMPLATE), styles = STYLES, )] struct Atuin { #[command(subcommand)] atuin: AtuinCmd, } impl Atuin { fn run(self) -> Result<()> { self.atuin.run() } } fn main() -> Result<()> { Atuin::parse().run() }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/external.rs
crates/atuin/src/command/external.rs
use std::fmt::Write as _; use std::process::Command; use std::{io, process}; #[cfg(feature = "client")] use atuin_client::plugin::OfficialPluginRegistry; use clap::CommandFactory; use clap::builder::{StyledStr, Styles}; use eyre::Result; use crate::Atuin; pub fn run(args: &[String]) -> Result<()> { let subcommand = &args[0]; let bin = format!("atuin-{subcommand}"); let mut cmd = Command::new(&bin); cmd.args(&args[1..]); let spawn_result = match cmd.spawn() { Ok(child) => Ok(child), Err(e) => match e.kind() { io::ErrorKind::NotFound => { let output = render_not_found(subcommand, &bin); Err(output) } _ => Err(e.to_string().into()), }, }; match spawn_result { Ok(mut child) => { let status = child.wait()?; if status.success() { Ok(()) } else { process::exit(status.code().unwrap_or(1)); } } Err(e) => { eprintln!("{}", e.ansi()); process::exit(1); } } } fn render_not_found(subcommand: &str, bin: &str) -> StyledStr { let mut output = StyledStr::new(); let styles = Styles::styled(); let error = styles.get_error(); let invalid = styles.get_invalid(); let literal = styles.get_literal(); #[cfg(feature = "client")] { let registry = OfficialPluginRegistry::new(); // Check if this is an official plugin if let Some(install_message) = registry.get_install_message(subcommand) { let _ = write!(output, "{error}error:{error:#} "); let _ = write!( output, "'{invalid}{subcommand}{invalid:#}' is an official atuin plugin, but it's not installed" ); let _ = write!(output, "\n\n"); let _ = write!(output, "{install_message}"); return output; } } let mut atuin_cmd = Atuin::command(); let usage = atuin_cmd.render_usage(); let _ = write!(output, "{error}error:{error:#} "); let _ = write!( output, "unrecognized subcommand '{invalid}{subcommand}{invalid:#}' " ); let _ = write!( output, "and no executable named '{invalid}{bin}{invalid:#}' found in your PATH" ); let _ = write!(output, "\n\n"); let _ = write!(output, "{usage}"); let _ = write!(output, "\n\n"); let _ = write!( output, "For more 
information, try '{literal}--help{literal:#}'." ); output }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/gen_completions.rs
crates/atuin/src/command/gen_completions.rs
use clap::{CommandFactory, Parser, ValueEnum}; use clap_complete::{Generator, Shell, generate, generate_to}; use clap_complete_nushell::Nushell; use eyre::Result; // clap put nushell completions into a separate package due to the maintainers // being a little less committed to support them. // This means we have to do a tiny bit of legwork to combine these completions // into one command. #[derive(Debug, Clone, ValueEnum)] #[value(rename_all = "lower")] pub enum GenShell { Bash, Elvish, Fish, Nushell, PowerShell, Zsh, } impl Generator for GenShell { fn file_name(&self, name: &str) -> String { match self { // clap_complete Self::Bash => Shell::Bash.file_name(name), Self::Elvish => Shell::Elvish.file_name(name), Self::Fish => Shell::Fish.file_name(name), Self::PowerShell => Shell::PowerShell.file_name(name), Self::Zsh => Shell::Zsh.file_name(name), // clap_complete_nushell Self::Nushell => Nushell.file_name(name), } } fn generate(&self, cmd: &clap::Command, buf: &mut dyn std::io::prelude::Write) { match self { // clap_complete Self::Bash => Shell::Bash.generate(cmd, buf), Self::Elvish => Shell::Elvish.generate(cmd, buf), Self::Fish => Shell::Fish.generate(cmd, buf), Self::PowerShell => Shell::PowerShell.generate(cmd, buf), Self::Zsh => Shell::Zsh.generate(cmd, buf), // clap_complete_nushell Self::Nushell => Nushell.generate(cmd, buf), } } } #[derive(Debug, Parser)] pub struct Cmd { /// Set the shell for generating completions #[arg(long, short)] shell: GenShell, /// Set the output directory #[arg(long, short)] out_dir: Option<String>, } impl Cmd { pub fn run(self) -> Result<()> { let Cmd { shell, out_dir } = self; let mut cli = crate::Atuin::command(); match out_dir { Some(out_dir) => { generate_to(shell, &mut cli, env!("CARGO_PKG_NAME"), &out_dir)?; } None => { generate( shell, &mut cli, env!("CARGO_PKG_NAME"), &mut std::io::stdout(), ); } } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/contributors.rs
crates/atuin/src/command/contributors.rs
static CONTRIBUTORS: &str = include_str!("CONTRIBUTORS"); pub fn run() { println!("\n{CONTRIBUTORS}"); }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client.rs
crates/atuin/src/command/client.rs
use std::path::PathBuf; use clap::Subcommand; use eyre::{Result, WrapErr}; use atuin_client::{ database::Sqlite, record::sqlite_store::SqliteStore, settings::Settings, theme, }; use tracing_subscriber::{filter::EnvFilter, fmt, prelude::*}; #[cfg(feature = "sync")] mod sync; #[cfg(feature = "sync")] mod account; #[cfg(feature = "daemon")] mod daemon; mod default_config; mod doctor; mod dotfiles; mod history; mod import; mod info; mod init; mod kv; mod scripts; mod search; mod stats; mod store; mod wrapped; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Manipulate shell history #[command(subcommand)] History(history::Cmd), /// Import shell history from file #[command(subcommand)] Import(import::Cmd), /// Calculate statistics for your history Stats(stats::Cmd), /// Interactive history search Search(search::Cmd), #[cfg(feature = "sync")] #[command(flatten)] Sync(sync::Cmd), /// Manage your sync account #[cfg(feature = "sync")] Account(account::Cmd), /// Get or set small key-value pairs #[command(subcommand)] Kv(kv::Cmd), /// Manage the atuin data store #[command(subcommand)] Store(store::Cmd), /// Manage your dotfiles with Atuin #[command(subcommand)] Dotfiles(dotfiles::Cmd), /// Manage your scripts with Atuin #[command(subcommand)] Scripts(scripts::Cmd), /// Print Atuin's shell init script #[command()] Init(init::Cmd), /// Information about dotfiles locations and ENV vars #[command()] Info, /// Run the doctor to check for common issues #[command()] Doctor, #[command()] Wrapped { year: Option<i32> }, /// *Experimental* Start the background daemon #[cfg(feature = "daemon")] #[command()] Daemon, /// Print the default atuin configuration (config.toml) #[command()] DefaultConfig, } impl Cmd { pub fn run(self) -> Result<()> { let runtime = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); let settings = Settings::new().wrap_err("could not load client settings")?; let theme_manager = 
theme::ThemeManager::new(settings.theme.debug, None); let res = runtime.block_on(self.run_inner(settings, theme_manager)); runtime.shutdown_timeout(std::time::Duration::from_millis(50)); res } async fn run_inner( self, mut settings: Settings, mut theme_manager: theme::ThemeManager, ) -> Result<()> { let filter = EnvFilter::from_env("ATUIN_LOG").add_directive("sqlx_sqlite::regexp=off".parse()?); tracing_subscriber::registry() .with(fmt::layer()) .with(filter) .init(); tracing::trace!(command = ?self, "client command"); // Skip initializing any databases for history // This is a pretty hot path, as it runs before and after every single command the user // runs match self { Self::History(history) => return history.run(&settings).await, Self::Init(init) => return init.run(&settings).await, Self::Doctor => return doctor::run(&settings).await, _ => {} } let db_path = PathBuf::from(settings.db_path.as_str()); let record_store_path = PathBuf::from(settings.record_store_path.as_str()); let db = Sqlite::new(db_path, settings.local_timeout).await?; let sqlite_store = SqliteStore::new(record_store_path, settings.local_timeout).await?; let theme_name = settings.theme.name.clone(); let theme = theme_manager.load_theme(theme_name.as_str(), settings.theme.max_depth); match self { Self::Import(import) => import.run(&db).await, Self::Stats(stats) => stats.run(&db, &settings, theme).await, Self::Search(search) => search.run(db, &mut settings, sqlite_store, theme).await, #[cfg(feature = "sync")] Self::Sync(sync) => sync.run(settings, &db, sqlite_store).await, #[cfg(feature = "sync")] Self::Account(account) => account.run(settings, sqlite_store).await, Self::Kv(kv) => kv.run(&settings, &sqlite_store).await, Self::Store(store) => store.run(&settings, &db, sqlite_store).await, Self::Dotfiles(dotfiles) => dotfiles.run(&settings, sqlite_store).await, Self::Scripts(scripts) => scripts.run(&settings, sqlite_store, &db).await, Self::Info => { info::run(&settings); Ok(()) } Self::DefaultConfig 
=> { default_config::run(); Ok(()) } Self::Wrapped { year } => wrapped::run(year, &db, &settings, theme).await, #[cfg(feature = "daemon")] Self::Daemon => daemon::run(settings, sqlite_store, db).await, Self::History(_) | Self::Init(_) | Self::Doctor => unreachable!(), } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/mod.rs
crates/atuin/src/command/mod.rs
use clap::Subcommand; use eyre::Result; #[cfg(not(windows))] use rustix::{fs::Mode, process::umask}; #[cfg(feature = "client")] mod client; #[cfg(feature = "server")] mod server; mod contributors; mod gen_completions; mod external; #[derive(Subcommand)] #[command(infer_subcommands = true)] #[allow(clippy::large_enum_variant)] pub enum AtuinCmd { #[cfg(feature = "client")] #[command(flatten)] Client(client::Cmd), /// Start an atuin server #[cfg(feature = "server")] #[command(subcommand)] Server(server::Cmd), /// Generate a UUID Uuid, Contributors, /// Generate shell completions GenCompletions(gen_completions::Cmd), #[command(external_subcommand)] External(Vec<String>), } impl AtuinCmd { pub fn run(self) -> Result<()> { #[cfg(not(windows))] { // set umask before we potentially open/create files // or in other words, 077. Do not allow any access to any other user let mode = Mode::RWXG | Mode::RWXO; umask(mode); } match self { #[cfg(feature = "client")] Self::Client(client) => client.run(), #[cfg(feature = "server")] Self::Server(server) => server.run(), Self::Contributors => { contributors::run(); Ok(()) } Self::Uuid => { println!("{}", atuin_common::utils::uuid_v7().as_simple()); Ok(()) } Self::GenCompletions(gen_completions) => gen_completions.run(), Self::External(args) => external::run(&args), } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/server.rs
crates/atuin/src/command/server.rs
use std::net::SocketAddr; use atuin_server_database::DbType; use atuin_server_postgres::Postgres; use atuin_server_sqlite::Sqlite; use tracing_subscriber::{EnvFilter, fmt, prelude::*}; use clap::Parser; use eyre::{Context, Result, eyre}; use atuin_server::{Settings, example_config, launch, launch_metrics_server}; #[derive(Parser, Debug)] #[clap(infer_subcommands = true)] pub enum Cmd { /// Start the server Start { /// The host address to bind #[clap(long)] host: Option<String>, /// The port to bind #[clap(long, short)] port: Option<u16>, }, /// Print server example configuration DefaultConfig, } impl Cmd { #[tokio::main] pub async fn run(self) -> Result<()> { tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) .init(); tracing::trace!(command = ?self, "server command"); match self { Self::Start { host, port } => { let settings = Settings::new().wrap_err("could not load server settings")?; let host = host.as_ref().unwrap_or(&settings.host).clone(); let port = port.unwrap_or(settings.port); let addr = SocketAddr::new(host.parse()?, port); if settings.metrics.enable { tokio::spawn(launch_metrics_server( settings.metrics.host.clone(), settings.metrics.port, )); } match settings.db_settings.db_type() { DbType::Postgres => launch::<Postgres>(settings, addr).await, DbType::Sqlite => launch::<Sqlite>(settings, addr).await, DbType::Unknown => { Err(eyre!("db_uri must start with postgres:// or sqlite://")) } } } Self::DefaultConfig => { println!("{}", example_config()); Ok(()) } } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/stats.rs
crates/atuin/src/command/client/stats.rs
use clap::Parser; use eyre::Result; use interim::parse_date_string; use time::{Duration, OffsetDateTime, Time}; use atuin_client::{ database::{Database, current_context}, settings::Settings, theme::Theme, }; use atuin_history::stats::{compute, pretty_print}; fn parse_ngram_size(s: &str) -> Result<usize, String> { let value = s .parse::<usize>() .map_err(|_| format!("'{s}' is not a valid window size"))?; if value == 0 { return Err("ngram window size must be at least 1".to_string()); } Ok(value) } #[derive(Parser, Debug)] #[command(infer_subcommands = true)] pub struct Cmd { /// Compute statistics for the specified period, leave blank for statistics since the beginning. See [this](https://docs.atuin.sh/reference/stats/) for more details. period: Vec<String>, /// How many top commands to list #[arg(long, short, default_value = "10")] count: usize, /// The number of consecutive commands to consider #[arg(long, short, default_value = "1", value_parser = parse_ngram_size)] ngram_size: usize, } impl Cmd { pub async fn run(&self, db: &impl Database, settings: &Settings, theme: &Theme) -> Result<()> { let context = current_context(); let words = if self.period.is_empty() { String::from("all") } else { self.period.join(" ") }; let now = OffsetDateTime::now_utc().to_offset(settings.timezone.0); let last_night = now.replace_time(Time::MIDNIGHT); let history = if words.as_str() == "all" { db.list(&[], &context, None, false, false).await? } else if words.trim() == "today" { let start = last_night; let end = start + Duration::days(1); db.range(start, end).await? } else if words.trim() == "month" { let end = last_night; let start = end - Duration::days(31); db.range(start, end).await? } else if words.trim() == "week" { let end = last_night; let start = end - Duration::days(7); db.range(start, end).await? } else if words.trim() == "year" { let end = last_night; let start = end - Duration::days(365); db.range(start, end).await? 
} else { let start = parse_date_string(&words, now, settings.dialect.into())?; let end = start + Duration::days(1); db.range(start, end).await? }; let stats = compute(settings, &history, self.count, self.ngram_size); if let Some(stats) = stats { pretty_print(stats, self.ngram_size, theme); } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store.rs
crates/atuin/src/command/client/store.rs
use clap::Subcommand;
use eyre::Result;

use atuin_client::{
    database::Database,
    record::{sqlite_store::SqliteStore, store::Store},
    settings::Settings,
};

use itertools::Itertools;
use time::{OffsetDateTime, UtcOffset};

#[cfg(feature = "sync")]
mod push;

#[cfg(feature = "sync")]
mod pull;

mod purge;
mod rebuild;
mod rekey;
mod verify;

// Subcommands operating on the local encrypted record store.
#[derive(Subcommand, Debug)]
#[command(infer_subcommands = true)]
pub enum Cmd {
    /// Print the current status of the record store
    Status,

    /// Rebuild a store (eg atuin store rebuild history)
    Rebuild(rebuild::Rebuild),

    /// Re-encrypt the store with a new key (potential for data loss!)
    Rekey(rekey::Rekey),

    /// Delete all records in the store that cannot be decrypted with the current key
    Purge(purge::Purge),

    /// Verify that all records in the store can be decrypted with the current key
    Verify(verify::Verify),

    /// Push all records to the remote sync server (one way sync)
    #[cfg(feature = "sync")]
    Push(push::Push),

    /// Pull records from the remote sync server (one way sync)
    #[cfg(feature = "sync")]
    Pull(pull::Pull),
}

impl Cmd {
    /// Dispatch the chosen store subcommand against the local database and
    /// the sqlite-backed record store.
    pub async fn run(
        &self,
        settings: &Settings,
        database: &dyn Database,
        store: SqliteStore,
    ) -> Result<()> {
        match self {
            Self::Status => self.status(store).await,
            Self::Rebuild(rebuild) => rebuild.run(settings, store, database).await,
            Self::Rekey(rekey) => rekey.run(settings, store).await,
            Self::Verify(verify) => verify.run(settings, store).await,
            Self::Purge(purge) => purge.run(settings, store).await,
            #[cfg(feature = "sync")]
            Self::Push(push) => push.run(settings, store).await,
            #[cfg(feature = "sync")]
            Self::Pull(pull) => pull.run(settings, store, database).await,
        }
    }

    /// Print a per-host, per-tag summary of the record store: the latest
    /// index, plus the id and creation time of the first and last record.
    /// Timestamps are rendered in the local UTC offset when it can be
    /// determined, falling back to UTC.
    pub async fn status(&self, store: SqliteStore) -> Result<()> {
        let host_id = Settings::host_id().expect("failed to get host_id");
        let offset = UtcOffset::current_local_offset().unwrap_or(UtcOffset::UTC);

        let status = store.status().await?;

        // TODO: should probs build some data structure and then pretty-print it or smth
        // Hosts and tags are sorted so the output is deterministic.
        for (host, st) in status.hosts.iter().sorted_by_key(|(h, _)| *h) {
            // Mark the entry belonging to this machine.
            let host_string = if host == &host_id {
                format!("host: {} <- CURRENT HOST", host.0.as_hyphenated())
            } else {
                format!("host: {}", host.0.as_hyphenated())
            };

            println!("{host_string}");

            for (tag, idx) in st.iter().sorted_by_key(|(tag, _)| *tag) {
                println!("\tstore: {tag}");

                let first = store.first(*host, tag).await?;
                let last = store.last(*host, tag).await?;

                println!("\t\tidx: {idx}");

                if let Some(first) = first {
                    println!("\t\tfirst: {}", first.id.0.as_hyphenated());

                    // Record timestamps are stored as unix nanoseconds.
                    let time =
                        OffsetDateTime::from_unix_timestamp_nanos(i128::from(first.timestamp))?
                            .to_offset(offset);
                    println!("\t\t\tcreated: {time}");
                }

                if let Some(last) = last {
                    println!("\t\tlast: {}", last.id.0.as_hyphenated());

                    let time =
                        OffsetDateTime::from_unix_timestamp_nanos(i128::from(last.timestamp))?
                            .to_offset(offset);
                    println!("\t\t\tcreated: {time}");
                }
            }

            println!();
        }

        Ok(())
    }
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/wrapped.rs
crates/atuin/src/command/client/wrapped.rs
// Year-in-review ("wrapped") report over the local shell history.
use crossterm::style::{ResetColor, SetAttribute};
use eyre::Result;
use std::collections::{HashMap, HashSet};
use time::{Date, Duration, Month, OffsetDateTime, Time};

use atuin_client::{database::Database, settings::Settings, theme::Theme};
use atuin_history::stats::{Stats, compute};

// Derived metrics for the wrapped report, computed once from the year's
// history in `WrappedStats::new`.
#[derive(Debug)]
struct WrappedStats {
    // Total uses of directory-navigation commands (cd/ls/pwd/pushd/popd),
    // counted from the precomputed top-commands list.
    nav_commands: usize,
    // Commands whose text starts with a known package-manager name.
    pkg_commands: usize,
    // Fraction (0.0-1.0) of commands with a non-zero exit status.
    error_rate: f64,
    // Top 5 (command, count) pairs for each half of the year.
    first_half_commands: Vec<(String, usize)>,
    second_half_commands: Vec<(String, usize)>,
    // Fraction (0.0-1.0) of top commands starting with "git".
    git_percentage: f64,
    // ("HH:00", count) for the hour with the most commands, if any.
    busiest_hour: Option<(String, usize)>,
}

impl WrappedStats {
    /// Build all wrapped metrics in a single pass over `history`.
    ///
    /// NOTE(review): indexes `history[0]` — callers must guarantee the slice
    /// is non-empty (the `run` entry point checks `is_empty` first).
    #[allow(clippy::too_many_lines, clippy::cast_precision_loss)]
    fn new(settings: &Settings, stats: &Stats, history: &[atuin_client::history::History]) -> Self {
        let nav_commands = stats
            .top
            .iter()
            .filter(|(cmd, _)| {
                // `cmd` is an ngram; only its first word is inspected.
                let cmd = &cmd[0];
                cmd == "cd" || cmd == "ls" || cmd == "pwd" || cmd == "pushd" || cmd == "popd"
            })
            .map(|(_, count)| count)
            .sum();

        let pkg_managers = [
            "cargo", "npm", "pnpm", "yarn", "pip", "pip3", "pipenv", "poetry", "pipx", "uv",
            "brew", "apt", "apt-get", "apk", "pacman", "yay", "paru", "yum", "dnf", "dnf5", "rpm",
            "rpm-ostree", "zypper", "pkg", "chocolatey", "choco", "scoop", "winget", "gem",
            "bundle", "shards", "composer", "gradle", "maven", "mvn", "go get", "nuget", "dotnet",
            "mix", "hex", "rebar3", "nix", "nix-env", "cabal", "opam",
        ];

        // Prefix match against the full command line, so e.g. "go get" works.
        let pkg_commands = history
            .iter()
            .filter(|h| {
                let cmd = h.command.clone();
                pkg_managers.iter().any(|pm| cmd.starts_with(pm))
            })
            .count();

        // Error analysis
        let mut command_errors: HashMap<String, (usize, usize)> = HashMap::new(); // (total_uses, errors)
        // NOTE(review): "midyear" is 182 days after the first entry's
        // timestamp, not July 1st of the calendar year — fine while the
        // history spans roughly the full year; confirm if that assumption
        // ever changes.
        let midyear = history[0].timestamp + Duration::days(182); // Split year in half
        let mut first_half_commands: HashMap<String, usize> = HashMap::new();
        let mut second_half_commands: HashMap<String, usize> = HashMap::new();
        let mut hours: HashMap<String, usize> = HashMap::new();

        for entry in history {
            // Only the first word of the command line is tallied here.
            let cmd = entry
                .command
                .split_whitespace()
                .next()
                .unwrap_or("")
                .to_string();

            let (total, errors) = command_errors.entry(cmd.clone()).or_insert((0, 0));
            *total += 1;
            if entry.exit != 0 {
                *errors += 1;
            }

            // Track command evolution
            if entry.timestamp < midyear {
                *first_half_commands.entry(cmd.clone()).or_default() += 1;
            } else {
                *second_half_commands.entry(cmd).or_default() += 1;
            }

            // Track hourly distribution
            // Prefer the system's local offset; fall back to the configured
            // timezone if it cannot be determined.
            let local_time = entry
                .timestamp
                .to_offset(time::UtcOffset::current_local_offset().unwrap_or(settings.timezone.0));
            let hour = format!("{:02}:00", local_time.time().hour());
            *hours.entry(hour).or_default() += 1;
        }

        let total_errors: usize = command_errors.values().map(|(_, errors)| errors).sum();
        let total_commands: usize = command_errors.values().map(|(total, _)| total).sum();
        let error_rate = total_errors as f64 / total_commands as f64;

        // Process command evolution data
        let mut first_half: Vec<_> = first_half_commands.into_iter().collect();
        let mut second_half: Vec<_> = second_half_commands.into_iter().collect();
        first_half.sort_by_key(|(_, count)| std::cmp::Reverse(*count));
        second_half.sort_by_key(|(_, count)| std::cmp::Reverse(*count));
        first_half.truncate(5);
        second_half.truncate(5);

        // Calculate git percentage
        let git_commands: usize = stats
            .top
            .iter()
            .filter(|(cmd, _)| cmd[0].starts_with("git"))
            .map(|(_, count)| count)
            .sum();
        let git_percentage = git_commands as f64 / stats.total_commands as f64;

        // Find busiest hour
        let busiest_hour = hours.into_iter().max_by_key(|(_, count)| *count);

        Self {
            nav_commands,
            pkg_commands,
            error_rate,
            first_half_commands: first_half,
            second_half_commands: second_half,
            git_percentage,
            busiest_hour,
        }
    }
}

/// Print the boxed "ATUIN WRAPPED <year>" banner.
pub fn print_wrapped_header(year: i32) {
    let reset = ResetColor;
    let bold = SetAttribute(crossterm::style::Attribute::Bold);

    println!("{bold}╭────────────────────────────────────╮{reset}");
    println!("{bold}│ ATUIN WRAPPED {year} │{reset}");
    println!("{bold}│ Your Year in Shell History │{reset}");
    println!("{bold}╰────────────────────────────────────╯{reset}");
    println!();
}

/// Print the highlight sections of the report (git usage, navigation,
/// vocabulary, errors, command evolution, busiest hour).
#[allow(clippy::cast_precision_loss)]
fn print_fun_facts(wrapped_stats: &WrappedStats, stats: &Stats, year: i32) {
    let reset = ResetColor;
    let bold = SetAttribute(crossterm::style::Attribute::Bold);

    // git_percentage is a 0.0-1.0 fraction; shown only above 5%.
    if wrapped_stats.git_percentage > 0.05 {
        println!(
            "{bold}🌟 You're a Git Power User!{reset} {bold}{:.1}%{reset} of your commands were Git operations\n",
            wrapped_stats.git_percentage * 100.0
        );
    }

    // Navigation patterns
    // NOTE(review): nav_percentage is on a 0-100 scale but is compared
    // against 0.05 (i.e. 0.05%), whereas the git check above compares a
    // 0.0-1.0 fraction against 0.05 (5%). The thresholds look inconsistent —
    // possibly intended to be `> 5.0` here; confirm before changing.
    let nav_percentage = wrapped_stats.nav_commands as f64 / stats.total_commands as f64 * 100.0;
    if nav_percentage > 0.05 {
        println!(
            "{bold}🚀 You're a Navigator!{reset} {bold}{nav_percentage:.1}%{reset} of your time was spent navigating directories\n",
        );
    }

    // Command vocabulary
    println!(
        "{bold}📚 Command Vocabulary{reset}: You know {bold}{}{reset} unique commands\n",
        stats.unique_commands
    );

    // Package management
    println!(
        "{bold}📦 Package Management{reset}: You ran {bold}{}{reset} package-related commands\n",
        wrapped_stats.pkg_commands
    );

    // Error patterns
    let error_percentage = wrapped_stats.error_rate * 100.0;
    println!(
        "{bold}🚨 Error Analysis{reset}: Your commands failed {bold}{error_percentage:.1}%{reset} of the time\n",
    );

    // Command evolution
    println!("🔍 Command Evolution:");
    // print stats for each half and compare
    println!(" {bold}Top Commands{reset} in the first half of {year}:");
    for (cmd, count) in wrapped_stats.first_half_commands.iter().take(3) {
        println!(" {bold}{cmd}{reset} ({count} times)");
    }
    println!(" {bold}Top Commands{reset} in the second half of {year}:");
    for (cmd, count) in wrapped_stats.second_half_commands.iter().take(3) {
        println!(" {bold}{cmd}{reset} ({count} times)");
    }

    // Find new favorite commands (in top 5 of second half but not in first half)
    let first_half_set: HashSet<_> = wrapped_stats
        .first_half_commands
        .iter()
        .map(|(cmd, _)| cmd)
        .collect();
    let new_favorites: Vec<_> = wrapped_stats
        .second_half_commands
        .iter()
        .filter(|(cmd, _)| !first_half_set.contains(cmd))
        .take(2)
        .collect();
    if !new_favorites.is_empty() {
        println!(" {bold}New favorites{reset} in the second half:");
        for (cmd, count) in new_favorites {
            println!(" {bold}{cmd}{reset} ({count} times)");
        }
    }

    // Time patterns
    if let Some((hour, count)) = &wrapped_stats.busiest_hour {
        println!("\n🕘 Most Productive Hour: {bold}{hour}{reset} ({count} commands)",);

        // Night owl or early bird
        let hour_num = hour
            .split(':')
            .next()
            .unwrap_or("0")
            .parse::<u32>()
            .unwrap_or(0);
        if hour_num >= 22 || hour_num <= 4 {
            println!(" You're quite the night owl! 🦉");
        } else if (5..=7).contains(&hour_num) {
            println!(" Early bird gets the worm! 🐦");
        }
    }

    println!();
}

/// Entry point: print the wrapped report for `year` (defaulting to the
/// current year in December, otherwise the previous year).
pub async fn run(
    year: Option<i32>,
    db: &impl Database,
    settings: &Settings,
    theme: &Theme,
) -> Result<()> {
    let now = OffsetDateTime::now_utc().to_offset(settings.timezone.0);
    let month = now.month();

    // If we're in December, then wrapped is for the current year. If not, it's for the previous year
    let year = year.unwrap_or_else(|| {
        if month == Month::December {
            now.year()
        } else {
            now.year() - 1
        }
    });

    let start = OffsetDateTime::new_in_offset(
        Date::from_calendar_date(year, Month::January, 1).unwrap(),
        Time::MIDNIGHT,
        now.offset(),
    );
    // End of Dec 31: midnight + 1 day - 1ns, i.e. 23:59:59.999999999.
    let end = OffsetDateTime::new_in_offset(
        Date::from_calendar_date(year, Month::December, 31).unwrap(),
        Time::MIDNIGHT + Duration::days(1) - Duration::nanoseconds(1),
        now.offset(),
    );

    let history = db.range(start, end).await?;
    // Guard: WrappedStats::new indexes history[0], so bail out early here.
    if history.is_empty() {
        println!(
            "Your history for {year} is empty!\nMaybe 'atuin import' could help you import your previous history 🪄"
        );
        return Ok(());
    }

    // Compute overall stats using existing functionality
    let stats = compute(settings, &history, 10, 1).expect("Failed to compute stats");
    let wrapped_stats = WrappedStats::new(settings, &stats, &history);

    // Print wrapped format
    print_wrapped_header(year);
    println!("🎉 In {year}, you typed {} commands!", stats.total_commands);
    println!(
        " That's ~{} commands every day\n",
        stats.total_commands / 365
    );
    println!("Your Top Commands:");
    atuin_history::stats::pretty_print(stats.clone(), 1, theme);
    println!();
    print_fun_facts(&wrapped_stats, &stats, year);

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/sync.rs
crates/atuin/src/command/client/sync.rs
use clap::Subcommand;
use eyre::{Result, WrapErr};

use atuin_client::{
    database::Database,
    encryption,
    history::store::HistoryStore,
    record::{sqlite_store::SqliteStore, store::Store, sync},
    settings::Settings,
};

mod status;

use crate::command::client::account;

// Sync-related subcommands, including account management shortcuts.
#[derive(Subcommand, Debug)]
#[command(infer_subcommands = true)]
pub enum Cmd {
    /// Sync with the configured server
    Sync {
        /// Force re-download everything
        #[arg(long, short)]
        force: bool,
    },

    /// Login to the configured server
    Login(account::login::Cmd),

    /// Log out
    Logout,

    /// Register with the configured server
    Register(account::register::Cmd),

    /// Print the encryption key for transfer to another machine
    Key {
        /// Switch to base64 output of the key
        #[arg(long)]
        base64: bool,
    },

    /// Display the sync status
    Status,
}

impl Cmd {
    /// Dispatch the chosen sync subcommand.
    pub async fn run(
        self,
        settings: Settings,
        db: &impl Database,
        store: SqliteStore,
    ) -> Result<()> {
        match self {
            Self::Sync { force } => run(&settings, force, db, store).await,
            Self::Login(l) => l.run(&settings, &store).await,
            Self::Logout => account::logout::run(&settings),
            Self::Register(r) => r.run(&settings).await,
            Self::Status => status::run(&settings, db).await,
            Self::Key { base64 } => {
                use atuin_client::encryption::{encode_key, load_key};
                let key = load_key(&settings).wrap_err("could not load encryption key")?;
                if base64 {
                    // Raw base64 form, suitable for machine transfer.
                    let encode = encode_key(&key).wrap_err("could not encode encryption key")?;
                    println!("{encode}");
                } else {
                    // Human-friendly BIP39 mnemonic form of the same key.
                    let mnemonic = bip39::Mnemonic::from_entropy(&key, bip39::Language::English)
                        .map_err(|_| eyre::eyre!("invalid key"))?;
                    println!("{mnemonic}");
                }
                Ok(())
            }
        }
    }
}

/// Perform a sync. With record sync enabled this syncs the record store,
/// rebuilds the local history index from downloaded records, and — if the
/// index holds more entries than the store — initialises the store from the
/// index and syncs once more to upload the newly created records. Otherwise
/// it falls back to the legacy history sync.
async fn run(
    settings: &Settings,
    force: bool,
    db: &impl Database,
    store: SqliteStore,
) -> Result<()> {
    if settings.sync.records {
        let encryption_key: [u8; 32] = encryption::load_key(settings)
            .context("could not load encryption key")?
            .into();

        let host_id = Settings::host_id().expect("failed to get host_id");
        // Built up front; only used below when the store needs initialising.
        let history_store = HistoryStore::new(store.clone(), host_id, encryption_key);

        let (uploaded, downloaded) = sync::sync(settings, &store).await?;
        crate::sync::build(settings, &store, db, Some(&downloaded)).await?;
        println!("{uploaded}/{} up/down to record store", downloaded.len());

        let history_length = db.history_count(true).await?;
        let store_history_length = store.len_tag("history").await?;

        #[allow(clippy::cast_sign_loss)]
        if history_length as u64 > store_history_length {
            println!(
                "{history_length} in history index, but {store_history_length} in history store"
            );
            println!("Running automatic history store init...");

            // Internally we use the global filter mode, so this context is ignored.
            // don't recurse or loop here.
            history_store.init_store(db).await?;

            println!("Re-running sync due to new records locally");

            // we'll want to run sync once more, as there will now be stuff to upload
            let (uploaded, downloaded) = sync::sync(settings, &store).await?;
            crate::sync::build(settings, &store, db, Some(&downloaded)).await?;
            println!("{uploaded}/{} up/down to record store", downloaded.len());
        }
    } else {
        // Legacy (non-record) history sync; `force` re-downloads everything.
        atuin_client::sync::sync(settings, force, db).await?;
    }

    println!(
        "Sync complete! {} items in history database, force: {}",
        db.history_count(true).await?,
        force
    );

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/default_config.rs
crates/atuin/src/command/client/default_config.rs
use atuin_client::settings::Settings;

/// Print the example client configuration to stdout.
pub fn run() {
    let example = Settings::example_config();
    println!("{example}");
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/info.rs
crates/atuin/src/command/client/info.rs
use atuin_client::settings::Settings;

use crate::{SHA, VERSION};

/// Print diagnostic information: config/db/key/session paths, relevant
/// environment variables, and the build's version and commit hash.
pub fn run(settings: &Settings) {
    let config_dir = atuin_common::utils::config_dir();
    let config_file = config_dir.join("config.toml");
    let server_config = config_dir.join("server.toml");

    let config_paths = format!(
        "Config files:\nclient config: {:?}\nserver config: {:?}\nclient db path: {:?}\nkey path: {:?}\nsession path: {:?}",
        config_file.to_string_lossy(),
        server_config.to_string_lossy(),
        settings.db_path,
        settings.key_path,
        settings.session_path
    );

    let env_vars = format!(
        "Env Vars:\nATUIN_CONFIG_DIR = {:?}",
        std::env::var("ATUIN_CONFIG_DIR").unwrap_or_else(|_| "None".into())
    );

    let general_info = format!("Version info:\nversion: {VERSION}\ncommit: {SHA}");

    println!("{config_paths}\n\n{env_vars}\n\n{general_info}");
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/scripts.rs
crates/atuin/src/command/client/scripts.rs
use std::collections::HashMap; use std::collections::HashSet; use std::io::IsTerminal; use std::io::Read; use std::path::PathBuf; use atuin_scripts::execution::template_script; use atuin_scripts::{ execution::{build_executable_script, execute_script_interactive, template_variables}, store::{ScriptStore, script::Script}, }; use clap::{Parser, Subcommand}; use eyre::OptionExt; use eyre::{Result, bail}; use tempfile::NamedTempFile; use atuin_client::{database::Database, record::sqlite_store::SqliteStore, settings::Settings}; use tracing::debug; #[derive(Parser, Debug)] pub struct NewScript { pub name: String, #[arg(short, long)] pub description: Option<String>, #[arg(short, long)] pub tags: Vec<String>, #[arg(short, long)] pub shebang: Option<String>, #[arg(long)] pub script: Option<PathBuf>, #[allow(clippy::option_option)] #[arg(long)] /// Use the last command as the script content /// Optionally specify a number to use the last N commands pub last: Option<Option<usize>>, #[arg(long)] /// Skip opening editor when using --last pub no_edit: bool, } #[derive(Parser, Debug)] pub struct Run { pub name: String, /// Specify template variables in the format KEY=VALUE /// Example: -v name=John -v greeting="Hello there" #[arg(short, long = "var")] pub var: Vec<String>, } #[derive(Parser, Debug)] pub struct List {} #[derive(Parser, Debug)] pub struct Get { pub name: String, #[arg(short, long)] /// Display only the executable script with shebang pub script: bool, } #[derive(Parser, Debug)] pub struct Edit { pub name: String, #[arg(short, long)] pub description: Option<String>, /// Replace all existing tags with these new tags #[arg(short, long)] pub tags: Vec<String>, /// Remove all tags from the script #[arg(long)] pub no_tags: bool, /// Rename the script #[arg(long)] pub rename: Option<String>, #[arg(short, long)] pub shebang: Option<String>, #[arg(long)] pub script: Option<PathBuf>, #[allow(clippy::struct_field_names)] /// Skip opening editor #[arg(long)] pub no_edit: bool, } 
#[derive(Parser, Debug)] pub struct Delete { pub name: String, #[arg(short, long)] pub force: bool, } #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { New(NewScript), Run(Run), #[command(alias = "ls")] List(List), Get(Get), Edit(Edit), #[command(alias = "rm")] Delete(Delete), } impl Cmd { // Helper function to open an editor with optional initial content fn open_editor(initial_content: Option<&str>) -> Result<String> { // Create a temporary file let temp_file = NamedTempFile::new()?; let path = temp_file.into_temp_path(); // Write initial content to the temp file if provided if let Some(content) = initial_content { std::fs::write(&path, content)?; } // Open the file in the user's preferred editor let editor_str = std::env::var("EDITOR").unwrap_or_else(|_| "vi".to_string()); // Use shlex to safely split the string into shell-like parts. let parts = shlex::split(&editor_str).ok_or_eyre("Failed to parse editor command")?; let (command, args) = parts.split_first().ok_or_eyre("No editor command found")?; let status = std::process::Command::new(command) .args(args) .arg(&path) .status()?; if !status.success() { bail!("failed to open editor"); } // Read back the edited content let content = std::fs::read_to_string(&path)?; path.close()?; Ok(content) } // Helper function to execute a script and manage stdin/stdout/stderr async fn execute_script(script_content: String, shebang: String) -> Result<i32> { let mut session = execute_script_interactive(script_content, shebang) .await .expect("failed to execute script"); // Create a channel to signal when the process exits let (exit_tx, mut exit_rx) = tokio::sync::oneshot::channel(); // Set up a task to read from stdin and forward to the script let sender = session.stdin_tx.clone(); let stdin_task = tokio::spawn(async move { use tokio::io::AsyncReadExt; use tokio::select; let stdin = tokio::io::stdin(); let mut reader = tokio::io::BufReader::new(stdin); let mut buffer = vec![0u8; 1024]; // Read in 
chunks for efficiency loop { // Use select to either read from stdin or detect when the process exits select! { // Check if the script process has exited _ = &mut exit_rx => { break; } // Try to read from stdin read_result = reader.read(&mut buffer) => { match read_result { Ok(0) => break, // EOF Ok(n) => { // Convert the bytes to a string and forward to script let input = String::from_utf8_lossy(&buffer[0..n]).to_string(); if let Err(e) = sender.send(input).await { eprintln!("Error sending input to script: {e}"); break; } }, Err(e) => { eprintln!("Error reading from stdin: {e}"); break; } } } } } }); // Wait for the script to complete let exit_code = session.wait_for_exit().await; // Signal the stdin task to stop let _ = exit_tx.send(()); let _ = stdin_task.await; let code = exit_code.unwrap_or(-1); if code != 0 { eprintln!("Script exited with code {code}"); } Ok(code) } async fn handle_new_script( settings: &Settings, new_script: NewScript, script_store: ScriptStore, script_db: atuin_scripts::database::Database, history_db: &impl Database, ) -> Result<()> { let mut stdin = std::io::stdin(); let script_content = if let Some(count_opt) = new_script.last { // Get the last N commands from history, plus 1 to exclude the command that runs this script let count = count_opt.unwrap_or(1) + 1; // Add 1 to the count to exclude the current command let context = atuin_client::database::current_context(); // Get the last N+1 commands, filtering by the default mode let filters = [settings.default_filter_mode(context.git_root.is_some())]; let mut history = history_db .list(&filters, &context, Some(count), false, false) .await?; // Reverse to get chronological order history.reverse(); // Skip the most recent command (which would be the atuin scripts new command itself) if !history.is_empty() { history.pop(); // Remove the most recent command } // Format the commands into a script let commands: Vec<String> = history.iter().map(|h| h.command.clone()).collect(); if 
commands.is_empty() { bail!("No commands found in history"); } let script_text = commands.join("\n"); // Only open editor if --no-edit is not specified if new_script.no_edit { Some(script_text) } else { // Open the editor with the commands pre-loaded Some(Self::open_editor(Some(&script_text))?) } } else if let Some(script_path) = new_script.script { let script_content = std::fs::read_to_string(script_path)?; Some(script_content) } else if !stdin.is_terminal() { let mut buffer = String::new(); stdin.read_to_string(&mut buffer)?; Some(buffer) } else { // Open editor with empty file Some(Self::open_editor(None)?) }; let script = Script::builder() .name(new_script.name) .description(new_script.description.unwrap_or_default()) .shebang(new_script.shebang.unwrap_or_default()) .tags(new_script.tags) .script(script_content.unwrap_or_default()) .build(); script_store.create(script).await?; script_store.build(script_db).await?; Ok(()) } async fn handle_run( _settings: &Settings, run: Run, script_db: atuin_scripts::database::Database, ) -> Result<()> { let script = script_db.get_by_name(&run.name).await?; if let Some(script) = script { // Get variables used in the template let variables = template_variables(&script)?; // Create a hashmap to store variable values let mut variable_values: HashMap<String, serde_json::Value> = HashMap::new(); // Parse variables from command-line arguments first for var_str in &run.var { if let Some((key, value)) = var_str.split_once('=') { // Add to variable values variable_values.insert( key.to_string(), serde_json::Value::String(value.to_string()), ); debug!("Using CLI variable: {}={}", key, value); } else { eprintln!("Warning: Ignoring malformed variable specification: {var_str}"); eprintln!("Variables should be specified as KEY=VALUE"); } } // Collect variables that are still needed (not specified via CLI) let remaining_vars: HashSet<String> = variables .into_iter() .filter(|var| !variable_values.contains_key(var)) .collect(); // If there are 
variables in the template that weren't specified on the command line, prompt for them if !remaining_vars.is_empty() { println!("This script contains template variables that need values:"); let stdin = std::io::stdin(); let mut input = String::new(); for var in remaining_vars { input.clear(); println!("Enter value for '{var}': "); if stdin.read_line(&mut input).is_err() { eprintln!("Failed to read input for variable '{var}'"); // Provide an empty string as fallback variable_values.insert(var, serde_json::Value::String(String::new())); continue; } let value = input.trim().to_string(); variable_values.insert(var, serde_json::Value::String(value)); } } let final_script = if variable_values.is_empty() { // No variables to template, just use the original script script.script.clone() } else { // If we have variables, we need to template the script debug!("Templating script with variables: {:?}", variable_values); template_script(&script, &variable_values)? }; // Execute the script (either templated or original) Self::execute_script(final_script, script.shebang.clone()).await?; } else { bail!("script not found"); } Ok(()) } async fn handle_list( _settings: &Settings, _list: List, script_db: atuin_scripts::database::Database, ) -> Result<()> { let scripts = script_db.list().await?; if scripts.is_empty() { println!("No scripts found"); } else { println!("Available scripts:"); for script in scripts { if script.tags.is_empty() { println!("- {} ", script.name); } else { println!("- {} [tags: {}]", script.name, script.tags.join(", ")); } // Print description if it's not empty if !script.description.is_empty() { println!(" Description: {}", script.description); } } } Ok(()) } async fn handle_get( _settings: &Settings, get: Get, script_db: atuin_scripts::database::Database, ) -> Result<()> { let script = script_db.get_by_name(&get.name).await?; if let Some(script) = script { if get.script { // Just print the executable script with shebang print!( "{}", 
build_executable_script(script.script.clone(), script.shebang) ); return Ok(()); } // Create a YAML representation of the script println!("---"); println!("name: {}", script.name); println!("id: {}", script.id); if script.description.is_empty() { println!("description: \"\""); } else { println!("description: |"); // Indent multiline descriptions properly for YAML for line in script.description.lines() { println!(" {line}"); } } if script.tags.is_empty() { println!("tags: []"); } else { println!("tags:"); for tag in &script.tags { println!(" - {tag}"); } } println!("shebang: {}", script.shebang); println!("script: |"); // Indent the script content for proper YAML multiline format for line in script.script.lines() { println!(" {line}"); } Ok(()) } else { bail!("script '{}' not found", get.name); } } #[allow(clippy::cognitive_complexity)] async fn handle_edit( _settings: &Settings, edit: Edit, script_store: ScriptStore, script_db: atuin_scripts::database::Database, ) -> Result<()> { debug!("editing script {:?}", edit); // Find the existing script let existing_script = script_db.get_by_name(&edit.name).await?; debug!("existing script {:?}", existing_script); if let Some(mut script) = existing_script { // Update the script with new values if provided if let Some(description) = edit.description { script.description = description; } // Handle renaming if requested if let Some(new_name) = edit.rename { // Check if a script with the new name already exists if (script_db.get_by_name(&new_name).await?).is_some() { bail!("A script named '{}' already exists", new_name); } // Update the name script.name = new_name; } // Handle tag updates with priority: // 1. If --no-tags is provided, clear all tags // 2. If --tags is provided, replace all tags // 3. 
If neither is provided, tags remain unchanged if edit.no_tags { // Clear all tags script.tags.clear(); } else if !edit.tags.is_empty() { // Replace all tags script.tags = edit.tags; } // If none of the above conditions are met, tags remain unchanged if let Some(shebang) = edit.shebang { script.shebang = shebang; } // Handle script content update let script_content = if let Some(script_path) = edit.script { // Load script from provided file std::fs::read_to_string(script_path)? } else if !edit.no_edit { // Open the script in editor for interactive editing if --no-edit is not specified Self::open_editor(Some(&script.script))? } else { // If --no-edit is specified, keep the existing script content script.script.clone() }; // Update the script content script.script = script_content; // Update the script in the store script_store.update(script).await?; // Rebuild the database to apply changes script_store.build(script_db).await?; println!("Script '{}' updated successfully!", edit.name); Ok(()) } else { bail!("script '{}' not found", edit.name); } } async fn handle_delete( _settings: &Settings, delete: Delete, script_store: ScriptStore, script_db: atuin_scripts::database::Database, ) -> Result<()> { // Find the script by name let script = script_db.get_by_name(&delete.name).await?; if let Some(script) = script { // If not force, confirm deletion if !delete.force { println!( "Are you sure you want to delete script '{}'? 
[y/N]", delete.name ); let mut input = String::new(); std::io::stdin().read_line(&mut input)?; let input = input.trim().to_lowercase(); if input != "y" && input != "yes" { println!("Deletion cancelled"); return Ok(()); } } // Delete the script script_store.delete(script.id).await?; // Rebuild the database to apply changes script_store.build(script_db).await?; println!("Script '{}' deleted successfully", delete.name); Ok(()) } else { bail!("script '{}' not found", delete.name); } } pub async fn run( self, settings: &Settings, store: SqliteStore, history_db: &impl Database, ) -> Result<()> { let host_id = Settings::host_id().expect("failed to get host_id"); let encryption_key: [u8; 32] = atuin_client::encryption::load_key(settings)?.into(); let script_store = ScriptStore::new(store, host_id, encryption_key); let script_db = atuin_scripts::database::Database::new(settings.scripts.db_path.clone(), 1.0).await?; match self { Self::New(new_script) => { Self::handle_new_script(settings, new_script, script_store, script_db, history_db) .await } Self::Run(run) => Self::handle_run(settings, run, script_db).await, Self::List(list) => Self::handle_list(settings, list, script_db).await, Self::Get(get) => Self::handle_get(settings, get, script_db).await, Self::Edit(edit) => Self::handle_edit(settings, edit, script_store, script_db).await, Self::Delete(delete) => { Self::handle_delete(settings, delete, script_store, script_db).await } } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search.rs
crates/atuin/src/command/client/search.rs
use std::fs::File; use std::io::{IsTerminal as _, Write, stderr}; use atuin_common::utils::{self, Escapable as _}; use clap::Parser; use eyre::Result; use atuin_client::{ database::Database, database::{OptFilters, current_context}, encryption, history::{History, store::HistoryStore}, record::sqlite_store::SqliteStore, settings::{FilterMode, KeymapMode, SearchMode, Settings, Timezone}, theme::Theme, }; use super::history::ListMode; mod cursor; mod duration; mod engines; mod history_list; mod inspector; mod interactive; pub use duration::format_duration_into; #[allow(clippy::struct_excessive_bools, clippy::struct_field_names)] #[derive(Parser, Debug)] pub struct Cmd { /// Filter search result by directory #[arg(long, short)] cwd: Option<String>, /// Exclude directory from results #[arg(long = "exclude-cwd")] exclude_cwd: Option<String>, /// Filter search result by exit code #[arg(long, short)] exit: Option<i64>, /// Exclude results with this exit code #[arg(long = "exclude-exit")] exclude_exit: Option<i64>, /// Only include results added before this date #[arg(long, short)] before: Option<String>, /// Only include results after this date #[arg(long)] after: Option<String>, /// How many entries to return at most #[arg(long)] limit: Option<i64>, /// Offset from the start of the results #[arg(long)] offset: Option<i64>, /// Open interactive search UI #[arg(long, short)] interactive: bool, /// Allow overriding filter mode over config #[arg(long = "filter-mode")] filter_mode: Option<FilterMode>, /// Allow overriding search mode over config #[arg(long = "search-mode")] search_mode: Option<SearchMode>, /// Marker argument used to inform atuin that it was invoked from a shell up-key binding (hidden from help to avoid confusion) #[arg(long = "shell-up-key-binding", hide = true)] shell_up_key_binding: bool, /// Notify the keymap at the shell's side #[arg(long = "keymap-mode", default_value = "auto")] keymap_mode: KeymapMode, /// Use human-readable formatting for time 
#[arg(long)] human: bool, query: Option<Vec<String>>, /// Show only the text of the command #[arg(long)] cmd_only: bool, /// Terminate the output with a null, for better multiline handling #[arg(long)] print0: bool, /// Delete anything matching this query. Will not print out the match #[arg(long)] delete: bool, /// Delete EVERYTHING! #[arg(long)] delete_it_all: bool, /// Reverse the order of results, oldest first #[arg(long, short)] reverse: bool, /// Display the command time in another timezone other than the configured default. /// /// This option takes one of the following kinds of values: /// - the special value "local" (or "l") which refers to the system time zone /// - an offset from UTC (e.g. "+9", "-2:30") #[arg(long, visible_alias = "tz")] #[arg(allow_hyphen_values = true)] // Clippy warns about `Option<Option<T>>`, but we suppress it because we need // this distinction for proper argument handling. #[allow(clippy::option_option)] timezone: Option<Option<Timezone>>, /// Available variables: {command}, {directory}, {duration}, {user}, {host}, {time}, {exit} and /// {relativetime}. /// Example: --format "{time} - [{duration}] - {directory}$\t{command}" #[arg(long, short)] format: Option<String>, /// Set the maximum number of lines Atuin's interface should take up. 
#[arg(long = "inline-height")] inline_height: Option<u16>, /// Include duplicate commands in the output (non-interactive only) #[arg(long)] include_duplicates: bool, /// File name to write the result to (hidden from help as this is meant to be used from a script) #[arg(long = "result-file", hide = true)] result_file: Option<String>, } impl Cmd { // clippy: please write this instead // clippy: now it has too many lines // me: I'll do it later OKAY #[allow(clippy::too_many_lines)] pub async fn run( self, db: impl Database, settings: &mut Settings, store: SqliteStore, theme: &Theme, ) -> Result<()> { let query = self.query.map_or_else( || { std::env::var("ATUIN_QUERY").map_or_else( |_| vec![], |query| { query .split(' ') .map(std::string::ToString::to_string) .collect() }, ) }, |query| query, ); if (self.delete_it_all || self.delete) && self.limit.is_some() { // Because of how deletion is implemented, it will always delete all matches // and disregard the limit option. It is also not clear what deletion with a // limit would even mean. Deleting the LIMIT most recent entries that match // the search query would make sense, but that wouldn't match what's displayed // when running the equivalent search, but deleting those entries that are // displayed with the search would leave any duplicates of those lines which may // or may not have been intended to be deleted. eprintln!("\"--limit\" is not compatible with deletion."); return Ok(()); } if self.delete && query.is_empty() { eprintln!( "Please specify a query to match the items you wish to delete. If you wish to delete all history, pass --delete-it-all" ); return Ok(()); } if self.delete_it_all && !query.is_empty() { eprintln!( "--delete-it-all will delete ALL of your history! It does not require a query." 
); return Ok(()); } if let Some(search_mode) = self.search_mode { settings.search_mode = search_mode; } if let Some(filter_mode) = self.filter_mode { settings.filter_mode = Some(filter_mode); } if let Some(inline_height) = self.inline_height { settings.inline_height = inline_height; } settings.shell_up_key_binding = self.shell_up_key_binding; // `keymap_mode` specified in config.toml overrides the `--keymap-mode` // option specified in the keybindings. settings.keymap_mode = match settings.keymap_mode { KeymapMode::Auto => self.keymap_mode, value => value, }; settings.keymap_mode_shell = self.keymap_mode; let encryption_key: [u8; 32] = encryption::load_key(settings)?.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let history_store = HistoryStore::new(store.clone(), host_id, encryption_key); if self.interactive { let item = interactive::history(&query, settings, db, &history_store, theme).await?; if let Some(result_file) = self.result_file { let mut file = File::create(result_file)?; write!(file, "{item}")?; } else if stderr().is_terminal() { eprintln!("{}", item.escape_control()); } else { eprintln!("{item}"); } } else { let opt_filter = OptFilters { exit: self.exit, exclude_exit: self.exclude_exit, cwd: self.cwd, exclude_cwd: self.exclude_cwd, before: self.before, after: self.after, limit: self.limit, offset: self.offset, reverse: self.reverse, include_duplicates: self.include_duplicates, }; let mut entries = run_non_interactive(settings, opt_filter.clone(), &query, &db).await?; if entries.is_empty() { std::process::exit(1) } // if we aren't deleting, print it all if self.delete || self.delete_it_all { // delete it // it only took me _years_ to add this // sorry while !entries.is_empty() { for entry in &entries { eprintln!("deleting {}", entry.id); if settings.sync.records { let (id, _) = history_store.delete(entry.id.clone()).await?; history_store.incremental_build(&db, &[id]).await?; } else { db.delete(entry.clone()).await?; } } 
entries = run_non_interactive(settings, opt_filter.clone(), &query, &db).await?; } } else { let format = match self.format { None => Some(settings.history_format.as_str()), _ => self.format.as_deref(), }; let tz = match self.timezone { Some(Some(tz)) => tz, // User provided a value Some(None) | None => settings.timezone, // No value was provided }; super::history::print_list( &entries, ListMode::from_flags(self.human, self.cmd_only), format, self.print0, true, tz, ); } } Ok(()) } } // This is supposed to more-or-less mirror the command line version, so ofc // it is going to have a lot of args #[allow(clippy::too_many_arguments, clippy::cast_possible_truncation)] async fn run_non_interactive( settings: &Settings, filter_options: OptFilters, query: &[String], db: &impl Database, ) -> Result<Vec<History>> { let dir = if filter_options.cwd.as_deref() == Some(".") { Some(utils::get_current_dir()) } else { filter_options.cwd }; let context = current_context(); let opt_filter = OptFilters { cwd: dir.clone(), ..filter_options }; let filter_mode = settings.default_filter_mode(context.git_root.is_some()); let results = db .search( settings.search_mode, filter_mode, &context, query.join(" ").as_str(), opt_filter, ) .await?; Ok(results) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/history.rs
crates/atuin/src/command/client/history.rs
use std::{ fmt::{self, Display}, io::{self, IsTerminal, Write}, path::PathBuf, time::Duration, }; use atuin_common::utils::{self, Escapable as _}; use clap::Subcommand; use eyre::{Context, Result}; use runtime_format::{FormatKey, FormatKeyError, ParseSegment, ParsedFmt}; use atuin_client::{ database::{Database, Sqlite, current_context}, encryption, history::{History, store::HistoryStore}, record::sqlite_store::SqliteStore, settings::{ FilterMode::{Directory, Global, Session}, Settings, Timezone, }, }; #[cfg(feature = "sync")] use atuin_client::{record, sync}; use log::{debug, warn}; use time::{OffsetDateTime, macros::format_description}; use super::search::format_duration_into; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Begins a new command in the history Start { /// Collects the command from the `ATUIN_COMMAND_LINE` environment variable, /// which does not need escaping and is more compatible between OS and shells #[arg(long = "command-from-env", hide = true)] cmd_env: bool, command: Vec<String>, }, /// Finishes a new command in the history (adds time, exit code) End { id: String, #[arg(long, short)] exit: i64, #[arg(long, short)] duration: Option<u64>, }, /// List all items in history List { #[arg(long, short)] cwd: bool, #[arg(long, short)] session: bool, #[arg(long)] human: bool, /// Show only the text of the command #[arg(long)] cmd_only: bool, /// Terminate the output with a null, for better multiline support #[arg(long)] print0: bool, #[arg(long, short, default_value = "true")] // accept no value #[arg(num_args(0..=1), default_missing_value("true"))] // accept a value #[arg(action = clap::ArgAction::Set)] reverse: bool, /// Display the command time in another timezone other than the configured default. /// /// This option takes one of the following kinds of values: /// - the special value "local" (or "l") which refers to the system time zone /// - an offset from UTC (e.g. 
"+9", "-2:30") #[arg(long, visible_alias = "tz")] timezone: Option<Timezone>, /// Available variables: {command}, {directory}, {duration}, {user}, {host}, {exit} and {time}. /// Example: --format "{time} - [{duration}] - {directory}$\t{command}" #[arg(long, short)] format: Option<String>, }, /// Get the last command ran Last { #[arg(long)] human: bool, /// Show only the text of the command #[arg(long)] cmd_only: bool, /// Display the command time in another timezone other than the configured default. /// /// This option takes one of the following kinds of values: /// - the special value "local" (or "l") which refers to the system time zone /// - an offset from UTC (e.g. "+9", "-2:30") #[arg(long, visible_alias = "tz")] timezone: Option<Timezone>, /// Available variables: {command}, {directory}, {duration}, {user}, {host} and {time}. /// Example: --format "{time} - [{duration}] - {directory}$\t{command}" #[arg(long, short)] format: Option<String>, }, InitStore, /// Delete history entries matching the configured exclusion filters Prune { /// List matching history lines without performing the actual deletion. #[arg(short = 'n', long)] dry_run: bool, }, /// Delete duplicate history entries (that have the same command, cwd and hostname) Dedup { /// List matching history lines without performing the actual deletion. 
#[arg(short = 'n', long)] dry_run: bool, /// Only delete results added before this date #[arg(long, short)] before: String, /// How many recent duplicates to keep #[arg(long)] dupkeep: u32, }, } #[derive(Clone, Copy, Debug)] pub enum ListMode { Human, CmdOnly, Regular, } impl ListMode { pub const fn from_flags(human: bool, cmd_only: bool) -> Self { if human { ListMode::Human } else if cmd_only { ListMode::CmdOnly } else { ListMode::Regular } } } #[allow(clippy::cast_sign_loss)] pub fn print_list( h: &[History], list_mode: ListMode, format: Option<&str>, print0: bool, reverse: bool, tz: Timezone, ) { let w = std::io::stdout(); let mut w = w.lock(); let fmt_str = match list_mode { ListMode::Human => format .unwrap_or("{time} · {duration}\t{command}") .replace("\\t", "\t"), ListMode::Regular => format .unwrap_or("{time}\t{command}\t{duration}") .replace("\\t", "\t"), // not used ListMode::CmdOnly => String::new(), }; let parsed_fmt = match list_mode { ListMode::Human | ListMode::Regular => parse_fmt(&fmt_str), ListMode::CmdOnly => std::iter::once(ParseSegment::Key("command")).collect(), }; let iterator = if reverse { Box::new(h.iter().rev()) as Box<dyn Iterator<Item = &History>> } else { Box::new(h.iter()) as Box<dyn Iterator<Item = &History>> }; let entry_terminator = if print0 { "\0" } else { "\n" }; let flush_each_line = print0; for history in iterator { let fh = FmtHistory { history, cmd_format: CmdFormat::for_output(&w), tz: &tz, }; let args = parsed_fmt.with_args(&fh); // Check for formatting errors before attempting to write if let Err(err) = args.status() { eprintln!("ERROR: history output failed with: {err}"); std::process::exit(1); } let write_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { write!(w, "{args}{entry_terminator}") })); match write_result { Ok(Ok(())) => { // Write succeeded } Ok(Err(err)) => { if err.kind() != io::ErrorKind::BrokenPipe { eprintln!("ERROR: Failed to write history output: {err}"); std::process::exit(1); } } 
Err(_) => { eprintln!("ERROR: Format string caused a formatting error."); eprintln!( "This may be due to an unsupported format string containing special characters." ); eprintln!( "Please check your format string syntax and ensure literal braces are properly escaped." ); std::process::exit(1); } } if flush_each_line { check_for_write_errors(w.flush()); } } if !flush_each_line { check_for_write_errors(w.flush()); } } fn check_for_write_errors(write: Result<(), io::Error>) { if let Err(err) = write { // Ignore broken pipe (issue #626) if err.kind() != io::ErrorKind::BrokenPipe { eprintln!("ERROR: History output failed with the following error: {err}"); std::process::exit(1); } } } /// Type wrapper around `History` with formatting settings. #[derive(Clone, Copy, Debug)] struct FmtHistory<'a> { history: &'a History, cmd_format: CmdFormat, tz: &'a Timezone, } #[derive(Clone, Copy, Debug)] enum CmdFormat { Literal, Escaped, } impl CmdFormat { fn for_output<O: IsTerminal>(out: &O) -> Self { if out.is_terminal() { Self::Escaped } else { Self::Literal } } } static TIME_FMT: &[time::format_description::FormatItem<'static>] = format_description!("[year]-[month]-[day] [hour repr:24]:[minute]:[second]"); /// defines how to format the history impl FormatKey for FmtHistory<'_> { #[allow(clippy::cast_sign_loss)] fn fmt(&self, key: &str, f: &mut fmt::Formatter<'_>) -> Result<(), FormatKeyError> { match key { "command" => match self.cmd_format { CmdFormat::Literal => f.write_str(self.history.command.trim()), CmdFormat::Escaped => f.write_str(&self.history.command.trim().escape_control()), }?, "directory" => f.write_str(self.history.cwd.trim())?, "exit" => f.write_str(&self.history.exit.to_string())?, "duration" => { let dur = Duration::from_nanos(std::cmp::max(self.history.duration, 0) as u64); format_duration_into(dur, f)?; } "time" => { self.history .timestamp .to_offset(self.tz.0) .format(TIME_FMT) .map_err(|_| fmt::Error)? 
.fmt(f)?; } "relativetime" => { let since = OffsetDateTime::now_utc() - self.history.timestamp; let d = Duration::try_from(since).unwrap_or_default(); format_duration_into(d, f)?; } "host" => f.write_str( self.history .hostname .split_once(':') .map_or(&self.history.hostname, |(host, _)| host), )?, "user" => f.write_str( self.history .hostname .split_once(':') .map_or("", |(_, user)| user), )?, _ => return Err(FormatKeyError::UnknownKey), } Ok(()) } } fn parse_fmt(format: &str) -> ParsedFmt<'_> { match ParsedFmt::new(format) { Ok(fmt) => fmt, Err(err) => { eprintln!("ERROR: History formatting failed with the following error: {err}"); if format.contains('"') && (format.contains(":{") || format.contains(",{")) { eprintln!("It looks like you're trying to create JSON output."); eprintln!("For JSON, you need to escape literal braces by doubling them:"); eprintln!("Example: '{{\"command\":\"{{command}}\",\"time\":\"{{time}}\"}}'"); } else { eprintln!( "If your formatting string contains literal curly braces, you need to escape them by doubling:" ); eprintln!("Use {{{{ for literal {{ and }}}} for literal }}"); } std::process::exit(1) } } } impl Cmd { #[allow(clippy::too_many_lines, clippy::cast_possible_truncation)] async fn handle_start(db: &impl Database, settings: &Settings, command: &str) -> Result<()> { // It's better for atuin to silently fail here and attempt to // store whatever is ran, than to throw an error to the terminal let cwd = utils::get_current_dir(); let h: History = History::capture() .timestamp(OffsetDateTime::now_utc()) .command(command) .cwd(cwd) .build() .into(); if !h.should_save(settings) { return Ok(()); } // print the ID // we use this as the key for calling end println!("{}", h.id); db.save(&h).await?; Ok(()) } #[cfg(feature = "daemon")] async fn handle_daemon_start(settings: &Settings, command: &str) -> Result<()> { // It's better for atuin to silently fail here and attempt to // store whatever is ran, than to throw an error to the terminal 
let cwd = utils::get_current_dir(); let h: History = History::capture() .timestamp(OffsetDateTime::now_utc()) .command(command) .cwd(cwd) .build() .into(); if !h.should_save(settings) { return Ok(()); } let resp = atuin_daemon::client::HistoryClient::new( #[cfg(not(unix))] settings.daemon.tcp_port, #[cfg(unix)] settings.daemon.socket_path.clone(), ) .await? .start_history(h) .await?; // print the ID // we use this as the key for calling end println!("{resp}"); Ok(()) } #[allow(unused_variables)] async fn handle_end( db: &impl Database, store: SqliteStore, history_store: HistoryStore, settings: &Settings, id: &str, exit: i64, duration: Option<u64>, ) -> Result<()> { if id.trim() == "" { return Ok(()); } let Some(mut h) = db.load(id).await? else { warn!("history entry is missing"); return Ok(()); }; if h.duration > 0 { debug!("cannot end history - already has duration"); // returning OK as this can occur if someone Ctrl-c a prompt return Ok(()); } if !settings.store_failed && exit > 0 { debug!("history has non-zero exit code, and store_failed is false"); // the history has already been inserted half complete. remove it db.delete(h).await?; return Ok(()); } h.exit = exit; h.duration = match duration { Some(value) => i64::try_from(value).context("command took over 292 years")?, None => i64::try_from((OffsetDateTime::now_utc() - h.timestamp).whole_nanoseconds()) .context("command took over 292 years")?, }; db.update(&h).await?; history_store.push(h).await?; if settings.should_sync()? { #[cfg(feature = "sync")] { if settings.sync.records { let (_, downloaded) = record::sync::sync(settings, &store).await?; Settings::save_sync_time()?; crate::sync::build(settings, &store, db, Some(&downloaded)).await?; } else { debug!("running periodic background sync"); sync::sync(settings, false, db).await?; } } #[cfg(not(feature = "sync"))] debug!("not compiled with sync support"); } else { debug!("sync disabled! 
not syncing"); } Ok(()) } #[cfg(feature = "daemon")] #[allow(unused_variables)] async fn handle_daemon_end( settings: &Settings, id: &str, exit: i64, duration: Option<u64>, ) -> Result<()> { let resp = atuin_daemon::client::HistoryClient::new( #[cfg(not(unix))] settings.daemon.tcp_port, #[cfg(unix)] settings.daemon.socket_path.clone(), ) .await? .end_history(id.to_string(), duration.unwrap_or(0), exit) .await?; Ok(()) } #[allow(clippy::too_many_arguments)] #[allow(clippy::fn_params_excessive_bools)] async fn handle_list( db: &impl Database, settings: &Settings, context: atuin_client::database::Context, session: bool, cwd: bool, mode: ListMode, format: Option<String>, include_deleted: bool, print0: bool, reverse: bool, tz: Timezone, ) -> Result<()> { let filters = match (session, cwd) { (true, true) => [Session, Directory], (true, false) => [Session, Global], (false, true) => [Global, Directory], (false, false) => [ settings.default_filter_mode(context.git_root.is_some()), Global, ], }; let history = db .list(&filters, &context, None, false, include_deleted) .await?; print_list( &history, mode, match format { None => Some(settings.history_format.as_str()), _ => format.as_deref(), }, print0, reverse, tz, ); Ok(()) } async fn handle_prune( db: &impl Database, settings: &Settings, store: SqliteStore, context: atuin_client::database::Context, dry_run: bool, ) -> Result<()> { // Grab all executed commands and filter them using History::should_save. // We could iterate or paginate here if memory usage becomes an issue. let matches: Vec<History> = db .list(&[Global], &context, None, false, false) .await? 
.into_iter() .filter(|h| !h.should_save(settings)) .collect(); match matches.len() { 0 => { println!("No entries to prune."); return Ok(()); } 1 => println!("Found 1 entry to prune."), n => println!("Found {n} entries to prune."), } if dry_run { print_list( &matches, ListMode::Human, Some(settings.history_format.as_str()), false, false, settings.timezone, ); } else { let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? .into(); let host_id = Settings::host_id().expect("failed to get host_id"); let history_store = HistoryStore::new(store.clone(), host_id, encryption_key); for entry in matches { eprintln!("deleting {}", entry.id); if settings.sync.records { let (id, _) = history_store.delete(entry.id.clone()).await?; history_store.incremental_build(db, &[id]).await?; } else { db.delete(entry.clone()).await?; } } } Ok(()) } async fn handle_dedup( db: &impl Database, settings: &Settings, store: SqliteStore, before: i64, dupkeep: u32, dry_run: bool, ) -> Result<()> { if dupkeep == 0 { eprintln!( "\"--dupkeep 0\" would keep 0 copies of duplicate commands and thus delete all of them! Use \"atuin search --delete ...\" if you really want that." ); std::process::exit(1); } let matches: Vec<History> = db.get_dups(before, dupkeep).await?; match matches.len() { 0 => { println!("No duplicates to delete."); return Ok(()); } 1 => println!("Found 1 duplicate to delete."), n => println!("Found {n} duplicates to delete."), } if dry_run { print_list( &matches, ListMode::Human, Some(settings.history_format.as_str()), false, false, settings.timezone, ); } else { let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? 
.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let history_store = HistoryStore::new(store.clone(), host_id, encryption_key); for entry in matches { eprintln!("deleting {}", entry.id); if settings.sync.records { let (id, _) = history_store.delete(entry.id).await?; history_store.incremental_build(db, &[id]).await?; } else { db.delete(entry).await?; } } } Ok(()) } pub async fn run(self, settings: &Settings) -> Result<()> { let context = current_context(); #[cfg(feature = "daemon")] // Skip initializing any databases for start/end, if the daemon is enabled if settings.daemon.enabled { match self { Self::Start { .. } => { let command = self.get_start_command().unwrap_or_default(); return Self::handle_daemon_start(settings, &command).await; } Self::End { id, exit, duration } => { return Self::handle_daemon_end(settings, &id, exit, duration).await; } _ => {} } } let db_path = PathBuf::from(settings.db_path.as_str()); let record_store_path = PathBuf::from(settings.record_store_path.as_str()); let db = Sqlite::new(db_path, settings.local_timeout).await?; let store = SqliteStore::new(record_store_path, settings.local_timeout).await?; let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? .into(); let host_id = Settings::host_id().expect("failed to get host_id"); let history_store = HistoryStore::new(store.clone(), host_id, encryption_key); match self { Self::Start { .. 
} => { let command = self.get_start_command().unwrap_or_default(); Self::handle_start(&db, settings, &command).await } Self::End { id, exit, duration } => { Self::handle_end(&db, store, history_store, settings, &id, exit, duration).await } Self::List { session, cwd, human, cmd_only, print0, reverse, timezone, format, } => { let mode = ListMode::from_flags(human, cmd_only); let tz = timezone.unwrap_or(settings.timezone); Self::handle_list( &db, settings, context, session, cwd, mode, format, false, print0, reverse, tz, ) .await } Self::Last { human, cmd_only, timezone, format, } => { let last = db.last().await?; let last = last.as_slice(); let tz = timezone.unwrap_or(settings.timezone); print_list( last, ListMode::from_flags(human, cmd_only), match format { None => Some(settings.history_format.as_str()), _ => format.as_deref(), }, false, true, tz, ); Ok(()) } Self::InitStore => history_store.init_store(&db).await, Self::Prune { dry_run } => { Self::handle_prune(&db, settings, store, context, dry_run).await } Self::Dedup { dry_run, before, dupkeep, } => { let before = i64::try_from( interim::parse_date_string( before.as_str(), OffsetDateTime::now_utc(), interim::Dialect::Uk, )? .unix_timestamp_nanos(), )?; Self::handle_dedup(&db, settings, store, before, dupkeep, dry_run).await } } } /// Returns the command line to use for the `Start` variant. /// Returns `None` for any other variant. fn get_start_command(&self) -> Option<String> { match self { Self::Start { cmd_env: true, .. } => { Some(std::env::var("ATUIN_COMMAND_LINE").unwrap_or_default()) } Self::Start { command, .. 
} => Some(command.join(" ")), _ => None, } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_format_string_no_panic() { // Don't panic but provide helpful output (issue #2776) let malformed_json = r#"{"command":"{command}","key":"value"}"#; let result = std::panic::catch_unwind(|| parse_fmt(malformed_json)); assert!(result.is_ok()); } #[test] fn test_valid_formats_still_work() { assert!(std::panic::catch_unwind(|| parse_fmt("{command}")).is_ok()); assert!(std::panic::catch_unwind(|| parse_fmt("{time} - {command}")).is_ok()); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init.rs
crates/atuin/src/command/client/init.rs
use std::path::PathBuf; use atuin_client::{encryption, record::sqlite_store::SqliteStore, settings::Settings}; use atuin_dotfiles::store::{AliasStore, var::VarStore}; use clap::{Parser, ValueEnum}; use eyre::{Result, WrapErr}; mod bash; mod fish; mod powershell; mod xonsh; mod zsh; #[derive(Parser, Debug)] pub struct Cmd { shell: Shell, /// Disable the binding of CTRL-R to atuin #[clap(long)] disable_ctrl_r: bool, /// Disable the binding of the Up Arrow key to atuin #[clap(long)] disable_up_arrow: bool, } #[derive(Clone, Copy, ValueEnum, Debug)] #[value(rename_all = "lower")] #[allow(clippy::enum_variant_names, clippy::doc_markdown)] pub enum Shell { /// Zsh setup Zsh, /// Bash setup Bash, /// Fish setup Fish, /// Nu setup Nu, /// Xonsh setup Xonsh, /// PowerShell setup PowerShell, } impl Cmd { fn init_nu(&self) { let full = include_str!("../../shell/atuin.nu"); println!("{full}"); if std::env::var("ATUIN_NOBIND").is_err() { const BIND_CTRL_R: &str = r"$env.config = ( $env.config | upsert keybindings ( $env.config.keybindings | append { name: atuin modifier: control keycode: char_r mode: [emacs, vi_normal, vi_insert] event: { send: executehostcommand cmd: (_atuin_search_cmd) } } ) )"; const BIND_UP_ARROW: &str = r" $env.config = ( $env.config | upsert keybindings ( $env.config.keybindings | append { name: atuin modifier: none keycode: up mode: [emacs, vi_normal, vi_insert] event: { until: [ {send: menuup} {send: executehostcommand cmd: (_atuin_search_cmd '--shell-up-key-binding') } ] } } ) ) "; if !self.disable_ctrl_r { println!("{BIND_CTRL_R}"); } if !self.disable_up_arrow { println!("{BIND_UP_ARROW}"); } } } fn static_init(&self) { match self.shell { Shell::Zsh => { zsh::init_static(self.disable_up_arrow, self.disable_ctrl_r); } Shell::Bash => { bash::init_static(self.disable_up_arrow, self.disable_ctrl_r); } Shell::Fish => { fish::init_static(self.disable_up_arrow, self.disable_ctrl_r); } Shell::Nu => { self.init_nu(); } Shell::Xonsh => { 
xonsh::init_static(self.disable_up_arrow, self.disable_ctrl_r); } Shell::PowerShell => { powershell::init_static(self.disable_up_arrow, self.disable_ctrl_r); } } } async fn dotfiles_init(&self, settings: &Settings) -> Result<()> { let record_store_path = PathBuf::from(settings.record_store_path.as_str()); let sqlite_store = SqliteStore::new(record_store_path, settings.local_timeout).await?; let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? .into(); let host_id = Settings::host_id().expect("failed to get host_id"); let alias_store = AliasStore::new(sqlite_store.clone(), host_id, encryption_key); let var_store = VarStore::new(sqlite_store.clone(), host_id, encryption_key); match self.shell { Shell::Zsh => { zsh::init( alias_store, var_store, self.disable_up_arrow, self.disable_ctrl_r, ) .await?; } Shell::Bash => { bash::init( alias_store, var_store, self.disable_up_arrow, self.disable_ctrl_r, ) .await?; } Shell::Fish => { fish::init( alias_store, var_store, self.disable_up_arrow, self.disable_ctrl_r, ) .await?; } Shell::Nu => self.init_nu(), Shell::Xonsh => { xonsh::init( alias_store, var_store, self.disable_up_arrow, self.disable_ctrl_r, ) .await?; } Shell::PowerShell => { powershell::init( alias_store, var_store, self.disable_up_arrow, self.disable_ctrl_r, ) .await?; } } Ok(()) } pub async fn run(self, settings: &Settings) -> Result<()> { if !settings.paths_ok() { eprintln!( "Atuin settings paths are broken. Disabling atuin shell hooks. Run `atuin doctor` to diagnose." ); return Ok(()); } if settings.dotfiles.enabled { self.dotfiles_init(settings).await?; } else { self.static_init(); } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/doctor.rs
crates/atuin/src/command/client/doctor.rs
use std::process::Command; use std::{env, path::PathBuf, str::FromStr}; use atuin_client::database::Sqlite; use atuin_client::settings::Settings; use atuin_common::shell::{Shell, shell_name}; use atuin_common::utils; use colored::Colorize; use eyre::Result; use serde::Serialize; use sysinfo::{Disks, System, get_current_pid}; #[derive(Debug, Serialize)] struct ShellInfo { pub name: String, // best-effort, not supported on all OSes pub default: String, // Detect some shell plugins that the user has installed. // I'm just going to start with preexec/blesh pub plugins: Vec<String>, // The preexec framework used in the current session, if Atuin is loaded. pub preexec: Option<String>, } impl ShellInfo { // HACK ALERT! // Many of the shell vars we need to detect are not exported :( // So, we're going to run a interactive session and directly check the // variable. There's a chance this won't work, so it should not be fatal. // // Every shell we support handles `shell -ic 'command'` fn shellvar_exists(shell: &str, var: &str) -> bool { let cmd = Command::new(shell) .args([ "-ic", format!("[ -z ${var} ] || echo ATUIN_DOCTOR_ENV_FOUND").as_str(), ]) .output() .map_or(String::new(), |v| { let out = v.stdout; String::from_utf8(out).unwrap_or_default() }); cmd.contains("ATUIN_DOCTOR_ENV_FOUND") } fn detect_preexec_framework(shell: &str) -> Option<String> { if env::var("ATUIN_SESSION").ok().is_none() { None } else if shell.starts_with("bash") || shell == "sh" { env::var("ATUIN_PREEXEC_BACKEND") .ok() .filter(|value| !value.is_empty()) .and_then(|atuin_preexec_backend| { atuin_preexec_backend.rfind(':').and_then(|pos_colon| { u32::from_str(&atuin_preexec_backend[..pos_colon]) .ok() .is_some_and(|preexec_shlvl| { env::var("SHLVL") .ok() .and_then(|shlvl| u32::from_str(&shlvl).ok()) .is_some_and(|shlvl| shlvl == preexec_shlvl) }) .then(|| atuin_preexec_backend[pos_colon + 1..].to_string()) }) }) } else { Some("built-in".to_string()) } } fn validate_plugin_blesh( _shell: &str, 
shell_process: &sysinfo::Process, ble_session_id: &str, ) -> Option<String> { ble_session_id .split('/') .nth(1) .and_then(|field| u32::from_str(field).ok()) .filter(|&blesh_pid| blesh_pid == shell_process.pid().as_u32()) .map(|_| "blesh".to_string()) } pub fn plugins(shell: &str, shell_process: &sysinfo::Process) -> Vec<String> { // consider a different detection approach if there are plugins // that don't set shell vars enum PluginShellType { Any, Bash, // Note: these are currently unused #[allow(dead_code)] Zsh, #[allow(dead_code)] Fish, #[allow(dead_code)] Nushell, #[allow(dead_code)] Xonsh, } enum PluginProbeType { EnvironmentVariable(&'static str), InteractiveShellVariable(&'static str), } type PluginValidator = fn(&str, &sysinfo::Process, &str) -> Option<String>; let plugin_list: [( &str, PluginShellType, PluginProbeType, Option<PluginValidator>, ); 3] = [ ( "atuin", PluginShellType::Any, PluginProbeType::EnvironmentVariable("ATUIN_SESSION"), None, ), ( "blesh", PluginShellType::Bash, PluginProbeType::EnvironmentVariable("BLE_SESSION_ID"), Some(Self::validate_plugin_blesh), ), ( "bash-preexec", PluginShellType::Bash, PluginProbeType::InteractiveShellVariable("bash_preexec_imported"), None, ), ]; plugin_list .into_iter() .filter(|(_, shell_type, _, _)| match shell_type { PluginShellType::Any => true, PluginShellType::Bash => shell.starts_with("bash") || shell == "sh", PluginShellType::Zsh => shell.starts_with("zsh"), PluginShellType::Fish => shell.starts_with("fish"), PluginShellType::Nushell => shell.starts_with("nu"), PluginShellType::Xonsh => shell.starts_with("xonsh"), }) .filter_map(|(plugin, _, probe_type, validator)| -> Option<String> { match probe_type { PluginProbeType::EnvironmentVariable(env) => { env::var(env).ok().filter(|value| !value.is_empty()) } PluginProbeType::InteractiveShellVariable(shellvar) => { ShellInfo::shellvar_exists(shell, shellvar).then_some(String::default()) } } .and_then(|value| { validator.map_or_else( || 
Some(plugin.to_string()), |validator| validator(shell, shell_process, &value), ) }) }) .collect() } pub fn new() -> Self { // TODO: rework to use atuin_common::Shell let sys = System::new_all(); let process = sys .process(get_current_pid().expect("Failed to get current PID")) .expect("Process with current pid does not exist"); let parent = sys .process(process.parent().expect("Atuin running with no parent!")) .expect("Process with parent pid does not exist"); let name = shell_name(Some(parent)); let plugins = ShellInfo::plugins(name.as_str(), parent); let default = Shell::default_shell().unwrap_or(Shell::Unknown).to_string(); let preexec = Self::detect_preexec_framework(name.as_str()); Self { name, default, plugins, preexec, } } } #[derive(Debug, Serialize)] struct DiskInfo { pub name: String, pub filesystem: String, } #[derive(Debug, Serialize)] struct SystemInfo { pub os: String, pub arch: String, pub version: String, pub disks: Vec<DiskInfo>, } impl SystemInfo { pub fn new() -> Self { let disks = Disks::new_with_refreshed_list(); let disks = disks .list() .iter() .map(|d| DiskInfo { name: d.name().to_os_string().into_string().unwrap(), filesystem: d.file_system().to_os_string().into_string().unwrap(), }) .collect(); Self { os: System::name().unwrap_or_else(|| "unknown".to_string()), arch: System::cpu_arch().unwrap_or_else(|| "unknown".to_string()), version: System::os_version().unwrap_or_else(|| "unknown".to_string()), disks, } } } #[derive(Debug, Serialize)] struct SyncInfo { /// Whether the main Atuin sync server is in use /// I'm just calling it Atuin Cloud for lack of a better name atm pub cloud: bool, pub records: bool, pub auto_sync: bool, pub last_sync: String, } impl SyncInfo { pub fn new(settings: &Settings) -> Self { Self { cloud: settings.sync_address == "https://api.atuin.sh", auto_sync: settings.auto_sync, records: settings.sync.records, last_sync: Settings::last_sync() .map_or_else(|_| "no last sync".to_string(), |v| v.to_string()), } } } 
#[derive(Debug)] struct SettingPaths { db: String, record_store: String, key: String, session: String, } impl SettingPaths { pub fn new(settings: &Settings) -> Self { Self { db: settings.db_path.clone(), record_store: settings.record_store_path.clone(), key: settings.key_path.clone(), session: settings.session_path.clone(), } } pub fn verify(&self) { let paths = vec![ ("ATUIN_DB_PATH", &self.db), ("ATUIN_RECORD_STORE", &self.record_store), ("ATUIN_KEY", &self.key), ("ATUIN_SESSION", &self.session), ]; for (path_env_var, path) in paths { if utils::broken_symlink(path) { eprintln!( "{path} (${path_env_var}) is a broken symlink. This may cause issues with Atuin." ); } } } } #[derive(Debug, Serialize)] struct AtuinInfo { pub version: String, pub commit: String, /// Whether the main Atuin sync server is in use /// I'm just calling it Atuin Cloud for lack of a better name atm pub sync: Option<SyncInfo>, pub sqlite_version: String, #[serde(skip)] // probably unnecessary to expose this pub setting_paths: SettingPaths, } impl AtuinInfo { pub async fn new(settings: &Settings) -> Self { let session_path = settings.session_path.as_str(); let logged_in = PathBuf::from(session_path).exists(); let sync = if logged_in { Some(SyncInfo::new(settings)) } else { None }; let sqlite_version = match Sqlite::new("sqlite::memory:", 0.1).await { Ok(db) => db .sqlite_version() .await .unwrap_or_else(|_| "unknown".to_string()), Err(_) => "error".to_string(), }; Self { version: crate::VERSION.to_string(), commit: crate::SHA.to_string(), sync, sqlite_version, setting_paths: SettingPaths::new(settings), } } } #[derive(Debug, Serialize)] struct DoctorDump { pub atuin: AtuinInfo, pub shell: ShellInfo, pub system: SystemInfo, } impl DoctorDump { pub async fn new(settings: &Settings) -> Self { Self { atuin: AtuinInfo::new(settings).await, shell: ShellInfo::new(), system: SystemInfo::new(), } } } fn checks(info: &DoctorDump) { println!(); // spacing // let zfs_error = "[Filesystem] ZFS is known to 
have some issues with SQLite. Atuin uses SQLite heavily. If you are having poor performance, there are some workarounds here: https://github.com/atuinsh/atuin/issues/952".bold().red(); let bash_plugin_error = "[Shell] If you are using Bash, Atuin requires that either bash-preexec or ble.sh (>= 0.4) be installed. An older ble.sh may not be detected. so ignore this if you have ble.sh >= 0.4 set up! Read more here: https://docs.atuin.sh/guide/installation/#bash".bold().red(); let blesh_integration_error = "[Shell] Atuin and ble.sh seem to be loaded in the session, but the integration does not seem to be working. Please check the setup in .bashrc.".bold().red(); // ZFS: https://github.com/atuinsh/atuin/issues/952 if info.system.disks.iter().any(|d| d.filesystem == "zfs") { println!("{zfs_error}"); } info.atuin.setting_paths.verify(); // Shell if info.shell.name == "bash" { if !info .shell .plugins .iter() .any(|p| p == "blesh" || p == "bash-preexec") { println!("{bash_plugin_error}"); } if info.shell.plugins.iter().any(|plugin| plugin == "atuin") && info.shell.plugins.iter().any(|plugin| plugin == "blesh") && info.shell.preexec.as_ref().is_some_and(|val| val == "none") { println!("{blesh_integration_error}"); } } } pub async fn run(settings: &Settings) -> Result<()> { println!("{}", "Atuin Doctor".bold()); println!("Checking for diagnostics"); let dump = DoctorDump::new(settings).await; checks(&dump); let dump = serde_json::to_string_pretty(&dump)?; println!("\nPlease include the output below with any bug reports or issues\n"); println!("{dump}"); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/daemon.rs
crates/atuin/src/command/client/daemon.rs
use eyre::Result; use atuin_client::{database::Sqlite, record::sqlite_store::SqliteStore, settings::Settings}; use atuin_daemon::server::listen; pub async fn run(settings: Settings, store: SqliteStore, history_db: Sqlite) -> Result<()> { listen(settings, store, history_db).await?; Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/kv.rs
crates/atuin/src/command/client/kv.rs
use clap::Subcommand; use eyre::{Context, Result, eyre}; use atuin_client::{encryption, record::sqlite_store::SqliteStore, settings::Settings}; use atuin_kv::store::KvStore; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Set a key-value pair Set { /// Key to set #[arg(long, short)] key: String, /// Value to store value: String, /// Namespace for the key-value pair #[arg(long, short, default_value = "default")] namespace: String, }, /// Delete one or more key-value pairs #[command(alias = "rm")] Delete { /// Keys to delete #[arg(required = true)] keys: Vec<String>, /// Namespace for the key-value pair #[arg(long, short, default_value = "default")] namespace: String, }, /// Retrieve a saved value Get { /// Key to retrieve key: String, /// Namespace for the key-value pair #[arg(long, short, default_value = "default")] namespace: String, }, /// List all keys in a namespace, or in all namespaces #[command(alias = "ls")] List { /// Namespace to list keys from #[arg(long, short, default_value = "default")] namespace: String, /// List all keys in all namespaces #[arg(long, short, alias = "all")] all_namespaces: bool, }, /// Rebuild the KV store Rebuild, } impl Cmd { pub async fn run(&self, settings: &Settings, store: &SqliteStore) -> Result<()> { let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? 
.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let kv_db = atuin_kv::database::Database::new(settings.kv.db_path.clone(), 1.0).await?; let kv_store = KvStore::new(store.clone(), kv_db, host_id, encryption_key); match self { Self::Set { key, value, namespace, } => { if namespace.is_empty() { return Err(eyre!("namespace cannot be empty")); } kv_store.set(namespace, key, value).await } Self::Delete { keys, namespace } => kv_store.delete(namespace, keys).await, Self::Get { key, namespace } => { let kv = kv_store.get(namespace, key).await?; if let Some(val) = kv { println!("{val}"); } Ok(()) } Self::List { namespace, all_namespaces, } => { let entries = if *all_namespaces { kv_store.list(None).await? } else { kv_store.list(Some(namespace)).await? }; for entry in entries { if *all_namespaces { println!("{}.{}", entry.namespace, entry.key); } else { println!("{}", entry.key); } } Ok(()) } Self::Rebuild {} => kv_store.build().await, } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/import.rs
crates/atuin/src/command/client/import.rs
use std::env; use async_trait::async_trait; use clap::Parser; use eyre::Result; use indicatif::ProgressBar; use atuin_client::{ database::Database, history::History, import::{ Importer, Loader, bash::Bash, fish::Fish, nu::Nu, nu_histdb::NuHistDb, powershell::PowerShell, replxx::Replxx, resh::Resh, xonsh::Xonsh, xonsh_sqlite::XonshSqlite, zsh::Zsh, zsh_histdb::ZshHistDb, }, }; #[derive(Parser, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Import history for the current shell Auto, /// Import history from the zsh history file Zsh, /// Import history from the zsh history file ZshHistDb, /// Import history from the bash history file Bash, /// Import history from the replxx history file Replxx, /// Import history from the resh history file Resh, /// Import history from the fish history file Fish, /// Import history from the nu history file Nu, /// Import history from the nu history file NuHistDb, /// Import history from xonsh json files Xonsh, /// Import history from xonsh sqlite db XonshSqlite, /// Import history from the powershell history file Powershell, } const BATCH_SIZE: usize = 100; impl Cmd { #[allow(clippy::cognitive_complexity)] pub async fn run<DB: Database>(&self, db: &DB) -> Result<()> { println!(" Atuin "); println!("======================"); println!(" \u{1f30d} "); println!(" \u{1f418}\u{1f418}\u{1f418}\u{1f418} "); println!(" \u{1f422} "); println!("======================"); println!("Importing history..."); match self { Self::Auto => { if cfg!(windows) { return if env::var("PSModulePath").is_ok() { println!("Detected PowerShell"); import::<PowerShell, DB>(db).await } else { println!("Could not detect the current shell."); println!("Please run atuin import <SHELL>."); println!("To view a list of shells, run atuin import."); Ok(()) }; } // $XONSH_HISTORY_BACKEND isn't always set, but $XONSH_HISTORY_FILE is let xonsh_histfile = env::var("XONSH_HISTORY_FILE").unwrap_or_else(|_| String::new()); let shell = 
env::var("SHELL").unwrap_or_else(|_| String::from("NO_SHELL")); if xonsh_histfile.to_lowercase().ends_with(".json") { println!("Detected Xonsh",); import::<Xonsh, DB>(db).await } else if xonsh_histfile.to_lowercase().ends_with(".sqlite") { println!("Detected Xonsh (SQLite backend)"); import::<XonshSqlite, DB>(db).await } else if shell.ends_with("/zsh") { if ZshHistDb::histpath().is_ok() { println!( "Detected Zsh-HistDb, using :{}", ZshHistDb::histpath().unwrap().to_str().unwrap() ); import::<ZshHistDb, DB>(db).await } else { println!("Detected ZSH"); import::<Zsh, DB>(db).await } } else if shell.ends_with("/fish") { println!("Detected Fish"); import::<Fish, DB>(db).await } else if shell.ends_with("/bash") { println!("Detected Bash"); import::<Bash, DB>(db).await } else if shell.ends_with("/nu") { if NuHistDb::histpath().is_ok() { println!( "Detected Nu-HistDb, using :{}", NuHistDb::histpath().unwrap().to_str().unwrap() ); import::<NuHistDb, DB>(db).await } else { println!("Detected Nushell"); import::<Nu, DB>(db).await } } else if shell.ends_with("/pwsh") { println!("Detected PowerShell"); import::<PowerShell, DB>(db).await } else { println!("cannot import {shell} history"); Ok(()) } } Self::Zsh => import::<Zsh, DB>(db).await, Self::ZshHistDb => import::<ZshHistDb, DB>(db).await, Self::Bash => import::<Bash, DB>(db).await, Self::Replxx => import::<Replxx, DB>(db).await, Self::Resh => import::<Resh, DB>(db).await, Self::Fish => import::<Fish, DB>(db).await, Self::Nu => import::<Nu, DB>(db).await, Self::NuHistDb => import::<NuHistDb, DB>(db).await, Self::Xonsh => import::<Xonsh, DB>(db).await, Self::XonshSqlite => import::<XonshSqlite, DB>(db).await, Self::Powershell => import::<PowerShell, DB>(db).await, } } } pub struct HistoryImporter<'db, DB: Database> { pb: ProgressBar, buf: Vec<History>, db: &'db DB, } impl<'db, DB: Database> HistoryImporter<'db, DB> { fn new(db: &'db DB, len: usize) -> Self { Self { pb: ProgressBar::new(len as u64), buf: 
Vec::with_capacity(BATCH_SIZE), db, } } async fn flush(self) -> Result<()> { if !self.buf.is_empty() { self.db.save_bulk(&self.buf).await?; } self.pb.finish(); Ok(()) } } #[async_trait] impl<DB: Database> Loader for HistoryImporter<'_, DB> { async fn push(&mut self, hist: History) -> Result<()> { self.pb.inc(1); self.buf.push(hist); if self.buf.len() == self.buf.capacity() { self.db.save_bulk(&self.buf).await?; self.buf.clear(); } Ok(()) } } async fn import<I: Importer + Send, DB: Database>(db: &DB) -> Result<()> { println!("Importing history from {}", I::NAME); let mut importer = I::new().await?; let len = importer.entries().await.unwrap(); let mut loader = HistoryImporter::new(db, len); importer.load(&mut loader).await?; loader.flush().await?; println!("Import complete!"); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/dotfiles.rs
crates/atuin/src/command/client/dotfiles.rs
use clap::Subcommand; use eyre::Result; use atuin_client::{record::sqlite_store::SqliteStore, settings::Settings}; mod alias; mod var; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Manage shell aliases with Atuin #[command(subcommand)] Alias(alias::Cmd), /// Manage shell and environment variables with Atuin #[command(subcommand)] Var(var::Cmd), } impl Cmd { pub async fn run(self, settings: &Settings, store: SqliteStore) -> Result<()> { match self { Self::Alias(cmd) => cmd.run(settings, store).await, Self::Var(cmd) => cmd.run(settings, store).await, } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account.rs
crates/atuin/src/command/client/account.rs
use clap::{Args, Subcommand}; use eyre::Result; use atuin_client::record::sqlite_store::SqliteStore; use atuin_client::settings::Settings; pub mod change_password; pub mod delete; pub mod login; pub mod logout; pub mod register; pub mod verify; #[derive(Args, Debug)] pub struct Cmd { #[command(subcommand)] command: Commands, } #[derive(Subcommand, Debug)] pub enum Commands { /// Login to the configured server Login(login::Cmd), /// Register a new account Register(register::Cmd), /// Log out Logout, /// Delete your account, and all synced data Delete, /// Change your password ChangePassword(change_password::Cmd), /// Verify your account Verify(verify::Cmd), } impl Cmd { pub async fn run(self, settings: Settings, store: SqliteStore) -> Result<()> { match self.command { Commands::Login(l) => l.run(&settings, &store).await, Commands::Register(r) => r.run(&settings).await, Commands::Logout => logout::run(&settings), Commands::Delete => delete::run(&settings).await, Commands::ChangePassword(c) => c.run(&settings).await, Commands::Verify(c) => c.run(&settings).await, } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/push.rs
crates/atuin/src/command/client/store/push.rs
use atuin_common::record::HostId; use clap::Args; use eyre::Result; use uuid::Uuid; use atuin_client::{ api_client::Client, record::sync::Operation, record::{sqlite_store::SqliteStore, sync}, settings::Settings, }; #[derive(Args, Debug)] pub struct Push { /// The tag to push (eg, 'history'). Defaults to all tags #[arg(long, short)] pub tag: Option<String>, /// The host to push, in the form of a UUID host ID. Defaults to the current host. #[arg(long)] pub host: Option<Uuid>, /// Force push records /// This will override both host and tag, to be all hosts and all tags. First clear the remote store, then upload all of the /// local store #[arg(long, default_value = "false")] pub force: bool, } impl Push { pub async fn run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { let host_id = Settings::host_id().expect("failed to get host_id"); if self.force { println!("Forcing remote store overwrite!"); println!("Clearing remote store"); let client = Client::new( &settings.sync_address, settings.session_token()?.as_str(), settings.network_connect_timeout, settings.network_timeout * 10, // we may be deleting a lot of data... so up the // timeout ) .expect("failed to create client"); client.delete_store().await?; } // We can actually just use the existing diff/etc to push // 1. Diff // 2. Get operations // 3. Filter operations by // a) are they an upload op? // b) are they for the host/tag we are pushing here? let (diff, _) = sync::diff(settings, &store).await?; let operations = sync::operations(diff, &store).await?; let operations = operations .into_iter() .filter(|op| match op { // No noops or downloads thx Operation::Noop { .. } | Operation::Download { .. } => false, // push, so yes plz to uploads! Operation::Upload { host, tag, .. 
} => { if self.force { return true; } if let Some(h) = self.host { if HostId(h) != *host { return false; } } else if *host != host_id { return false; } if let Some(t) = self.tag.clone() && t != *tag { return false; } true } }) .collect(); let (uploaded, _) = sync::sync_remote(operations, &store, settings).await?; println!("Uploaded {uploaded} records"); Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/rekey.rs
crates/atuin/src/command/client/store/rekey.rs
use clap::Args; use eyre::{Result, bail}; use tokio::{fs::File, io::AsyncWriteExt}; use atuin_client::{ encryption::{Key, decode_key, encode_key, generate_encoded_key, load_key}, record::sqlite_store::SqliteStore, record::store::Store, settings::Settings, }; #[derive(Args, Debug)] pub struct Rekey { /// The new key to use for encryption. Omit for a randomly-generated key key: Option<String>, } impl Rekey { pub async fn run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { let key = if let Some(key) = self.key.clone() { println!("Re-encrypting store with specified key"); match bip39::Mnemonic::from_phrase(&key, bip39::Language::English) { Ok(mnemonic) => encode_key(Key::from_slice(mnemonic.entropy()))?, Err(err) => { match err.downcast_ref::<bip39::ErrorKind>() { Some(err) => { match err { // assume they copied in the base64 key bip39::ErrorKind::InvalidWord => key, bip39::ErrorKind::InvalidChecksum => { bail!("key mnemonic was not valid") } bip39::ErrorKind::InvalidKeysize(_) | bip39::ErrorKind::InvalidWordLength(_) | bip39::ErrorKind::InvalidEntropyLength(_, _) => { bail!("key was not the correct length") } } } _ => { // unknown error. assume they copied the base64 key key } } } } } else { println!("Re-encrypting store with freshly-generated key"); let (_, encoded) = generate_encoded_key()?; encoded }; let current_key: [u8; 32] = load_key(settings)?.into(); let new_key: [u8; 32] = decode_key(key.clone())?.into(); store.re_encrypt(&current_key, &new_key).await?; println!("Store rewritten. Saving new key"); let mut file = File::create(settings.key_path.clone()).await?; file.write_all(key.as_bytes()).await?; Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/rebuild.rs
crates/atuin/src/command/client/store/rebuild.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore}; use atuin_scripts::store::ScriptStore; use clap::Args; use eyre::{Result, bail}; use atuin_client::{ database::Database, encryption, history::store::HistoryStore, record::sqlite_store::SqliteStore, settings::Settings, }; #[derive(Args, Debug)] pub struct Rebuild { pub tag: String, } impl Rebuild { pub async fn run( &self, settings: &Settings, store: SqliteStore, database: &dyn Database, ) -> Result<()> { // keep it as a string and not an enum atm // would be super cool to build this dynamically in the future // eg register handles for rebuilding various tags without having to make this part of the // binary big match self.tag.as_str() { "history" => { self.rebuild_history(settings, store.clone(), database) .await?; } "dotfiles" => { self.rebuild_dotfiles(settings, store.clone()).await?; } "scripts" => { self.rebuild_scripts(settings, store.clone()).await?; } tag => bail!("unknown tag: {tag}"), } Ok(()) } async fn rebuild_history( &self, settings: &Settings, store: SqliteStore, database: &dyn Database, ) -> Result<()> { let encryption_key: [u8; 32] = encryption::load_key(settings)?.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let history_store = HistoryStore::new(store, host_id, encryption_key); history_store.build(database).await?; Ok(()) } async fn rebuild_dotfiles(&self, settings: &Settings, store: SqliteStore) -> Result<()> { let encryption_key: [u8; 32] = encryption::load_key(settings)?.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let alias_store = AliasStore::new(store.clone(), host_id, encryption_key); let var_store = VarStore::new(store.clone(), host_id, encryption_key); alias_store.build().await?; var_store.build().await?; Ok(()) } async fn rebuild_scripts(&self, settings: &Settings, store: SqliteStore) -> Result<()> { let encryption_key: [u8; 32] = encryption::load_key(settings)?.into(); let host_id = Settings::host_id().expect("failed to get 
host_id"); let script_store = ScriptStore::new(store, host_id, encryption_key); let database = atuin_scripts::database::Database::new(settings.scripts.db_path.clone(), 1.0).await?; script_store.build(database).await?; Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/pull.rs
crates/atuin/src/command/client/store/pull.rs
use clap::Args; use eyre::Result; use atuin_client::{ database::Database, record::store::Store, record::sync::Operation, record::{sqlite_store::SqliteStore, sync}, settings::Settings, }; #[derive(Args, Debug)] pub struct Pull { /// The tag to push (eg, 'history'). Defaults to all tags #[arg(long, short)] pub tag: Option<String>, /// Force push records /// This will first wipe the local store, and then download all records from the remote #[arg(long, default_value = "false")] pub force: bool, } impl Pull { pub async fn run( &self, settings: &Settings, store: SqliteStore, db: &dyn Database, ) -> Result<()> { if self.force { println!("Forcing local overwrite!"); println!("Clearing local store"); store.delete_all().await?; } // We can actually just use the existing diff/etc to push // 1. Diff // 2. Get operations // 3. Filter operations by // a) are they a download op? // b) are they for the host/tag we are pushing here? let (diff, _) = sync::diff(settings, &store).await?; let operations = sync::operations(diff, &store).await?; let operations = operations .into_iter() .filter(|op| match op { // No noops or downloads thx Operation::Noop { .. } | Operation::Upload { .. } => false, // pull, so yes plz to downloads! Operation::Download { tag, .. } => { if self.force { return true; } if let Some(t) = self.tag.clone() && t != *tag { return false; } true } }) .collect(); let (_, downloaded) = sync::sync_remote(operations, &store, settings).await?; println!("Downloaded {} records", downloaded.len()); crate::sync::build(settings, &store, db, Some(&downloaded)).await?; Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/verify.rs
crates/atuin/src/command/client/store/verify.rs
use clap::Args; use eyre::Result; use atuin_client::{ encryption::load_key, record::{sqlite_store::SqliteStore, store::Store}, settings::Settings, }; #[derive(Args, Debug)] pub struct Verify {} impl Verify { pub async fn run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { println!("Verifying local store can be decrypted with the current key"); let key = load_key(settings)?; match store.verify(&key.into()).await { Ok(()) => println!("Local store encryption verified OK"), Err(e) => println!("Failed to verify local store encryption: {e:?}"), } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/store/purge.rs
crates/atuin/src/command/client/store/purge.rs
use clap::Args; use eyre::Result; use atuin_client::{ encryption::load_key, record::{sqlite_store::SqliteStore, store::Store}, settings::Settings, }; #[derive(Args, Debug)] pub struct Purge {} impl Purge { pub async fn run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { println!("Purging local records that cannot be decrypted"); let key = load_key(settings)?; match store.purge(&key.into()).await { Ok(()) => println!("Local store purge completed OK"), Err(e) => println!("Failed to purge local store: {e:?}"), } Ok(()) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/dotfiles/alias.rs
crates/atuin/src/command/client/dotfiles/alias.rs
use clap::Subcommand; use eyre::{Context, Result, eyre}; use atuin_client::{encryption, record::sqlite_store::SqliteStore, settings::Settings}; use atuin_dotfiles::{shell::Alias, store::AliasStore}; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Set an alias Set { name: String, value: String }, /// Delete an alias Delete { name: String }, /// List all aliases List, /// Delete all aliases Clear, // There are too many edge cases to parse at the moment. Disable for now. // Import, } impl Cmd { async fn set(&self, store: &AliasStore, name: String, value: String) -> Result<()> { let illegal_char = regex::Regex::new("[ \t\n&();<>|\\\"'`$/]").unwrap(); if illegal_char.is_match(name.as_str()) { return Err(eyre!("Illegal character in alias name")); } let aliases = store.aliases().await?; let found: Vec<Alias> = aliases.into_iter().filter(|a| a.name == name).collect(); if found.is_empty() { println!("Aliasing '{name}={value}'."); } else { println!( "Overwriting alias '{name}={}' with '{name}={value}'.", found[0].value ); } store.set(&name, &value).await?; Ok(()) } async fn list(&self, store: &AliasStore) -> Result<()> { let aliases = store.aliases().await?; for i in aliases { println!("{}={}", i.name, i.value); } Ok(()) } async fn clear(&self, store: &AliasStore) -> Result<()> { let aliases = store.aliases().await?; for i in aliases { self.delete(store, i.name).await?; } Ok(()) } async fn delete(&self, store: &AliasStore, name: String) -> Result<()> { let mut aliases = store.aliases().await?.into_iter(); if let Some(alias) = aliases.find(|alias| alias.name == name) { println!("Deleting '{name}={}'.", alias.value); store.delete(&name).await?; } else { eprintln!("Cannot delete '{name}': Alias not set."); } Ok(()) } /* async fn import(&self, store: &AliasStore) -> Result<()> { let aliases = atuin_dotfiles::shell::import_aliases(store).await?; for i in aliases { println!("Importing {}={}", i.name, i.value); } Ok(()) } */ pub async fn 
run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { if !settings.dotfiles.enabled { eprintln!( "Dotfiles are not enabled. Add\n\n[dotfiles]\nenabled = true\n\nto your configuration file to enable them.\n" ); eprintln!("The default configuration file is located at ~/.config/atuin/config.toml."); return Ok(()); } let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? .into(); let host_id = Settings::host_id().expect("failed to get host_id"); let alias_store = AliasStore::new(store, host_id, encryption_key); match self { Self::Set { name, value } => self.set(&alias_store, name.clone(), value.clone()).await, Self::Delete { name } => self.delete(&alias_store, name.clone()).await, Self::List => self.list(&alias_store).await, Self::Clear => self.clear(&alias_store).await, } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/dotfiles/var.rs
crates/atuin/src/command/client/dotfiles/var.rs
use clap::Subcommand; use eyre::{Context, Result}; use atuin_client::{encryption, record::sqlite_store::SqliteStore, settings::Settings}; use atuin_dotfiles::{shell::Var, store::var::VarStore}; #[derive(Subcommand, Debug)] #[command(infer_subcommands = true)] pub enum Cmd { /// Set a variable Set { name: String, value: String, #[clap(long, short, action)] no_export: bool, }, /// Delete a variable Delete { name: String }, /// List all variables List, } impl Cmd { async fn set(&self, store: VarStore, name: String, value: String, export: bool) -> Result<()> { let vars = store.vars().await?; let found: Vec<Var> = vars.into_iter().filter(|a| a.name == name).collect(); let show_export = if export { "export " } else { "" }; if found.is_empty() { println!("Setting '{show_export}{name}={value}'."); } else { println!( "Overwriting alias '{show_export}{name}={}' with '{name}={value}'.", found[0].value ); } store.set(&name, &value, export).await?; Ok(()) } async fn list(&self, store: VarStore) -> Result<()> { let vars = store.vars().await?; for i in vars.iter().filter(|v| !v.export) { println!("{}={}", i.name, i.value); } for i in vars.iter().filter(|v| v.export) { println!("export {}={}", i.name, i.value); } Ok(()) } async fn delete(&self, store: VarStore, name: String) -> Result<()> { let mut vars = store.vars().await?.into_iter(); if let Some(var) = vars.find(|var| var.name == name) { println!("Deleting '{name}={}'.", var.value); store.delete(&name).await?; } else { eprintln!("Cannot delete '{name}': Var not set."); } Ok(()) } pub async fn run(&self, settings: &Settings, store: SqliteStore) -> Result<()> { if !settings.dotfiles.enabled { eprintln!( "Dotfiles are not enabled. Add\n\n[dotfiles]\nenabled = true\n\nto your configuration file to enable them.\n" ); eprintln!("The default configuration file is located at ~/.config/atuin/config.toml."); return Ok(()); } let encryption_key: [u8; 32] = encryption::load_key(settings) .context("could not load encryption key")? 
.into(); let host_id = Settings::host_id().expect("failed to get host_id"); let var_store = VarStore::new(store, host_id, encryption_key); match self { Self::Set { name, value, no_export, } => { self.set(var_store, name.clone(), value.clone(), !no_export) .await } Self::Delete { name } => self.delete(var_store, name.clone()).await, Self::List => self.list(var_store).await, } } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/register.rs
crates/atuin/src/command/client/account/register.rs
use clap::Parser; use eyre::{Result, bail}; use tokio::{fs::File, io::AsyncWriteExt}; use atuin_client::{api_client, settings::Settings}; #[derive(Parser, Debug)] pub struct Cmd { #[clap(long, short)] pub username: Option<String>, #[clap(long, short)] pub password: Option<String>, #[clap(long, short)] pub email: Option<String>, } impl Cmd { pub async fn run(self, settings: &Settings) -> Result<()> { run(settings, self.username, self.email, self.password).await } } pub async fn run( settings: &Settings, username: Option<String>, email: Option<String>, password: Option<String>, ) -> Result<()> { use super::login::or_user_input; println!("Registering for an Atuin Sync account"); let username = or_user_input(username, "username"); let email = or_user_input(email, "email"); let password = password .clone() .unwrap_or_else(super::login::read_user_password); if password.is_empty() { bail!("please provide a password"); } let session = api_client::register(settings.sync_address.as_str(), &username, &email, &password).await?; let path = settings.session_path.as_str(); let mut file = File::create(path).await?; file.write_all(session.session.as_bytes()).await?; let _key = atuin_client::encryption::load_key(settings)?; println!( "Registration successful! Please make a note of your key (run 'atuin key') and keep it safe." ); println!( "You will need it to log in on other devices, and we cannot help recover it if you lose it." ); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/login.rs
crates/atuin/src/command/client/account/login.rs
use std::{io, path::PathBuf}; use clap::Parser; use eyre::{Context, Result, bail}; use tokio::{fs::File, io::AsyncWriteExt}; use atuin_client::{ api_client, encryption::{Key, decode_key, encode_key, load_key}, record::sqlite_store::SqliteStore, record::store::Store, settings::Settings, }; use atuin_common::api::LoginRequest; use rpassword::prompt_password; #[derive(Parser, Debug)] pub struct Cmd { #[clap(long, short)] pub username: Option<String>, #[clap(long, short)] pub password: Option<String>, /// The encryption key for your account #[clap(long, short)] pub key: Option<String>, } fn get_input() -> Result<String> { let mut input = String::new(); io::stdin().read_line(&mut input)?; Ok(input.trim_end_matches(&['\r', '\n'][..]).to_string()) } impl Cmd { pub async fn run(&self, settings: &Settings, store: &SqliteStore) -> Result<()> { // TODO(ellie): Replace this with a call to atuin_client::login::login // The reason I haven't done this yet is that this implementation allows for // an empty key. This will use an existing key file. // // I'd quite like to ditch that behaviour, so have not brought it into the library // function. if settings.logged_in() { bail!( "You are already logged in! Please run 'atuin logout' if you wish to login again" ); } let username = or_user_input(self.username.clone(), "username"); let password = self.password.clone().unwrap_or_else(read_user_password); let key_path = settings.key_path.as_str(); let key_path = PathBuf::from(key_path); println!("IMPORTANT"); println!( "If you are already logged in on another machine, you must ensure that the key you use here is the same as the key you used there." 
); println!("You can find your key by running 'atuin key' on the other machine"); println!("Do not share this key with anyone"); println!("\nRead more here: https://docs.atuin.sh/guide/sync/#login \n"); let key = or_user_input( self.key.clone(), "encryption key [blank to use existing key file]", ); // if provided, the key may be EITHER base64, or a bip mnemonic // try to normalize on base64 let key = if key.is_empty() { key } else { // try parse the key as a mnemonic... match bip39::Mnemonic::from_phrase(&key, bip39::Language::English) { Ok(mnemonic) => encode_key(Key::from_slice(mnemonic.entropy()))?, Err(err) => { match err.downcast_ref::<bip39::ErrorKind>() { Some(err) => { match err { // assume they copied in the base64 key bip39::ErrorKind::InvalidWord => key, bip39::ErrorKind::InvalidChecksum => { bail!("key mnemonic was not valid") } bip39::ErrorKind::InvalidKeysize(_) | bip39::ErrorKind::InvalidWordLength(_) | bip39::ErrorKind::InvalidEntropyLength(_, _) => { bail!("key was not the correct length") } } } _ => { // unknown error. assume they copied the base64 key key } } } } }; // I've simplified this a little, but it could really do with a refactor // Annoyingly, it's also very important to get it correct if key.is_empty() { if key_path.exists() { let bytes = fs_err::read_to_string(key_path) .context("existing key file couldn't be read")?; if decode_key(bytes).is_err() { bail!("the key in existing key file was invalid"); } } else { panic!( "No key provided. Please use 'atuin key' on your other machine, or recover your key from a backup." ) } } else if !key_path.exists() { if decode_key(key.clone()).is_err() { bail!("the specified key was invalid"); } let mut file = File::create(key_path).await?; file.write_all(key.as_bytes()).await?; } else { // we now know that the user has logged in specifying a key, AND that the key path // exists // 1. check if the saved key and the provided key match. if so, nothing to do. // 2. 
if not, re-encrypt the local history and overwrite the key let current_key: [u8; 32] = load_key(settings)?.into(); let encoded = key.clone(); // gonna want to save it in a bit let new_key: [u8; 32] = decode_key(key) .context("could not decode provided key - is not valid base64")? .into(); if new_key != current_key { println!("\nRe-encrypting local store with new key"); store.re_encrypt(&current_key, &new_key).await?; println!("Writing new key"); let mut file = File::create(key_path).await?; file.write_all(encoded.as_bytes()).await?; } } let session = api_client::login( settings.sync_address.as_str(), LoginRequest { username, password }, ) .await?; let session_path = settings.session_path.as_str(); let mut file = File::create(session_path).await?; file.write_all(session.session.as_bytes()).await?; println!("Logged in!"); Ok(()) } } pub(super) fn or_user_input(value: Option<String>, name: &'static str) -> String { value.unwrap_or_else(|| read_user_input(name)) } pub(super) fn read_user_password() -> String { let password = prompt_password("Please enter password: "); password.expect("Failed to read from input") } fn read_user_input(name: &'static str) -> String { eprint!("Please enter {name}: "); get_input().expect("Failed to read from input") } #[cfg(test)] mod tests { use atuin_client::encryption::Key; #[test] fn mnemonic_round_trip() { let key = Key::from([ 3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3, 3, 8, 3, 2, 7, 9, 5, ]); let phrase = bip39::Mnemonic::from_entropy(&key, bip39::Language::English) .unwrap() .into_phrase(); let mnemonic = bip39::Mnemonic::from_phrase(&phrase, bip39::Language::English).unwrap(); assert_eq!(mnemonic.entropy(), key.as_slice()); assert_eq!( phrase, "adapt amused able anxiety mother adapt beef gaze amount else seat alcohol cage lottery avoid scare alcohol cactus school avoid coral adjust catch pink" ); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/verify.rs
crates/atuin/src/command/client/account/verify.rs
use clap::Parser; use eyre::Result; use atuin_client::{api_client, settings::Settings}; #[derive(Parser, Debug)] pub struct Cmd { #[clap(long, short)] pub token: Option<String>, } impl Cmd { pub async fn run(self, settings: &Settings) -> Result<()> { run(settings, self.token).await } } pub async fn run(settings: &Settings, token: Option<String>) -> Result<()> { let client = api_client::Client::new( &settings.sync_address, settings.session_token()?.as_str(), settings.network_connect_timeout, settings.network_timeout, )?; let (email_sent, verified) = client.verify(token).await?; match (email_sent, verified) { (true, false) => { println!("Verification sent! Please check your inbox"); } (false, true) => { println!("Your account is verified"); } (false, false) => { println!( "Your Atuin server does not have mail setup. This is not required, though your account cannot be verified. Speak to your admin." ); } _ => { println!( "Invalid email and verification status. This is a bug. Please open an issue: https://github.com/atuinsh/atuin" ); } } Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/change_password.rs
crates/atuin/src/command/client/account/change_password.rs
use clap::Parser; use eyre::{Result, bail}; use atuin_client::{api_client, settings::Settings}; use rpassword::prompt_password; #[derive(Parser, Debug)] pub struct Cmd { #[clap(long, short)] pub current_password: Option<String>, #[clap(long, short)] pub new_password: Option<String>, } impl Cmd { pub async fn run(self, settings: &Settings) -> Result<()> { run(settings, self.current_password, self.new_password).await } } pub async fn run( settings: &Settings, current_password: Option<String>, new_password: Option<String>, ) -> Result<()> { let client = api_client::Client::new( &settings.sync_address, settings.session_token()?.as_str(), settings.network_connect_timeout, settings.network_timeout, )?; let current_password = current_password.clone().unwrap_or_else(|| { prompt_password("Please enter the current password: ").expect("Failed to read from input") }); if current_password.is_empty() { bail!("please provide the current password"); } let new_password = new_password.clone().unwrap_or_else(|| { prompt_password("Please enter the new password: ").expect("Failed to read from input") }); if new_password.is_empty() { bail!("please provide a new password"); } client .change_password(current_password, new_password) .await?; println!("Account password successfully changed!"); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/logout.rs
crates/atuin/src/command/client/account/logout.rs
use atuin_client::settings::Settings; use eyre::Result; pub fn run(settings: &Settings) -> Result<()> { atuin_client::logout::logout(settings) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/account/delete.rs
crates/atuin/src/command/client/account/delete.rs
use atuin_client::{api_client, settings::Settings}; use eyre::{Result, bail}; use std::fs::remove_file; use std::path::PathBuf; pub async fn run(settings: &Settings) -> Result<()> { let session_path = settings.session_path.as_str(); if !PathBuf::from(session_path).exists() { bail!("You are not logged in"); } let client = api_client::Client::new( &settings.sync_address, settings.session_token()?.as_str(), settings.network_connect_timeout, settings.network_timeout, )?; client.delete().await?; // Fixes stale session+key when account is deleted via CLI. if PathBuf::from(session_path).exists() { remove_file(PathBuf::from(session_path))?; } println!("Your account is deleted"); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/cursor.rs
crates/atuin/src/command/client/search/cursor.rs
use atuin_client::settings::WordJumpMode; pub struct Cursor { source: String, index: usize, } impl From<String> for Cursor { fn from(source: String) -> Self { Self { source, index: 0 } } } pub struct WordJumper<'a> { word_chars: &'a str, word_jump_mode: WordJumpMode, } impl WordJumper<'_> { fn is_word_boundary(&self, c: char, next_c: char) -> bool { (c.is_whitespace() && !next_c.is_whitespace()) || (!c.is_whitespace() && next_c.is_whitespace()) || (self.word_chars.contains(c) && !self.word_chars.contains(next_c)) || (!self.word_chars.contains(c) && self.word_chars.contains(next_c)) } fn emacs_get_next_word_pos(&self, source: &str, index: usize) -> usize { let index = (index + 1..source.len().saturating_sub(1)) .find(|&i| self.word_chars.contains(source.chars().nth(i).unwrap())) .unwrap_or(source.len()); (index + 1..source.len().saturating_sub(1)) .find(|&i| !self.word_chars.contains(source.chars().nth(i).unwrap())) .unwrap_or(source.len()) } fn emacs_get_prev_word_pos(&self, source: &str, index: usize) -> usize { let index = (1..index) .rev() .find(|&i| self.word_chars.contains(source.chars().nth(i).unwrap())) .unwrap_or(0); (1..index) .rev() .find(|&i| !self.word_chars.contains(source.chars().nth(i).unwrap())) .map_or(0, |i| i + 1) } fn subl_get_next_word_pos(&self, source: &str, index: usize) -> usize { let index = (index..source.len().saturating_sub(1)).find(|&i| { self.is_word_boundary( source.chars().nth(i).unwrap(), source.chars().nth(i + 1).unwrap(), ) }); if index.is_none() { return source.len(); } (index.unwrap() + 1..source.len()) .find(|&i| !source.chars().nth(i).unwrap().is_whitespace()) .unwrap_or(source.len()) } fn subl_get_prev_word_pos(&self, source: &str, index: usize) -> usize { let index = (1..index) .rev() .find(|&i| !source.chars().nth(i).unwrap().is_whitespace()); if index.is_none() { return 0; } (1..index.unwrap()) .rev() .find(|&i| { self.is_word_boundary( source.chars().nth(i - 1).unwrap(), source.chars().nth(i).unwrap(), ) }) .unwrap_or(0) 
} fn get_next_word_pos(&self, source: &str, index: usize) -> usize { match self.word_jump_mode { WordJumpMode::Emacs => self.emacs_get_next_word_pos(source, index), WordJumpMode::Subl => self.subl_get_next_word_pos(source, index), } } fn get_prev_word_pos(&self, source: &str, index: usize) -> usize { match self.word_jump_mode { WordJumpMode::Emacs => self.emacs_get_prev_word_pos(source, index), WordJumpMode::Subl => self.subl_get_prev_word_pos(source, index), } } } impl Cursor { pub fn as_str(&self) -> &str { self.source.as_str() } pub fn into_inner(self) -> String { self.source } /// Returns the string before the cursor pub fn substring(&self) -> &str { &self.source[..self.index] } /// Returns the currently selected [`char`] pub fn char(&self) -> Option<char> { self.source[self.index..].chars().next() } pub fn right(&mut self) { if self.index < self.source.len() { loop { self.index += 1; if self.source.is_char_boundary(self.index) { break; } } } } pub fn left(&mut self) -> bool { if self.index > 0 { loop { self.index -= 1; if self.source.is_char_boundary(self.index) { break true; } } } else { false } } pub fn next_word(&mut self, word_chars: &str, word_jump_mode: WordJumpMode) { let word_jumper = WordJumper { word_chars, word_jump_mode, }; self.index = word_jumper.get_next_word_pos(&self.source, self.index); } pub fn prev_word(&mut self, word_chars: &str, word_jump_mode: WordJumpMode) { let word_jumper = WordJumper { word_chars, word_jump_mode, }; self.index = word_jumper.get_prev_word_pos(&self.source, self.index); } pub fn insert(&mut self, c: char) { self.source.insert(self.index, c); self.index += c.len_utf8(); } pub fn remove(&mut self) -> Option<char> { if self.index < self.source.len() { Some(self.source.remove(self.index)) } else { None } } pub fn remove_next_word(&mut self, word_chars: &str, word_jump_mode: WordJumpMode) { let word_jumper = WordJumper { word_chars, word_jump_mode, }; let next_index = word_jumper.get_next_word_pos(&self.source, 
self.index); self.source.replace_range(self.index..next_index, ""); } pub fn remove_prev_word(&mut self, word_chars: &str, word_jump_mode: WordJumpMode) { let word_jumper = WordJumper { word_chars, word_jump_mode, }; let next_index = word_jumper.get_prev_word_pos(&self.source, self.index); self.source.replace_range(next_index..self.index, ""); self.index = next_index; } pub fn back(&mut self) -> Option<char> { if self.left() { self.remove() } else { None } } pub fn clear(&mut self) { self.source.clear(); self.index = 0; } pub fn end(&mut self) { self.index = self.source.len(); } pub fn start(&mut self) { self.index = 0; } pub fn position(&self) -> usize { self.index } } #[cfg(test)] mod cursor_tests { use super::Cursor; use super::*; static EMACS_WORD_JUMPER: WordJumper = WordJumper { word_chars: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", word_jump_mode: WordJumpMode::Emacs, }; static SUBL_WORD_JUMPER: WordJumper = WordJumper { word_chars: "./\\()\"'-:,.;<>~!@#$%^&*|+=[]{}`~?", word_jump_mode: WordJumpMode::Subl, }; #[test] fn right() { // ö is 2 bytes let mut c = Cursor::from(String::from("öaöböcödöeöfö")); let indices = [0, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 17, 18, 20, 20, 20, 20]; for i in indices { assert_eq!(c.index, i); c.right(); } } #[test] fn left() { // ö is 2 bytes let mut c = Cursor::from(String::from("öaöböcödöeöfö")); c.end(); let indices = [20, 18, 17, 15, 14, 12, 11, 9, 8, 6, 5, 3, 2, 0, 0, 0, 0]; for i in indices { assert_eq!(c.index, i); c.left(); } } #[test] fn test_emacs_get_next_word_pos() { let s = String::from(" aaa ((()))bbb ((())) "); let indices = [(0, 6), (3, 6), (7, 18), (19, 30)]; for (i_src, i_dest) in indices { assert_eq!(EMACS_WORD_JUMPER.get_next_word_pos(&s, i_src), i_dest); } assert_eq!(EMACS_WORD_JUMPER.get_next_word_pos("", 0), 0); } #[test] fn test_emacs_get_prev_word_pos() { let s = String::from(" aaa ((()))bbb ((())) "); let indices = [(30, 15), (29, 15), (15, 3), (3, 0)]; for (i_src, i_dest) in indices 
{ assert_eq!(EMACS_WORD_JUMPER.get_prev_word_pos(&s, i_src), i_dest); } assert_eq!(EMACS_WORD_JUMPER.get_prev_word_pos("", 0), 0); } #[test] fn test_subl_get_next_word_pos() { let s = String::from(" aaa ((()))bbb ((())) "); let indices = [(0, 3), (1, 3), (3, 9), (9, 15), (15, 21), (21, 30)]; for (i_src, i_dest) in indices { assert_eq!(SUBL_WORD_JUMPER.get_next_word_pos(&s, i_src), i_dest); } assert_eq!(SUBL_WORD_JUMPER.get_next_word_pos("", 0), 0); } #[test] fn test_subl_get_prev_word_pos() { let s = String::from(" aaa ((()))bbb ((())) "); let indices = [(30, 21), (21, 15), (15, 9), (9, 3), (3, 0)]; for (i_src, i_dest) in indices { assert_eq!(SUBL_WORD_JUMPER.get_prev_word_pos(&s, i_src), i_dest); } assert_eq!(SUBL_WORD_JUMPER.get_prev_word_pos("", 0), 0); } #[test] fn pop() { let mut s = String::from("öaöböcödöeöfö"); let mut c = Cursor::from(s.clone()); c.end(); while !s.is_empty() { let c1 = s.pop(); let c2 = c.back(); assert_eq!(c1, c2); assert_eq!(s.as_str(), c.substring()); } let c1 = s.pop(); let c2 = c.back(); assert_eq!(c1, c2); } #[test] fn back() { let mut c = Cursor::from(String::from("öaöböcödöeöfö")); // move to ^ for _ in 0..4 { c.right(); } assert_eq!(c.substring(), "öaöb"); assert_eq!(c.back(), Some('b')); assert_eq!(c.back(), Some('ö')); assert_eq!(c.back(), Some('a')); assert_eq!(c.back(), Some('ö')); assert_eq!(c.back(), None); assert_eq!(c.as_str(), "öcödöeöfö"); } #[test] fn insert() { let mut c = Cursor::from(String::from("öaöböcödöeöfö")); // move to ^ for _ in 0..4 { c.right(); } assert_eq!(c.substring(), "öaöb"); c.insert('ö'); c.insert('g'); c.insert('ö'); c.insert('h'); assert_eq!(c.substring(), "öaöbögöh"); assert_eq!(c.as_str(), "öaöbögöhöcödöeöfö"); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/engines.rs
crates/atuin/src/command/client/search/engines.rs
use async_trait::async_trait; use atuin_client::{ database::{Context, Database}, history::History, settings::{FilterMode, SearchMode, Settings}, }; use eyre::Result; use super::cursor::Cursor; pub mod db; pub mod skim; pub fn engine(search_mode: SearchMode) -> Box<dyn SearchEngine> { match search_mode { SearchMode::Skim => Box::new(skim::Search::new()) as Box<_>, mode => Box::new(db::Search(mode)) as Box<_>, } } pub struct SearchState { pub input: Cursor, pub filter_mode: FilterMode, pub context: Context, } impl SearchState { pub(crate) fn rotate_filter_mode(&mut self, settings: &Settings, offset: isize) { let mut i = settings .search .filters .iter() .position(|&m| m == self.filter_mode) .unwrap_or_default(); for _ in 0..settings.search.filters.len() { i = (i.wrapping_add_signed(offset)) % settings.search.filters.len(); let mode = settings.search.filters[i]; if self.filter_mode_available(mode, settings) { self.filter_mode = mode; break; } } } fn filter_mode_available(&self, mode: FilterMode, settings: &Settings) -> bool { match mode { FilterMode::Workspace => settings.workspaces && self.context.git_root.is_some(), _ => true, } } } #[async_trait] pub trait SearchEngine: Send + Sync + 'static { async fn full_query( &mut self, state: &SearchState, db: &mut dyn Database, ) -> Result<Vec<History>>; async fn query(&mut self, state: &SearchState, db: &mut dyn Database) -> Result<Vec<History>> { if state.input.as_str().is_empty() { Ok(db .list(&[state.filter_mode], &state.context, Some(200), true, false) .await? .into_iter() .collect::<Vec<_>>()) } else { self.full_query(state, db).await } } fn get_highlight_indices(&self, command: &str, search_input: &str) -> Vec<usize>; }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/history_list.rs
crates/atuin/src/command/client/search/history_list.rs
use std::time::Duration; use super::duration::format_duration; use super::engines::SearchEngine; use atuin_client::{ history::History, theme::{Meaning, Theme}, }; use atuin_common::utils::Escapable as _; use itertools::Itertools; use ratatui::{ buffer::Buffer, crossterm::style, layout::Rect, style::{Modifier, Style}, widgets::{Block, StatefulWidget, Widget}, }; use time::OffsetDateTime; pub struct HistoryHighlighter<'a> { pub engine: &'a dyn SearchEngine, pub search_input: &'a str, } impl HistoryHighlighter<'_> { pub fn get_highlight_indices(&self, command: &str) -> Vec<usize> { self.engine .get_highlight_indices(command, self.search_input) } } pub struct HistoryList<'a> { history: &'a [History], block: Option<Block<'a>>, inverted: bool, /// Apply an alternative highlighting to the selected row alternate_highlight: bool, now: &'a dyn Fn() -> OffsetDateTime, indicator: &'a str, theme: &'a Theme, history_highlighter: HistoryHighlighter<'a>, show_numeric_shortcuts: bool, } #[derive(Default)] pub struct ListState { offset: usize, selected: usize, max_entries: usize, } impl ListState { pub fn selected(&self) -> usize { self.selected } pub fn max_entries(&self) -> usize { self.max_entries } pub fn select(&mut self, index: usize) { self.selected = index; } } impl StatefulWidget for HistoryList<'_> { type State = ListState; fn render(mut self, area: Rect, buf: &mut Buffer, state: &mut Self::State) { let list_area = self.block.take().map_or(area, |b| { let inner_area = b.inner(area); b.render(area, buf); inner_area }); if list_area.width < 1 || list_area.height < 1 || self.history.is_empty() { return; } let list_height = list_area.height as usize; let (start, end) = self.get_items_bounds(state.selected, state.offset, list_height); state.offset = start; state.max_entries = end - start; let mut s = DrawState { buf, list_area, x: 0, y: 0, state, inverted: self.inverted, alternate_highlight: self.alternate_highlight, now: &self.now, indicator: self.indicator, theme: self.theme, 
history_highlighter: self.history_highlighter, show_numeric_shortcuts: self.show_numeric_shortcuts, }; for item in self.history.iter().skip(state.offset).take(end - start) { s.index(); s.duration(item); s.time(item); s.command(item); // reset line s.y += 1; s.x = 0; } } } impl<'a> HistoryList<'a> { #[allow(clippy::too_many_arguments)] pub fn new( history: &'a [History], inverted: bool, alternate_highlight: bool, now: &'a dyn Fn() -> OffsetDateTime, indicator: &'a str, theme: &'a Theme, history_highlighter: HistoryHighlighter<'a>, show_numeric_shortcuts: bool, ) -> Self { Self { history, block: None, inverted, alternate_highlight, now, indicator, theme, history_highlighter, show_numeric_shortcuts, } } pub fn block(mut self, block: Block<'a>) -> Self { self.block = Some(block); self } fn get_items_bounds(&self, selected: usize, offset: usize, height: usize) -> (usize, usize) { let offset = offset.min(self.history.len().saturating_sub(1)); let max_scroll_space = height.min(10).min(self.history.len() - selected); if offset + height < selected + max_scroll_space { let end = selected + max_scroll_space; (end - height, end) } else if selected < offset { (selected, selected + height) } else { (offset, offset + height) } } } struct DrawState<'a> { buf: &'a mut Buffer, list_area: Rect, x: u16, y: u16, state: &'a ListState, inverted: bool, alternate_highlight: bool, now: &'a dyn Fn() -> OffsetDateTime, indicator: &'a str, theme: &'a Theme, history_highlighter: HistoryHighlighter<'a>, show_numeric_shortcuts: bool, } // longest line prefix I could come up with #[allow(clippy::cast_possible_truncation)] // we know that this is <65536 length pub const PREFIX_LENGTH: u16 = " > 123ms 59s ago".len() as u16; static SPACES: &str = " "; static _ASSERT: () = assert!(SPACES.len() == PREFIX_LENGTH as usize); // these encode the slices of `" > "`, `" {n} "`, or `" "` in a compact form. 
// Yes, this is a hack, but it makes me feel happy static SLICES: &str = " > 1 2 3 4 5 6 7 8 9 "; impl DrawState<'_> { fn index(&mut self) { if !self.show_numeric_shortcuts { let i = self.y as usize + self.state.offset; let is_selected = i == self.state.selected(); let prompt: &str = if is_selected { self.indicator } else { " " }; self.draw(prompt, Style::default()); return; } // these encode the slices of `" > "`, `" {n} "`, or `" "` in a compact form. // Yes, this is a hack, but it makes me feel happy let i = self.y as usize + self.state.offset; let i = i.checked_sub(self.state.selected); let i = i.unwrap_or(10).min(10) * 2; let prompt: &str = if i == 0 { self.indicator } else { &SLICES[i..i + 3] }; self.draw(prompt, Style::default()); } fn duration(&mut self, h: &History) { let status = self.theme.as_style(if h.success() { Meaning::AlertInfo } else { Meaning::AlertError }); let duration = Duration::from_nanos(u64::try_from(h.duration).unwrap_or(0)); self.draw(&format_duration(duration), status.into()); } #[allow(clippy::cast_possible_truncation)] // we know that time.len() will be <6 fn time(&mut self, h: &History) { let style = self.theme.as_style(Meaning::Guidance); // Account for the chance that h.timestamp is "in the future" // This would mean that "since" is negative, and the unwrap here // would fail. // If the timestamp would otherwise be in the future, display // the time since as 0. let since = (self.now)() - h.timestamp; let time = format_duration(since.try_into().unwrap_or_default()); // pad the time a little bit before we write. 
this aligns things nicely // skip padding if for some reason it is already too long to align nicely let padding = usize::from(PREFIX_LENGTH).saturating_sub(usize::from(self.x) + 4 + time.len()); self.draw(&SPACES[..padding], Style::default()); self.draw(&time, style.into()); self.draw(" ago", style.into()); } fn command(&mut self, h: &History) { let mut style = self.theme.as_style(Meaning::Base); let mut row_highlighted = false; if !self.alternate_highlight && (self.y as usize + self.state.offset == self.state.selected) { row_highlighted = true; // if not applying alternative highlighting to the whole row, color the command style = self.theme.as_style(Meaning::AlertError); style.attributes.set(style::Attribute::Bold); } let highlight_indices = self.history_highlighter.get_highlight_indices( h.command .escape_control() .split_ascii_whitespace() .join(" ") .as_str(), ); let mut pos = 0; for section in h.command.escape_control().split_ascii_whitespace() { self.draw(" ", style.into()); for ch in section.chars() { if self.x > self.list_area.width { // Avoid attempting to draw a command section beyond the width // of the list return; } let mut style = style; if highlight_indices.contains(&pos) { if row_highlighted { // if the row is highlighted bold is not enough as the whole row is bold // change the color too style = self.theme.as_style(Meaning::AlertWarn); } style.attributes.set(style::Attribute::Bold); } let s = ch.to_string(); self.draw(&s, style.into()); pos += s.len(); } pos += 1; } } fn draw(&mut self, s: &str, mut style: Style) { let cx = self.list_area.left() + self.x; let cy = if self.inverted { self.list_area.top() + self.y } else { self.list_area.bottom() - self.y - 1 }; if self.alternate_highlight && (self.y as usize + self.state.offset == self.state.selected) { style = style.add_modifier(Modifier::REVERSED); } let w = (self.list_area.width - self.x) as usize; self.x += self.buf.set_stringn(cx, cy, s, w, style).0 - cx; } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/interactive.rs
crates/atuin/src/command/client/search/interactive.rs
use std::{ io::{Write, stdout}, time::Duration, }; use atuin_common::{shell::Shell, utils::Escapable as _}; use eyre::Result; use futures_util::FutureExt; use semver::Version; use time::OffsetDateTime; use unicode_width::UnicodeWidthStr; use super::{ cursor::Cursor, engines::{SearchEngine, SearchState}, history_list::{HistoryList, ListState, PREFIX_LENGTH}, }; use atuin_client::{ database::{Database, current_context}, history::{History, HistoryId, HistoryStats, store::HistoryStore}, settings::{CursorStyle, ExitMode, KeymapMode, PreviewStrategy, SearchMode, Settings}, }; use crate::command::client::search::history_list::HistoryHighlighter; use crate::command::client::theme::{Meaning, Theme}; use crate::{VERSION, command::client::search::engines}; use ratatui::{ Frame, Terminal, TerminalOptions, Viewport, backend::CrosstermBackend, crossterm::{ cursor::SetCursorStyle, event::{ self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode, KeyEvent, KeyModifiers, MouseEvent, }, execute, terminal, }, layout::{Alignment, Constraint, Direction, Layout}, prelude::*, style::{Modifier, Style}, text::{Line, Span, Text}, widgets::{Block, BorderType, Borders, Padding, Paragraph, Tabs, block::Title}, }; #[cfg(not(target_os = "windows"))] use ratatui::crossterm::event::{ KeyboardEnhancementFlags, PopKeyboardEnhancementFlags, PushKeyboardEnhancementFlags, }; const TAB_TITLES: [&str; 2] = ["Search", "Inspect"]; pub enum InputAction { Accept(usize), AcceptInspecting, Copy(usize), Delete(usize), ReturnOriginal, ReturnQuery, Continue, Redraw, } #[derive(Clone)] pub struct InspectingState { current: Option<HistoryId>, next: Option<HistoryId>, previous: Option<HistoryId>, } impl InspectingState { pub fn move_to_previous(&mut self) { let previous = self.previous.clone(); self.reset(); self.current = previous; } pub fn move_to_next(&mut self) { let next = self.next.clone(); self.reset(); self.current = next; } pub fn reset(&mut self) { self.current = None; self.next = None; self.previous 
= None; } } pub fn to_compactness(f: &Frame, settings: &Settings) -> Compactness { if match settings.style { atuin_client::settings::Style::Auto => f.area().height < 14, atuin_client::settings::Style::Compact => true, atuin_client::settings::Style::Full => false, } { if settings.auto_hide_height != 0 && f.area().height <= settings.auto_hide_height { Compactness::Ultracompact } else { Compactness::Compact } } else { Compactness::Full } } #[allow(clippy::struct_field_names)] pub struct State { history_count: i64, update_needed: Option<Version>, results_state: ListState, switched_search_mode: bool, search_mode: SearchMode, results_len: usize, accept: bool, keymap_mode: KeymapMode, prefix: bool, current_cursor: Option<CursorStyle>, tab_index: usize, pub inspecting_state: InspectingState, search: SearchState, engine: Box<dyn SearchEngine>, now: Box<dyn Fn() -> OffsetDateTime + Send>, } #[derive(Clone, Copy)] pub enum Compactness { Ultracompact, Compact, Full, } #[derive(Clone, Copy)] struct StyleState { compactness: Compactness, invert: bool, inner_width: usize, } impl State { async fn query_results( &mut self, db: &mut dyn Database, smart_sort: bool, ) -> Result<Vec<History>> { let results = self.engine.query(&self.search, db).await?; self.inspecting_state = InspectingState { current: None, next: None, previous: None, }; self.results_state.select(0); self.results_len = results.len(); if smart_sort { Ok(atuin_history::sort::sort( self.search.input.as_str(), results, )) } else { Ok(results) } } fn handle_input<W>( &mut self, settings: &Settings, input: &Event, w: &mut W, ) -> Result<InputAction> where W: Write, { execute!(w, EnableMouseCapture)?; let r = match input { Event::Key(k) => self.handle_key_input(settings, k), Event::Mouse(m) => self.handle_mouse_input(*m), Event::Paste(d) => self.handle_paste_input(d), _ => InputAction::Continue, }; execute!(w, DisableMouseCapture)?; Ok(r) } fn handle_mouse_input(&mut self, input: MouseEvent) -> InputAction { match input.kind 
{ event::MouseEventKind::ScrollDown => { self.scroll_down(1); } event::MouseEventKind::ScrollUp => { self.scroll_up(1); } _ => {} } InputAction::Continue } fn handle_paste_input(&mut self, input: &str) -> InputAction { for i in input.chars() { self.search.input.insert(i); } InputAction::Continue } fn cast_cursor_style(style: CursorStyle) -> SetCursorStyle { match style { CursorStyle::DefaultUserShape => SetCursorStyle::DefaultUserShape, CursorStyle::BlinkingBlock => SetCursorStyle::BlinkingBlock, CursorStyle::SteadyBlock => SetCursorStyle::SteadyBlock, CursorStyle::BlinkingUnderScore => SetCursorStyle::BlinkingUnderScore, CursorStyle::SteadyUnderScore => SetCursorStyle::SteadyUnderScore, CursorStyle::BlinkingBar => SetCursorStyle::BlinkingBar, CursorStyle::SteadyBar => SetCursorStyle::SteadyBar, } } fn set_keymap_cursor(&mut self, settings: &Settings, keymap_name: &str) { let cursor_style = if keymap_name == "__clear__" { None } else { settings.keymap_cursor.get(keymap_name).copied() } .or_else(|| self.current_cursor.map(|_| CursorStyle::DefaultUserShape)); if cursor_style != self.current_cursor && let Some(style) = cursor_style { self.current_cursor = cursor_style; let _ = execute!(stdout(), Self::cast_cursor_style(style)); } } pub fn initialize_keymap_cursor(&mut self, settings: &Settings) { match self.keymap_mode { KeymapMode::Emacs => self.set_keymap_cursor(settings, "emacs"), KeymapMode::VimNormal => self.set_keymap_cursor(settings, "vim_normal"), KeymapMode::VimInsert => self.set_keymap_cursor(settings, "vim_insert"), KeymapMode::Auto => {} } } pub fn finalize_keymap_cursor(&mut self, settings: &Settings) { match settings.keymap_mode_shell { KeymapMode::Emacs => self.set_keymap_cursor(settings, "emacs"), KeymapMode::VimNormal => self.set_keymap_cursor(settings, "vim_normal"), KeymapMode::VimInsert => self.set_keymap_cursor(settings, "vim_insert"), KeymapMode::Auto => self.set_keymap_cursor(settings, "__clear__"), } } fn handle_key_exit(settings: &Settings) -> 
InputAction { match settings.exit_mode { ExitMode::ReturnOriginal => InputAction::ReturnOriginal, ExitMode::ReturnQuery => InputAction::ReturnQuery, } } fn handle_key_input(&mut self, settings: &Settings, input: &KeyEvent) -> InputAction { if input.kind == event::KeyEventKind::Release { return InputAction::Continue; } let ctrl = input.modifiers.contains(KeyModifiers::CONTROL); let esc_allow_exit = !(self.tab_index == 0 && self.keymap_mode == KeymapMode::VimInsert); let cursor_at_end_of_line = self.search.input.position() == UnicodeWidthStr::width(self.search.input.as_str()); let cursor_at_start_of_line = self.search.input.position() == 0; // support ctrl-a prefix, like screen or tmux if !self.prefix && ctrl && input.code == KeyCode::Char(settings.keys.prefix.chars().next().unwrap_or('a')) { self.prefix = true; return InputAction::Continue; } // core input handling, common for all tabs let common: Option<InputAction> = match input.code { KeyCode::Char('c' | 'g') if ctrl => Some(InputAction::ReturnOriginal), KeyCode::Esc if esc_allow_exit => Some(Self::handle_key_exit(settings)), KeyCode::Char('[') if ctrl && esc_allow_exit => Some(Self::handle_key_exit(settings)), KeyCode::Tab => match self.tab_index { 0 => Some(InputAction::Accept(self.results_state.selected())), 1 => Some(InputAction::AcceptInspecting), _ => panic!("invalid tab index on input"), }, KeyCode::Right if cursor_at_end_of_line && settings.keys.accept_past_line_end => { Some(InputAction::Accept(self.results_state.selected())) } KeyCode::Left if cursor_at_start_of_line && settings.keys.accept_past_line_start => { Some(InputAction::Accept(self.results_state.selected())) } KeyCode::Left if cursor_at_start_of_line && settings.keys.exit_past_line_start => { Some(Self::handle_key_exit(settings)) } KeyCode::Backspace if cursor_at_start_of_line && settings.keys.accept_with_backspace => { Some(InputAction::Accept(self.results_state.selected())) } KeyCode::Char('o') if ctrl => { self.tab_index = (self.tab_index + 
1) % TAB_TITLES.len(); Some(InputAction::Continue) } _ => None, }; if let Some(ret) = common { self.prefix = false; return ret; } // handle tab-specific input let action = match self.tab_index { 0 => self.handle_search_input(settings, input), 1 => super::inspector::input(self, settings, self.results_state.selected(), input), _ => panic!("invalid tab index on input"), }; self.prefix = false; action } fn handle_search_scroll_one_line( &mut self, settings: &Settings, enable_exit: bool, is_down: bool, ) -> InputAction { if is_down { if settings.keys.scroll_exits && enable_exit && self.results_state.selected() == 0 { return Self::handle_key_exit(settings); } self.scroll_down(1); } else { self.scroll_up(1); } InputAction::Continue } fn handle_search_up(&mut self, settings: &Settings, enable_exit: bool) -> InputAction { self.handle_search_scroll_one_line(settings, enable_exit, settings.invert) } fn handle_search_down(&mut self, settings: &Settings, enable_exit: bool) -> InputAction { self.handle_search_scroll_one_line(settings, enable_exit, !settings.invert) } fn handle_search_accept(&mut self, settings: &Settings) -> InputAction { if settings.enter_accept { self.accept = true; } InputAction::Accept(self.results_state.selected()) } #[allow(clippy::too_many_lines)] #[allow(clippy::cognitive_complexity)] fn handle_search_input(&mut self, settings: &Settings, input: &KeyEvent) -> InputAction { let ctrl = input.modifiers.contains(KeyModifiers::CONTROL); let alt = input.modifiers.contains(KeyModifiers::ALT); // Use Ctrl-n instead of Alt-n? let modfr = if settings.ctrl_n_shortcuts { ctrl } else { alt }; // reset the state, will be set to true later if user really did change it self.switched_search_mode = false; // first up handle prefix mappings. these take precedence over all others // eg, if a user types ctrl-a d, delete the history if self.prefix { // It'll be expanded. 
#[allow(clippy::single_match)] match input.code { KeyCode::Char('d') => { return InputAction::Delete(self.results_state.selected()); } KeyCode::Char('a') => { self.search.input.start(); // This prevents pressing ctrl-a twice while still in prefix mode self.prefix = false; return InputAction::Continue; } _ => {} } } // handle keymap specific keybindings. match self.keymap_mode { KeymapMode::VimNormal => match input.code { KeyCode::Char('?' | '/') if !ctrl => { self.search.input.clear(); self.set_keymap_cursor(settings, "vim_insert"); self.keymap_mode = KeymapMode::VimInsert; return InputAction::Continue; } KeyCode::Char('j') if !ctrl => { return self.handle_search_down(settings, true); } KeyCode::Char('k') if !ctrl => { return self.handle_search_up(settings, true); } KeyCode::Char('h') if !ctrl => { self.search.input.left(); return InputAction::Continue; } KeyCode::Char('l') if !ctrl => { self.search.input.right(); return InputAction::Continue; } KeyCode::Char('a') if !ctrl => { self.search.input.right(); self.set_keymap_cursor(settings, "vim_insert"); self.keymap_mode = KeymapMode::VimInsert; return InputAction::Continue; } KeyCode::Char('A') if !ctrl => { self.search.input.end(); self.set_keymap_cursor(settings, "vim_insert"); self.keymap_mode = KeymapMode::VimInsert; return InputAction::Continue; } KeyCode::Char('i') if !ctrl => { self.set_keymap_cursor(settings, "vim_insert"); self.keymap_mode = KeymapMode::VimInsert; return InputAction::Continue; } KeyCode::Char('I') if !ctrl => { self.search.input.start(); self.set_keymap_cursor(settings, "vim_insert"); self.keymap_mode = KeymapMode::VimInsert; return InputAction::Continue; } KeyCode::Char(c @ '1'..='9') => { return c.to_digit(10).map_or(InputAction::Continue, |c| { InputAction::Accept(self.results_state.selected() + c as usize) }); } KeyCode::Char(_) if !ctrl => { return InputAction::Continue; } _ => {} }, KeymapMode::VimInsert => { if input.code == KeyCode::Esc || (ctrl && input.code == KeyCode::Char('[')) { 
self.set_keymap_cursor(settings, "vim_normal"); self.keymap_mode = KeymapMode::VimNormal; return InputAction::Continue; } } _ => {} } match input.code { KeyCode::Enter => return self.handle_search_accept(settings), KeyCode::Char('m') if ctrl => return self.handle_search_accept(settings), KeyCode::Char('y') if ctrl => { return InputAction::Copy(self.results_state.selected()); } KeyCode::Char(c @ '1'..='9') if modfr => { return c.to_digit(10).map_or(InputAction::Continue, |c| { InputAction::Accept(self.results_state.selected() + c as usize) }); } KeyCode::Left if ctrl => self .search .input .prev_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Char('b') if alt => self .search .input .prev_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Left => { self.search.input.left(); } KeyCode::Char('b') if ctrl => { self.search.input.left(); } KeyCode::Right if ctrl => self .search .input .next_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Char('f') if alt => self .search .input .next_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Right => self.search.input.right(), KeyCode::Char('f') if ctrl => self.search.input.right(), KeyCode::Home => self.search.input.start(), KeyCode::Char('a') if ctrl => self.search.input.start(), KeyCode::Char('e') if ctrl => self.search.input.end(), KeyCode::End => self.search.input.end(), KeyCode::Backspace if ctrl => self .search .input .remove_prev_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Backspace => { self.search.input.back(); } KeyCode::Char('h' | '?') if ctrl => { // Depending on the terminal, [Backspace] can be transmitted as // \x08 or \x7F. Also, [Ctrl+Backspace] can be transmitted as // \x08 or \x7F or \x1F. On the other hand, [Ctrl+h] and // [Ctrl+?] are also transmitted as \x08 or \x7F by the // terminals. // // The crossterm library translates \x08 and \x7F to C-h and // Backspace, respectively. 
With the extended keyboard // protocol enabled, crossterm can faithfully translate // [Ctrl+h] and [Ctrl+?] to C-h and C-?. There is no perfect // solution, but we treat C-h and C-? the same as backspace to // suppress quirks as much as possible. self.search.input.back(); } KeyCode::Delete if ctrl => self .search .input .remove_next_word(&settings.word_chars, settings.word_jump_mode), KeyCode::Delete => { self.search.input.remove(); } KeyCode::Char('d') if ctrl => { if self.search.input.as_str().is_empty() { return InputAction::ReturnOriginal; } self.search.input.remove(); } KeyCode::Char('w') if ctrl => { // remove the first batch of whitespace while matches!(self.search.input.back(), Some(c) if c.is_whitespace()) {} while self.search.input.left() { if self.search.input.char().unwrap().is_whitespace() { self.search.input.right(); // found whitespace, go back right break; } self.search.input.remove(); } } KeyCode::Char('u') if ctrl => self.search.input.clear(), KeyCode::Char('r') if ctrl => self.search.rotate_filter_mode(settings, 1), KeyCode::Char('s') if ctrl => { self.switched_search_mode = true; self.search_mode = self.search_mode.next(settings); self.engine = engines::engine(self.search_mode); } KeyCode::Down => { return self.handle_search_down(settings, true); } KeyCode::Up => { return self.handle_search_up(settings, true); } KeyCode::Char('n' | 'j') if ctrl => { return self.handle_search_down(settings, false); } KeyCode::Char('p' | 'k') if ctrl => { return self.handle_search_up(settings, false); } KeyCode::Char('l') if ctrl => { return InputAction::Redraw; } KeyCode::Char(c) => { self.search.input.insert(c); } KeyCode::PageDown if !settings.invert => { let scroll_len = self.results_state.max_entries() - settings.scroll_context_lines; self.scroll_down(scroll_len); } KeyCode::PageDown if settings.invert => { let scroll_len = self.results_state.max_entries() - settings.scroll_context_lines; self.scroll_up(scroll_len); } KeyCode::PageUp if !settings.invert => { 
let scroll_len = self.results_state.max_entries() - settings.scroll_context_lines; self.scroll_up(scroll_len); } KeyCode::PageUp if settings.invert => { let scroll_len = self.results_state.max_entries() - settings.scroll_context_lines; self.scroll_down(scroll_len); } _ => {} } InputAction::Continue } fn scroll_down(&mut self, scroll_len: usize) { let i = self.results_state.selected().saturating_sub(scroll_len); self.inspecting_state.reset(); self.results_state.select(i); } fn scroll_up(&mut self, scroll_len: usize) { let i = self.results_state.selected() + scroll_len; self.results_state .select(i.min(self.results_len.saturating_sub(1))); self.inspecting_state.reset(); } #[allow(clippy::cast_possible_truncation)] #[allow(clippy::bool_to_int_with_if)] fn calc_preview_height( settings: &Settings, results: &[History], selected: usize, tab_index: usize, compactness: Compactness, border_size: u16, preview_width: u16, ) -> u16 { if settings.show_preview && settings.preview.strategy == PreviewStrategy::Auto && tab_index == 0 && !results.is_empty() { let length_current_cmd = results[selected].command.len() as u16; // calculate the number of newlines in the command let num_newlines = results[selected] .command .chars() .filter(|&c| c == '\n') .count() as u16; if num_newlines > 0 { std::cmp::min( settings.max_preview_height, results[selected] .command .split('\n') .map(|line| { (line.len() as u16 + preview_width - 1 - border_size) / (preview_width - border_size) }) .sum(), ) + border_size * 2 } // The '- 19' takes the characters before the command (duration and time) into account else if length_current_cmd > preview_width - 19 { std::cmp::min( settings.max_preview_height, (length_current_cmd + preview_width - 1 - border_size) / (preview_width - border_size), ) + border_size * 2 } else { 1 } } else if settings.show_preview && settings.preview.strategy == PreviewStrategy::Static && tab_index == 0 { let longest_command = results .iter() .max_by(|h1, h2| 
h1.command.len().cmp(&h2.command.len())); longest_command.map_or(0, |v| { std::cmp::min( settings.max_preview_height, v.command .split('\n') .map(|line| { (line.len() as u16 + preview_width - 1 - border_size) / (preview_width - border_size) }) .sum(), ) }) + border_size * 2 } else if settings.show_preview && settings.preview.strategy == PreviewStrategy::Fixed { settings.max_preview_height + border_size * 2 } else if !matches!(compactness, Compactness::Full) || tab_index == 1 { 0 } else { 1 } } #[allow(clippy::bool_to_int_with_if)] #[allow(clippy::too_many_lines)] fn draw( &mut self, f: &mut Frame, results: &[History], stats: Option<HistoryStats>, inspecting: Option<&History>, settings: &Settings, theme: &Theme, ) { let compactness = to_compactness(f, settings); let invert = settings.invert; let border_size = match compactness { Compactness::Full => 1, _ => 0, }; let preview_width = f.area().width - 2; let preview_height = Self::calc_preview_height( settings, results, self.results_state.selected(), self.tab_index, compactness, border_size, preview_width, ); let show_help = settings.show_help && (matches!(compactness, Compactness::Full) || f.area().height > 1); // This is an OR, as it seems more likely for someone to wish to override // tabs unexpectedly being missed, than unexpectedly present. 
let show_tabs = settings.show_tabs && !matches!(compactness, Compactness::Ultracompact); let chunks = Layout::default() .direction(Direction::Vertical) .margin(0) .horizontal_margin(1) .constraints::<&[Constraint]>( if invert { [ Constraint::Length(1 + border_size), // input Constraint::Min(1), // results list Constraint::Length(preview_height), // preview Constraint::Length(if show_tabs { 1 } else { 0 }), // tabs Constraint::Length(if show_help { 1 } else { 0 }), // header (sic) ] } else { match compactness { Compactness::Ultracompact => [ Constraint::Length(if show_help { 1 } else { 0 }), // header Constraint::Length(0), // tabs Constraint::Min(1), // results list Constraint::Length(0), Constraint::Length(0), ], _ => [ Constraint::Length(if show_help { 1 } else { 0 }), // header Constraint::Length(if show_tabs { 1 } else { 0 }), // tabs Constraint::Min(1), // results list Constraint::Length(1 + border_size), // input Constraint::Length(preview_height), // preview ], } } .as_ref(), ) .split(f.area()); let input_chunk = if invert { chunks[0] } else { chunks[3] }; let results_list_chunk = if invert { chunks[1] } else { chunks[2] }; let preview_chunk = if invert { chunks[2] } else { chunks[4] }; let tabs_chunk = if invert { chunks[3] } else { chunks[1] }; let header_chunk = if invert { chunks[4] } else { chunks[0] }; // TODO: this should be split so that we have one interactive search container that is // EITHER a search box or an inspector. But I'm not doing that now, way too much atm. 
// also allocate less 🙈 let titles: Vec<_> = TAB_TITLES.iter().copied().map(Line::from).collect(); if show_tabs { let tabs = Tabs::new(titles) .block(Block::default().borders(Borders::NONE)) .select(self.tab_index) .style(Style::default()) .highlight_style(theme.as_style(Meaning::Important)); f.render_widget(tabs, tabs_chunk); } let style = StyleState { compactness, invert, inner_width: input_chunk.width.into(), }; let header_chunks = Layout::default() .direction(Direction::Horizontal) .constraints::<&[Constraint]>( [ Constraint::Ratio(1, 5), Constraint::Ratio(3, 5), Constraint::Ratio(1, 5), ] .as_ref(), ) .split(header_chunk); let title = self.build_title(theme); f.render_widget(title, header_chunks[0]); let help = self.build_help(settings, theme); f.render_widget(help, header_chunks[1]); let stats_tab = self.build_stats(theme); f.render_widget(stats_tab, header_chunks[2]); let indicator: String = match compactness { Compactness::Ultracompact => { if self.switched_search_mode { format!("S{}>", self.search_mode.as_str().chars().next().unwrap()) } else { format!( "{}> ", self.search.filter_mode.as_str().chars().next().unwrap() ) } } _ => " > ".to_string(), }; match self.tab_index { 0 => { let history_highlighter = HistoryHighlighter { engine: self.engine.as_ref(), search_input: self.search.input.as_str(), }; let results_list = Self::build_results_list( style, results, self.keymap_mode, &self.now, indicator.as_str(), theme, history_highlighter, settings.show_numeric_shortcuts, ); f.render_stateful_widget(results_list, results_list_chunk, &mut self.results_state); } 1 => { if results.is_empty() { let message = Paragraph::new("Nothing to inspect") .block( Block::new() .title(Title::from(" Info ".to_string())) .title_alignment(Alignment::Center) .borders(Borders::ALL) .padding(Padding::vertical(2)), ) .alignment(Alignment::Center); f.render_widget(message, results_list_chunk); } else { let inspecting = match inspecting { Some(inspecting) => inspecting, None => 
&results[self.results_state.selected()], }; super::inspector::draw( f, results_list_chunk, inspecting, &stats.expect("Drawing inspector, but no stats"), settings, theme, settings.timezone, ); } // HACK: I'm following up with abstracting this into the UI container, with a // sub-widget for search + for inspector let feedback = Paragraph::new( "The inspector is new - please give feedback (good, or bad) at https://forum.atuin.sh", ); f.render_widget(feedback, input_chunk); return;
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
true
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/inspector.rs
crates/atuin/src/command/client/search/inspector.rs
use std::time::Duration;

use time::macros::format_description;

use atuin_client::{
    history::{History, HistoryStats},
    settings::{Settings, Timezone},
};

use ratatui::{
    Frame,
    crossterm::event::{KeyCode, KeyEvent, KeyModifiers},
    layout::Rect,
    prelude::{Constraint, Direction, Layout},
    style::Style,
    text::{Span, Text},
    widgets::{Bar, BarChart, BarGroup, Block, Borders, Padding, Paragraph, Row, Table},
};

use super::duration::format_duration;
use super::super::theme::{Meaning, Theme};
use super::interactive::{Compactness, InputAction, State, to_compactness};

// Clamp a possibly-negative i64 to zero before the unsigned cast, so e.g. a
// negative duration never wraps into a huge u64.
#[allow(clippy::cast_sign_loss)]
fn u64_or_zero(num: i64) -> u64 {
    if num < 0 { 0 } else { num as u64 }
}

/// Render the previous / current / next command triple inside `parent`.
///
/// In `compact` mode the three entries are stacked vertically as borderless
/// single lines; otherwise they sit side by side in bordered, titled boxes.
pub fn draw_commands(
    f: &mut Frame<'_>,
    parent: Rect,
    history: &History,
    stats: &HistoryStats,
    compact: bool,
    theme: &Theme,
) {
    let commands = Layout::default()
        .direction(if compact {
            Direction::Vertical
        } else {
            Direction::Horizontal
        })
        .constraints(if compact {
            [
                Constraint::Length(1),
                Constraint::Length(1),
                Constraint::Min(0),
            ]
        } else {
            [
                Constraint::Ratio(1, 4),
                Constraint::Ratio(1, 2),
                Constraint::Ratio(1, 4),
            ]
        })
        .split(parent);

    // The inspected command itself, styled to stand out from its neighbours.
    let command = Paragraph::new(Text::from(Span::styled(
        history.command.clone(),
        theme.as_style(Meaning::Important),
    )))
    .block(if compact {
        Block::new()
            .borders(Borders::NONE)
            .style(theme.as_style(Meaning::Base))
    } else {
        Block::new()
            .borders(Borders::ALL)
            .style(theme.as_style(Meaning::Base))
            .title("Command")
            .padding(Padding::horizontal(1))
    });

    let previous = Paragraph::new(
        stats
            .previous
            .clone()
            .map_or_else(|| "[No previous command]".to_string(), |prev| prev.command),
    )
    .block(if compact {
        Block::new()
            .borders(Borders::NONE)
            .style(theme.as_style(Meaning::Annotation))
    } else {
        Block::new()
            .borders(Borders::ALL)
            .style(theme.as_style(Meaning::Annotation))
            .title("Previous command")
            .padding(Padding::horizontal(1))
    });

    // Add [] around blank text, as when this is shown in a list
    // compacted, it makes it more obviously control text.
    let next = Paragraph::new(
        stats
            .next
            .clone()
            .map_or_else(|| "[No next command]".to_string(), |next| next.command),
    )
    .block(if compact {
        Block::new()
            .borders(Borders::NONE)
            .style(theme.as_style(Meaning::Annotation))
    } else {
        Block::new()
            .borders(Borders::ALL)
            .title("Next command")
            .padding(Padding::horizontal(1))
            .style(theme.as_style(Meaning::Annotation))
    });

    // Order on screen is previous → current → next.
    f.render_widget(previous, commands[0]);
    f.render_widget(command, commands[1]);
    f.render_widget(next, commands[2]);
}

/// Render a key/value table of per-command metadata (host, user, timestamps,
/// durations, exit code, cwd, session, total run count) into `parent`.
pub fn draw_stats_table(
    f: &mut Frame<'_>,
    parent: Rect,
    history: &History,
    tz: Timezone,
    stats: &HistoryStats,
    theme: &Theme,
) {
    let duration = Duration::from_nanos(u64_or_zero(history.duration));
    let avg_duration = Duration::from_nanos(stats.average_duration);

    // hostname is stored as "host:user"; fall back to blanks if the separator
    // is missing.
    let (host, user) = history.hostname.split_once(':').unwrap_or(("", ""));

    let rows = [
        Row::new(vec!["Host".to_string(), host.to_string()]),
        Row::new(vec!["User".to_string(), user.to_string()]),
        Row::new(vec![
            "Time".to_string(),
            // Display the timestamp in the user's configured timezone.
            history.timestamp.to_offset(tz.0).to_string(),
        ]),
        Row::new(vec!["Duration".to_string(), format_duration(duration)]),
        Row::new(vec![
            "Avg duration".to_string(),
            format_duration(avg_duration),
        ]),
        Row::new(vec!["Exit".to_string(), history.exit.to_string()]),
        Row::new(vec!["Directory".to_string(), history.cwd.clone()]),
        Row::new(vec!["Session".to_string(), history.session.clone()]),
        Row::new(vec!["Total runs".to_string(), stats.total.to_string()]),
    ];

    let widths = [Constraint::Ratio(1, 5), Constraint::Ratio(4, 5)];

    let table = Table::new(rows, widths).column_spacing(1).block(
        Block::default()
            .title("Command stats")
            .borders(Borders::ALL)
            .style(theme.as_style(Meaning::Base))
            .padding(Padding::vertical(1)),
    );

    f.render_widget(table, parent);
}

// Map a day-of-week digit (as a string, 0 = Sunday) to its English name.
// NOTE(review): the digits presumably come from the stats query's
// day-of-week grouping — verify against the producer if changed.
fn num_to_day(num: &str) -> String {
    match num {
        "0" => "Sunday".to_string(),
        "1" => "Monday".to_string(),
        "2" => "Tuesday".to_string(),
        "3" => "Wednesday".to_string(),
        "4" => "Thursday".to_string(),
        "5" => "Friday".to_string(),
        "6" => "Saturday".to_string(),
        _ => "Invalid day".to_string(),
    }
}

// Parse "[day]-[month]-[year]" date strings, sort the entries
// chronologically, and re-label them as "[month]/[yy]" for the chart axis.
// Panics (via expect) if a date string does not match the expected format.
fn sort_duration_over_time(durations: &[(String, i64)]) -> Vec<(String, i64)> {
    let format = format_description!("[day]-[month]-[year]");
    let output = format_description!("[month]/[year repr:last_two]");

    let mut durations: Vec<(time::Date, i64)> = durations
        .iter()
        .map(|d| {
            (
                time::Date::parse(d.0.as_str(), &format).expect("invalid date string from sqlite"),
                d.1,
            )
        })
        .collect();

    durations.sort_by(|a, b| a.0.cmp(&b.0));

    durations
        .iter()
        .map(|(date, duration)| {
            (
                date.format(output).expect("failed to format sqlite date"),
                *duration,
            )
        })
        .collect()
}

/// Render three stacked bar charts: exit-code distribution, runs per weekday,
/// and average duration over time.
fn draw_stats_charts(f: &mut Frame<'_>, parent: Rect, stats: &HistoryStats, theme: &Theme) {
    let exits: Vec<Bar> = stats
        .exits
        .iter()
        .map(|(exit, count)| {
            Bar::default()
                .label(exit.to_string().into())
                .value(u64_or_zero(*count))
        })
        .collect();

    let exits = BarChart::default()
        .block(
            Block::default()
                .title("Exit code distribution")
                .style(theme.as_style(Meaning::Base))
                .borders(Borders::ALL),
        )
        .bar_width(3)
        .bar_gap(1)
        .bar_style(Style::default())
        .value_style(Style::default())
        .label_style(Style::default())
        .data(BarGroup::default().bars(&exits));

    let day_of_week: Vec<Bar> = stats
        .day_of_week
        .iter()
        .map(|(day, count)| {
            Bar::default()
                .label(num_to_day(day.as_str()).into())
                .value(u64_or_zero(*count))
        })
        .collect();

    let day_of_week = BarChart::default()
        .block(
            Block::default()
                .title("Runs per day")
                .style(theme.as_style(Meaning::Base))
                .borders(Borders::ALL),
        )
        .bar_width(3)
        .bar_gap(1)
        .bar_style(Style::default())
        .value_style(Style::default())
        .label_style(Style::default())
        .data(BarGroup::default().bars(&day_of_week));

    let duration_over_time = sort_duration_over_time(&stats.duration_over_time);
    let duration_over_time: Vec<Bar> = duration_over_time
        .iter()
        .map(|(date, duration)| {
            let d = Duration::from_nanos(u64_or_zero(*duration));
            Bar::default()
                .label(date.clone().into())
                .value(u64_or_zero(*duration))
                // Show a human-readable duration on the bar rather than raw nanos.
                .text_value(format_duration(d))
        })
        .collect();

    let duration_over_time = BarChart::default()
        .block(
            Block::default()
                .title("Duration over time")
                .style(theme.as_style(Meaning::Base))
                .borders(Borders::ALL),
        )
        .bar_width(5)
        .bar_gap(1)
        .bar_style(Style::default())
        .value_style(Style::default())
        .label_style(Style::default())
        .data(BarGroup::default().bars(&duration_over_time));

    let layout = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Ratio(1, 3),
            Constraint::Ratio(1, 3),
            Constraint::Ratio(1, 3),
        ])
        .split(parent);

    f.render_widget(exits, layout[0]);
    f.render_widget(day_of_week, layout[1]);
    f.render_widget(duration_over_time, layout[2]);
}

/// Entry point for the inspector tab: dispatch to the ultracompact or full
/// layout depending on the terminal-size-derived compactness.
pub fn draw(
    f: &mut Frame<'_>,
    chunk: Rect,
    history: &History,
    stats: &HistoryStats,
    settings: &Settings,
    theme: &Theme,
    tz: Timezone,
) {
    let compactness = to_compactness(f, settings);
    match compactness {
        Compactness::Ultracompact => draw_ultracompact(f, chunk, history, stats, theme),
        _ => draw_full(f, chunk, history, stats, theme, tz),
    }
}

/// Minimal inspector: just the previous/current/next command lines.
pub fn draw_ultracompact(
    f: &mut Frame<'_>,
    chunk: Rect,
    history: &History,
    stats: &HistoryStats,
    theme: &Theme,
) {
    draw_commands(f, chunk, history, stats, true, theme);
}

/// Full inspector: commands on top (1/5 of the height), stats table and
/// charts below it (1/3 and 2/3 of the remaining width respectively).
pub fn draw_full(
    f: &mut Frame<'_>,
    chunk: Rect,
    history: &History,
    stats: &HistoryStats,
    theme: &Theme,
    tz: Timezone,
) {
    let vert_layout = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Ratio(1, 5), Constraint::Ratio(4, 5)])
        .split(chunk);

    let stats_layout = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Ratio(1, 3), Constraint::Ratio(2, 3)])
        .split(vert_layout[1]);

    draw_commands(f, vert_layout[0], history, stats, false, theme);
    draw_stats_table(f, stats_layout[0], history, tz, stats, theme);
    draw_stats_charts(f, stats_layout[1], stats, theme);
}

// I'm going to break this out more, but just starting to move things around before changing
// structure and making it nicer.
/// Key handling while the inspector tab is focused: Ctrl-d deletes the
/// selected entry, Up/Down move the inspection target, everything else falls
/// through to the caller.
pub fn input(
    state: &mut State,
    _settings: &Settings,
    selected: usize,
    input: &KeyEvent,
) -> InputAction {
    let ctrl = input.modifiers.contains(KeyModifiers::CONTROL);
    match input.code {
        KeyCode::Char('d') if ctrl => InputAction::Delete(selected),
        KeyCode::Up => {
            state.inspecting_state.move_to_previous();
            InputAction::Redraw
        }
        KeyCode::Down => {
            state.inspecting_state.move_to_next();
            InputAction::Redraw
        }
        _ => InputAction::Continue,
    }
}

#[cfg(test)]
mod tests {
    use super::draw_ultracompact;
    use atuin_client::{
        history::{History, HistoryId, HistoryStats},
        theme::ThemeManager,
    };
    use ratatui::{backend::TestBackend, prelude::*};
    use time::OffsetDateTime;

    // Build a current command plus synthetic previous/next neighbours and the
    // matching stats record.
    fn mock_history_stats() -> (History, HistoryStats) {
        let history = History {
            id: HistoryId::from("test1".to_string()),
            timestamp: OffsetDateTime::now_utc(),
            duration: 3,
            exit: 0,
            command: "/bin/cmd".to_string(),
            cwd: "/toot".to_string(),
            session: "sesh1".to_string(),
            hostname: "hostn".to_string(),
            deleted_at: None,
        };
        let next = History {
            id: HistoryId::from("test2".to_string()),
            timestamp: OffsetDateTime::now_utc(),
            duration: 2,
            exit: 0,
            command: "/bin/cmd -os".to_string(),
            cwd: "/toot".to_string(),
            session: "sesh1".to_string(),
            hostname: "hostn".to_string(),
            deleted_at: None,
        };
        let prev = History {
            id: HistoryId::from("test3".to_string()),
            timestamp: OffsetDateTime::now_utc(),
            duration: 1,
            exit: 0,
            command: "/bin/cmd -a".to_string(),
            cwd: "/toot".to_string(),
            session: "sesh1".to_string(),
            hostname: "hostn".to_string(),
            deleted_at: None,
        };
        let stats = HistoryStats {
            next: Some(next.clone()),
            previous: Some(prev.clone()),
            total: 2,
            average_duration: 3,
            exits: Vec::new(),
            day_of_week: Vec::new(),
            duration_over_time: Vec::new(),
        };

        (history, stats)
    }

    // Render the ultracompact inspector into a 22x5 test backend and assert
    // the three command lines appear in prev/current/next order.
    #[test]
    fn test_output_looks_correct_for_ultracompact() {
        let backend = TestBackend::new(22, 5);
        let mut terminal = Terminal::new(backend).expect("Could not create terminal");
        let chunk = Rect::new(0, 0, 22, 5);
        let (history, stats) = mock_history_stats();
        let prev = stats.previous.clone().unwrap();
        let next = stats.next.clone().unwrap();
        let mut manager = ThemeManager::new(Some(true), Some("".to_string()));
        let theme = manager.load_theme("(none)", None);

        let _ = terminal.draw(|f| draw_ultracompact(f, chunk, &history, &stats, &theme));

        // Expected buffer: each command left-aligned, padded to the full width.
        let mut lines = ["                      "; 5].map(|l| Line::from(l));
        for (n, entry) in [prev, history, next].iter().enumerate() {
            let mut l = lines[n].to_string();
            l.replace_range(0..entry.command.len(), &entry.command);
            lines[n] = Line::from(l);
        }
        terminal.backend().assert_buffer_lines(lines);
    }
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/duration.rs
crates/atuin/src/command/client/search/duration.rs
use core::fmt;
use std::{ops::ControlFlow, time::Duration};

/// Write a human-readable rendering of `dur` into `f`, keeping only the
/// single most-significant unit (e.g. 90s renders as "1m", not "1m 30s").
/// A zero duration renders as "0s".
#[allow(clippy::module_name_repetitions)]
pub fn format_duration_into(dur: Duration, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    let secs = dur.as_secs();
    let nanos = dur.subsec_nanos();

    // Calendar-ish unit breakdown, using average month/year lengths.
    let years = secs / 31_557_600; // 365.25d
    let year_days = secs % 31_557_600;
    let months = year_days / 2_630_016; // 30.44d
    let month_days = year_days % 2_630_016;
    let days = month_days / 86400;
    let day_secs = month_days % 86400;
    let hours = day_secs / 3600;
    let minutes = day_secs % 3600 / 60;
    let seconds = day_secs % 60;

    // Units from most to least significant; the first nonzero one wins.
    let segments = [
        ("y", years),
        ("mo", months),
        ("d", days),
        ("h", hours),
        ("m", minutes),
        ("s", seconds),
        ("ms", u64::from(nanos / 1_000_000)),
        ("us", u64::from(nanos / 1_000)),
        ("ns", u64::from(nanos)),
    ];

    // Mirror the original control flow: break on the first nonzero segment,
    // continue (and print "0s") when every segment is zero.
    let picked = segments.iter().try_for_each(|&(unit, value)| {
        if value > 0 {
            ControlFlow::Break((unit, value))
        } else {
            ControlFlow::Continue(())
        }
    });

    match picked {
        ControlFlow::Break((unit, value)) => write!(f, "{value}{unit}"),
        ControlFlow::Continue(()) => write!(f, "0s"),
    }
}

/// Convenience wrapper over [`format_duration_into`] that allocates a String.
#[allow(clippy::module_name_repetitions)]
pub fn format_duration(f: Duration) -> String {
    // Adapter so we can reuse the Formatter-based implementation via Display.
    struct Helper(Duration);

    impl fmt::Display for Helper {
        fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result {
            format_duration_into(self.0, out)
        }
    }

    Helper(f).to_string()
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/engines/db.rs
crates/atuin/src/command/client/search/engines/db.rs
use super::{SearchEngine, SearchState};
use async_trait::async_trait;
use atuin_client::{
    database::Database, database::OptFilters, history::History, settings::SearchMode,
};
use eyre::Result;
use norm::Metric;
use norm::fzf::{FzfParser, FzfV2};
use std::ops::Range;

/// Database-backed search engine; the wrapped [`SearchMode`] selects how the
/// database interprets the query (e.g. prefix vs fuzzy).
pub struct Search(pub SearchMode);

#[async_trait]
impl SearchEngine for Search {
    /// Run the query against the database, capped at 200 results.
    async fn full_query(
        &mut self,
        state: &SearchState,
        db: &mut dyn Database,
    ) -> Result<Vec<History>> {
        Ok(db
            .search(
                self.0,
                state.filter_mode,
                &state.context,
                state.input.as_str(),
                OptFilters {
                    limit: Some(200),
                    ..Default::default()
                },
            )
            .await
            // ignore errors as it may be caused by incomplete regex
            .map_or(Vec::new(), |r| r.into_iter().collect()))
    }

    /// Compute the byte indices of `command` that matched `search_input`,
    /// for highlighting in the results list. Prefix mode gets no highlights;
    /// other modes use the `norm` crate's fzf-style matcher.
    fn get_highlight_indices(&self, command: &str, search_input: &str) -> Vec<usize> {
        if self.0 == SearchMode::Prefix {
            return vec![];
        }
        let mut fzf = FzfV2::new();
        let mut parser = FzfParser::new();
        let query = parser.parse(search_input);
        let mut ranges: Vec<Range<usize>> = Vec::new();
        // distance is not needed here — only the matched ranges are kept.
        let _ = fzf.distance_and_ranges(query, command, &mut ranges);
        // convert ranges to all indices
        ranges.into_iter().flatten().collect()
    }
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/search/engines/skim.rs
crates/atuin/src/command/client/search/engines/skim.rs
use std::path::Path;

use async_trait::async_trait;
use atuin_client::{database::Database, history::History, settings::FilterMode};
use eyre::Result;
use fuzzy_matcher::{FuzzyMatcher, skim::SkimMatcherV2};
use itertools::Itertools;
use time::OffsetDateTime;
use tokio::task::yield_now;
use uuid;

use super::{SearchEngine, SearchState};

/// In-memory fuzzy search engine built on skim's matcher. The entire history
/// (with run counts) is loaded once on first query and cached for the life of
/// the engine.
pub struct Search {
    // Cache of (entry, run count); empty until the first full_query.
    all_history: Vec<(History, i32)>,
    engine: SkimMatcherV2,
}

impl Search {
    pub fn new() -> Self {
        Search {
            all_history: vec![],
            engine: SkimMatcherV2::default(),
        }
    }
}

#[async_trait]
impl SearchEngine for Search {
    /// Lazily populate the history cache, then fuzzy-rank it against the
    /// current input.
    async fn full_query(
        &mut self,
        state: &SearchState,
        db: &mut dyn Database,
    ) -> Result<Vec<History>> {
        if self.all_history.is_empty() {
            self.all_history = db.all_with_count().await.unwrap();
        }

        Ok(fuzzy_search(&self.engine, state, &self.all_history).await)
    }

    /// Character indices of `command` matched by `search_input`, for result
    /// highlighting; empty when there is no fuzzy match.
    fn get_highlight_indices(&self, command: &str, search_input: &str) -> Vec<usize> {
        let (_, indices) = self
            .engine
            .fuzzy_indices(command, search_input)
            .unwrap_or_default();
        indices
    }
}

// Rank `all_history` against the query, honouring the active filter mode, and
// return up to 200 entries ordered best-first with duplicate commands removed.
// Scoring blends the fuzzy score with recency, run count, match position and
// path distance from the current directory. Periodically yields to the runtime
// so a long scan doesn't starve the UI.
#[allow(clippy::too_many_lines)]
async fn fuzzy_search(
    engine: &SkimMatcherV2,
    state: &SearchState,
    all_history: &[(History, i32)],
) -> Vec<History> {
    let mut set = Vec::with_capacity(200);
    let mut ranks = Vec::with_capacity(200);
    let query = state.input.as_str();
    let now = OffsetDateTime::now_utc();

    for (i, (history, count)) in all_history.iter().enumerate() {
        // Cooperative yield every 256 entries to keep the TUI responsive.
        if i % 256 == 0 {
            yield_now().await;
        }
        let context = &state.context;
        let git_root = context
            .git_root
            .as_ref()
            .and_then(|git_root| git_root.to_str())
            .unwrap_or(&context.cwd);

        // Filter stage: an arm that matches with an empty body accepts the
        // entry; the catch-all `_ => continue` rejects everything else.
        match state.filter_mode {
            FilterMode::Global => {}
            // we aggregate host by ',' separating them
            FilterMode::Host
                if history
                    .hostname
                    .split(',')
                    .contains(&context.hostname.as_str()) => {}
            // we aggregate session by concattenating them.
            // sessions are 32 byte simple uuid formats
            FilterMode::Session
                if history
                    .session
                    .as_bytes()
                    .chunks(32)
                    .contains(&context.session.as_bytes()) => {}
            // SessionPreload: include current session + global history from before session start
            FilterMode::SessionPreload => {
                let is_current_session = {
                    history
                        .session
                        .as_bytes()
                        .chunks(32)
                        .any(|chunk| chunk == context.session.as_bytes())
                };
                if !is_current_session {
                    // The session start time is recovered from the session
                    // UUID's embedded timestamp (requires a timestamped UUID
                    // version); unparsable ids just skip the entry.
                    let Ok(uuid) = uuid::Uuid::parse_str(&context.session) else {
                        log::warn!("failed to parse session id '{}'", context.session);
                        continue;
                    };
                    let Some(timestamp) = uuid.get_timestamp() else {
                        log::warn!(
                            "failed to get timestamp from uuid '{}'",
                            uuid.as_hyphenated()
                        );
                        continue;
                    };
                    let (seconds, nanos) = timestamp.to_unix();
                    let Ok(session_start) = time::OffsetDateTime::from_unix_timestamp_nanos(
                        i128::from(seconds) * 1_000_000_000 + i128::from(nanos),
                    ) else {
                        log::warn!(
                            "failed to create OffsetDateTime from second: {seconds}, nanosecond: {nanos}"
                        );
                        continue;
                    };
                    // Outside the current session, only history recorded
                    // before the session began qualifies.
                    if history.timestamp >= session_start {
                        continue;
                    }
                }
            }
            // we aggregate directory by ':' separating them
            FilterMode::Directory if history.cwd.split(':').contains(&context.cwd.as_str()) => {}
            FilterMode::Workspace if history.cwd.split(':').contains(&git_root) => {}
            _ => continue,
        }
        #[allow(clippy::cast_lossless, clippy::cast_precision_loss)]
        if let Some((score, indices)) = engine.fuzzy_indices(&history.command, query) {
            let begin = indices.first().copied().unwrap_or_default();
            // Age of the entry on a log2 scale, clamped to at least 1 so it
            // never boosts (or divides by zero for) very recent entries.
            let mut duration = (now - history.timestamp).as_seconds_f64().log2();
            if !duration.is_finite() || duration <= 1.0 {
                duration = 1.0;
            }

            // these + X.0 just make the log result a bit smoother.
            // log is very spiky towards 1-4, but I want a gradual decay.
            // eg:
            // log2(4) = 2, log2(5) = 2.3 (16% increase)
            // log2(8) = 3, log2(9) = 3.16 (5% increase)
            // log2(16) = 4, log2(17) = 4.08 (2% increase)
            let count = (*count as f64 + 8.0).log2();
            let begin = (begin as f64 + 16.0).log2();
            let path = path_dist(history.cwd.as_ref(), state.context.cwd.as_ref());
            let path = (path as f64 + 8.0).log2();

            // reduce longer durations, raise higher counts, raise matches close to the start
            let score = (-score as f64) * count / path / duration / begin;

            'insert: {
                // algorithm:
                // 1. find either the position that this command ranks
                // 2. find the same command positioned better than our rank.
                for i in 0..set.len() {
                    // do we out score the current position?
                    if ranks[i] > score {
                        ranks.insert(i, score);
                        set.insert(i, history.clone());
                        let mut j = i + 1;
                        while j < set.len() {
                            // remove duplicates that have a worse score
                            if set[j].command == history.command {
                                ranks.remove(j);
                                set.remove(j);

                                // break this while loop because there won't be any other
                                // duplicates.
                                break;
                            }
                            j += 1;
                        }

                        // keep it limited
                        if ranks.len() > 200 {
                            ranks.pop();
                            set.pop();
                        }

                        break 'insert;
                    }
                    // don't continue if this command has a better score already
                    if set[i].command == history.command {
                        break 'insert;
                    }
                }

                if set.len() < 200 {
                    ranks.push(score);
                    set.push(history.clone());
                }
            }
        }
    }

    set
}

// Number of path components separating `a` from `b`: steps up from `a` to the
// common ancestor, plus steps down from that ancestor to `b`.
fn path_dist(a: &Path, b: &Path) -> usize {
    let mut a: Vec<_> = a.components().collect();
    let b: Vec<_> = b.components().collect();

    let mut dist = 0;

    // pop a until there's a common ancestor
    while !b.starts_with(&a) {
        dist += 1;
        a.pop();
    }

    b.len() - a.len() + dist
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init/bash.rs
crates/atuin/src/command/client/init/bash.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore};
use eyre::Result;

/// Print the static bash integration script, preceded by two shell variables
/// telling it which key bindings to install. Setting `ATUIN_NOBIND` in the
/// environment disables both bindings regardless of the flags.
pub fn init_static(disable_up_arrow: bool, disable_ctrl_r: bool) {
    let base = include_str!("../../../shell/atuin.bash");

    let nobind = std::env::var("ATUIN_NOBIND").is_ok();
    let bind_ctrl_r = !nobind && !disable_ctrl_r;
    let bind_up_arrow = !nobind && !disable_up_arrow;

    println!("__atuin_bind_ctrl_r={bind_ctrl_r}");
    println!("__atuin_bind_up_arrow={bind_up_arrow}");
    println!("{base}");
}

/// Full bash init: the static script plus generated alias and variable
/// definitions from the dotfiles stores.
pub async fn init(
    aliases: AliasStore,
    vars: VarStore,
    disable_up_arrow: bool,
    disable_ctrl_r: bool,
) -> Result<()> {
    init_static(disable_up_arrow, disable_ctrl_r);

    let aliases = atuin_dotfiles::shell::bash::alias_config(&aliases).await;
    let vars = atuin_dotfiles::shell::bash::var_config(&vars).await;

    println!("{aliases}");
    println!("{vars}");

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init/xonsh.rs
crates/atuin/src/command/client/init/xonsh.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore};
use eyre::Result;

/// Print the static xonsh integration script, preceded by two Python-style
/// booleans telling it which key bindings to install. Setting `ATUIN_NOBIND`
/// disables both bindings regardless of the flags.
pub fn init_static(disable_up_arrow: bool, disable_ctrl_r: bool) {
    let base = include_str!("../../../shell/atuin.xsh");

    let nobind = std::env::var("ATUIN_NOBIND").is_ok();
    let bind_ctrl_r = !nobind && !disable_ctrl_r;
    let bind_up_arrow = !nobind && !disable_up_arrow;

    // xonsh is Python, so emit Python boolean literals.
    let py_bool = |b: bool| if b { "True" } else { "False" };

    println!("_ATUIN_BIND_CTRL_R={}", py_bool(bind_ctrl_r));
    println!("_ATUIN_BIND_UP_ARROW={}", py_bool(bind_up_arrow));
    println!("{base}");
}

/// Full xonsh init: the static script plus generated alias and variable
/// definitions from the dotfiles stores.
pub async fn init(
    aliases: AliasStore,
    vars: VarStore,
    disable_up_arrow: bool,
    disable_ctrl_r: bool,
) -> Result<()> {
    init_static(disable_up_arrow, disable_ctrl_r);

    let aliases = atuin_dotfiles::shell::xonsh::alias_config(&aliases).await;
    let vars = atuin_dotfiles::shell::xonsh::var_config(&vars).await;

    println!("{aliases}");
    println!("{vars}");

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init/zsh.rs
crates/atuin/src/command/client/init/zsh.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore};
use eyre::Result;

/// Print the static zsh integration script followed by the requested bindkey
/// commands. Setting `ATUIN_NOBIND` suppresses all bindings; otherwise the
/// two flags suppress ctrl-r and up-arrow individually.
pub fn init_static(disable_up_arrow: bool, disable_ctrl_r: bool) {
    const BIND_CTRL_R: &str = r"bindkey -M emacs '^r' atuin-search
bindkey -M viins '^r' atuin-search-viins
bindkey -M vicmd '/' atuin-search";

    const BIND_UP_ARROW: &str = r"bindkey -M emacs '^[[A' atuin-up-search
bindkey -M vicmd '^[[A' atuin-up-search-vicmd
bindkey -M viins '^[[A' atuin-up-search-viins
bindkey -M emacs '^[OA' atuin-up-search
bindkey -M vicmd '^[OA' atuin-up-search-vicmd
bindkey -M viins '^[OA' atuin-up-search-viins
bindkey -M vicmd 'k' atuin-up-search-vicmd";

    let base = include_str!("../../../shell/atuin.zsh");
    println!("{base}");

    // Respect the user's opt-out before emitting any bindkey lines.
    if std::env::var("ATUIN_NOBIND").is_ok() {
        return;
    }

    if !disable_ctrl_r {
        println!("{BIND_CTRL_R}");
    }
    if !disable_up_arrow {
        println!("{BIND_UP_ARROW}");
    }
}

/// Full zsh init: the static script plus generated alias and variable
/// definitions from the dotfiles stores.
pub async fn init(
    aliases: AliasStore,
    vars: VarStore,
    disable_up_arrow: bool,
    disable_ctrl_r: bool,
) -> Result<()> {
    init_static(disable_up_arrow, disable_ctrl_r);

    let aliases = atuin_dotfiles::shell::zsh::alias_config(&aliases).await;
    let vars = atuin_dotfiles::shell::zsh::var_config(&vars).await;

    println!("{aliases}");
    println!("{vars}");

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init/fish.rs
crates/atuin/src/command/client/init/fish.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore}; use eyre::Result; fn print_bindings( indent: &str, disable_up_arrow: bool, disable_ctrl_r: bool, bind_ctrl_r: &str, bind_up_arrow: &str, bind_ctrl_r_ins: &str, bind_up_arrow_ins: &str, ) { if !disable_ctrl_r { println!("{indent}{bind_ctrl_r}"); } if !disable_up_arrow { println!("{indent}{bind_up_arrow}"); } println!("{indent}if bind -M insert >/dev/null 2>&1"); if !disable_ctrl_r { println!("{indent}{indent}{bind_ctrl_r_ins}"); } if !disable_up_arrow { println!("{indent}{indent}{bind_up_arrow_ins}"); } println!("{indent}end"); } pub fn init_static(disable_up_arrow: bool, disable_ctrl_r: bool) { let indent = " ".repeat(4); let base = include_str!("../../../shell/atuin.fish"); println!("{base}"); if std::env::var("ATUIN_NOBIND").is_err() { println!("if string match -q '4.*' $version"); // In fish 4.0 and above the option bind -k doesn't exist anymore, // instead we can use key names and modifiers directly. print_bindings( &indent, disable_up_arrow, disable_ctrl_r, "bind ctrl-r _atuin_search", "bind up _atuin_bind_up", "bind -M insert ctrl-r _atuin_search", "bind -M insert up _atuin_bind_up", ); println!("else"); // We keep these for compatibility with fish 3.x print_bindings( &indent, disable_up_arrow, disable_ctrl_r, r"bind \cr _atuin_search", &[ r"bind -k up _atuin_bind_up", r"bind \eOA _atuin_bind_up", r"bind \e\[A _atuin_bind_up", ] .join("; "), r"bind -M insert \cr _atuin_search", &[ r"bind -M insert -k up _atuin_bind_up", r"bind -M insert \eOA _atuin_bind_up", r"bind -M insert \e\[A _atuin_bind_up", ] .join("; "), ); println!("end"); } } pub async fn init( aliases: AliasStore, vars: VarStore, disable_up_arrow: bool, disable_ctrl_r: bool, ) -> Result<()> { init_static(disable_up_arrow, disable_ctrl_r); let aliases = atuin_dotfiles::shell::fish::alias_config(&aliases).await; let vars = atuin_dotfiles::shell::fish::var_config(&vars).await; println!("{aliases}"); println!("{vars}"); Ok(()) }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/init/powershell.rs
crates/atuin/src/command/client/init/powershell.rs
use atuin_dotfiles::store::{AliasStore, var::VarStore};

/// PowerShell spelling of a boolean literal.
fn ps_bool(value: bool) -> &'static str {
    if value { "$true" } else { "$false" }
}

/// Print the static PowerShell init script followed by the
/// `Enable-AtuinSearchKeys` call that wires up key handlers.
pub fn init_static(disable_up_arrow: bool, disable_ctrl_r: bool) {
    let script = include_str!("../../../shell/atuin.ps1");

    // ATUIN_NOBIND suppresses both key handlers regardless of the CLI flags.
    let nobind = std::env::var("ATUIN_NOBIND").is_ok();
    let bind_ctrl_r = !nobind && !disable_ctrl_r;
    let bind_up_arrow = !nobind && !disable_up_arrow;

    println!("{script}");
    println!(
        "Enable-AtuinSearchKeys -CtrlR {} -UpArrow {}",
        ps_bool(bind_ctrl_r),
        ps_bool(bind_up_arrow)
    );
}

/// Print the full PowerShell init: static script plus alias/variable config
/// rendered from the dotfiles stores.
pub async fn init(
    aliases: AliasStore,
    vars: VarStore,
    disable_up_arrow: bool,
    disable_ctrl_r: bool,
) -> eyre::Result<()> {
    init_static(disable_up_arrow, disable_ctrl_r);

    let alias_config = atuin_dotfiles::shell::powershell::alias_config(&aliases).await;
    let var_config = atuin_dotfiles::shell::powershell::var_config(&vars).await;

    println!("{alias_config}");
    println!("{var_config}");

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/src/command/client/sync/status.rs
crates/atuin/src/command/client/sync/status.rs
use std::path::PathBuf;

use crate::{SHA, VERSION};
use atuin_client::{api_client, database::Database, settings::Settings};
use colored::Colorize;
use eyre::{Result, bail};

/// Print local and remote sync status for the logged-in user.
///
/// Bails out early if there is no session file (i.e. the user is not logged
/// in to a sync server).
pub async fn run(settings: &Settings, db: &impl Database) -> Result<()> {
    // Presence of the session file is our "logged in" marker.
    if !PathBuf::from(settings.session_path.as_str()).exists() {
        bail!("You are not logged in to a sync server - cannot show sync status");
    }

    let client = api_client::Client::new(
        &settings.sync_address,
        settings.session_token()?.as_str(),
        settings.network_connect_timeout,
        settings.network_timeout,
    )?;

    let remote_status = client.status().await?;
    let last_sync = Settings::last_sync()?;

    println!("Atuin v{VERSION} - Build rev {SHA}\n");
    println!("{}", "[Local]".green());

    if settings.auto_sync {
        println!("Sync frequency: {}", settings.sync_frequency);
        // Render the last-sync time in the user's configured timezone.
        println!("Last sync: {}", last_sync.to_offset(settings.timezone.0));
    }

    // History counts are only meaningful for the legacy (non-record) sync.
    if !settings.sync.records {
        let local = db.history_count(false).await?;
        let deleted = db.history_count(true).await? - local;
        println!("History count: {local}");
        println!("Deleted history count: {deleted}\n");
    }

    if settings.auto_sync {
        println!("{}", "[Remote]".green());
        println!("Address: {}", settings.sync_address);
        println!("Username: {}", remote_status.username);
    }

    Ok(())
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/tests/sync.rs
crates/atuin/tests/sync.rs
use atuin_common::{api::AddHistoryRequest, utils::uuid_v7};
use time::OffsetDateTime;

mod common;

/// Round-trip two history entries through a freshly started server and check
/// they come back in insertion order.
#[tokio::test]
async fn sync() {
    let path = format!("/{}", uuid_v7().as_simple());
    let (address, shutdown, server) = common::start_server(&path).await;
    let client = common::register(&address).await;

    let hostname = uuid_v7().as_simple().to_string();
    let now = OffsetDateTime::now_utc();

    // Two random payloads; uuids guarantee uniqueness across test runs.
    let entries: Vec<String> = (0..2).map(|_| uuid_v7().as_simple().to_string()).collect();

    let requests: Vec<AddHistoryRequest> = entries
        .iter()
        .map(|data| AddHistoryRequest {
            id: uuid_v7().as_simple().to_string(),
            timestamp: now,
            data: data.clone(),
            hostname: hostname.clone(),
        })
        .collect();

    client.post_history(&requests).await.unwrap();

    let history = client
        .get_history(OffsetDateTime::UNIX_EPOCH, OffsetDateTime::UNIX_EPOCH, None)
        .await
        .unwrap();
    assert_eq!(history.history, entries);

    shutdown.send(()).unwrap();
    server.await.unwrap();
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/tests/users.rs
crates/atuin/tests/users.rs
use atuin_common::utils::uuid_v7; mod common; #[tokio::test] async fn registration() { let path = format!("/{}", uuid_v7().as_simple()); let (address, shutdown, server) = common::start_server(&path).await; dbg!(&address); // -- REGISTRATION -- let username = uuid_v7().as_simple().to_string(); let password = uuid_v7().as_simple().to_string(); let client = common::register_inner(&address, &username, &password).await; // the session token works let status = client.status().await.unwrap(); assert_eq!(status.username, username); // -- LOGIN -- let client = common::login(&address, username.clone(), password).await; // the session token works let status = client.status().await.unwrap(); assert_eq!(status.username, username); shutdown.send(()).unwrap(); server.await.unwrap(); } #[tokio::test] async fn change_password() { let path = format!("/{}", uuid_v7().as_simple()); let (address, shutdown, server) = common::start_server(&path).await; // -- REGISTRATION -- let username = uuid_v7().as_simple().to_string(); let password = uuid_v7().as_simple().to_string(); let client = common::register_inner(&address, &username, &password).await; // the session token works let status = client.status().await.unwrap(); assert_eq!(status.username, username); // -- PASSWORD CHANGE -- let current_password = password; let new_password = uuid_v7().as_simple().to_string(); let result = client .change_password(current_password, new_password.clone()) .await; // the password change request succeeded assert!(result.is_ok()); // -- LOGIN -- let client = common::login(&address, username.clone(), new_password).await; // login with new password yields a working token let status = client.status().await.unwrap(); assert_eq!(status.username, username); shutdown.send(()).unwrap(); server.await.unwrap(); } #[tokio::test] async fn multi_user_test() { let path = format!("/{}", uuid_v7().as_simple()); let (address, shutdown, server) = common::start_server(&path).await; dbg!(&address); // -- REGISTRATION -- let 
user_one = uuid_v7().as_simple().to_string(); let password_one = uuid_v7().as_simple().to_string(); let client_one = common::register_inner(&address, &user_one, &password_one).await; // the session token works let status = client_one.status().await.unwrap(); assert_eq!(status.username, user_one); let user_two = uuid_v7().as_simple().to_string(); let password_two = uuid_v7().as_simple().to_string(); let client_two = common::register_inner(&address, &user_two, &password_two).await; // the session token works let status = client_two.status().await.unwrap(); assert_eq!(status.username, user_two); // check that we can change user one's password, and _this does not affect user two_ let current_password = password_one; let new_password = uuid_v7().as_simple().to_string(); let result = client_one .change_password(current_password, new_password.clone()) .await; // the password change request succeeded assert!(result.is_ok()); // -- LOGIN -- let client_one = common::login(&address, user_one.clone(), new_password).await; let client_two = common::login(&address, user_two.clone(), password_two).await; // login with new password yields a working token let status = client_one.status().await.unwrap(); assert_eq!(status.username, user_one); assert_ne!(status.username, user_two); let status = client_two.status().await.unwrap(); assert_eq!(status.username, user_two); shutdown.send(()).unwrap(); server.await.unwrap(); }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin/tests/common/mod.rs
crates/atuin/tests/common/mod.rs
use std::{env, time::Duration}; use atuin_client::api_client; use atuin_common::utils::uuid_v7; use atuin_server::{Settings as ServerSettings, launch_with_tcp_listener}; use atuin_server_database::DbSettings; use atuin_server_postgres::Postgres; use futures_util::TryFutureExt; use tokio::{net::TcpListener, sync::oneshot, task::JoinHandle}; use tracing::{Dispatch, dispatcher}; use tracing_subscriber::{EnvFilter, layer::SubscriberExt}; pub async fn start_server(path: &str) -> (String, oneshot::Sender<()>, JoinHandle<()>) { let formatting_layer = tracing_tree::HierarchicalLayer::default() .with_writer(tracing_subscriber::fmt::TestWriter::new()) .with_indent_lines(true) .with_ansi(true) .with_targets(true) .with_indent_amount(2); let dispatch: Dispatch = tracing_subscriber::registry() .with(formatting_layer) .with(EnvFilter::new("atuin_server=debug,atuin_client=debug,info")) .into(); let db_uri = env::var("ATUIN_DB_URI") .unwrap_or_else(|_| "postgres://atuin:pass@localhost:5432/atuin".to_owned()); let server_settings = ServerSettings { host: "127.0.0.1".to_owned(), port: 0, path: path.to_owned(), sync_v1_enabled: true, open_registration: true, max_history_length: 8192, max_record_size: 1024 * 1024 * 1024, page_size: 1100, register_webhook_url: None, register_webhook_username: String::new(), db_settings: DbSettings { db_uri: db_uri, read_db_uri: None, }, metrics: atuin_server::settings::Metrics::default(), tls: atuin_server::settings::Tls::default(), mail: atuin_server::settings::Mail::default(), fake_version: None, }; let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = listener.local_addr().unwrap(); let server = tokio::spawn(async move { let _tracing_guard = dispatcher::set_default(&dispatch); if let Err(e) = launch_with_tcp_listener::<Postgres>( server_settings, listener, shutdown_rx.unwrap_or_else(|_| ()), ) .await { tracing::error!(error=?e, "server error"); panic!("error 
running server: {e:?}"); } }); // let the server come online tokio::time::sleep(Duration::from_millis(200)).await; (format!("http://{addr}{path}"), shutdown_tx, server) } pub async fn register_inner<'a>( address: &'a str, username: &str, password: &str, ) -> api_client::Client<'a> { let email = format!("{}@example.com", uuid_v7().as_simple()); // registration works let registration_response = api_client::register(address, username, &email, password) .await .unwrap(); api_client::Client::new(address, &registration_response.session, 5, 30).unwrap() } #[allow(dead_code)] pub async fn login(address: &str, username: String, password: String) -> api_client::Client<'_> { // registration works let login_response = api_client::login( address, atuin_common::api::LoginRequest { username, password }, ) .await .unwrap(); api_client::Client::new(address, &login_response.session, 5, 30).unwrap() } #[allow(dead_code)] pub async fn register(address: &str) -> api_client::Client<'_> { let username = uuid_v7().as_simple().to_string(); let password = uuid_v7().as_simple().to_string(); register_inner(address, &username, &password).await }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-database/src/lib.rs
crates/atuin-server-database/src/lib.rs
#![forbid(unsafe_code)] pub mod calendar; pub mod models; use std::{ collections::HashMap, fmt::{Debug, Display}, ops::Range, }; use self::{ calendar::{TimePeriod, TimePeriodInfo}, models::{History, NewHistory, NewSession, NewUser, Session, User}, }; use async_trait::async_trait; use atuin_common::record::{EncryptedData, HostId, Record, RecordIdx, RecordStatus}; use serde::{Deserialize, Serialize}; use time::{Date, Duration, Month, OffsetDateTime, Time, UtcOffset}; use tracing::instrument; #[derive(Debug)] pub enum DbError { NotFound, Other(eyre::Report), } impl Display for DbError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{self:?}") } } impl<T: std::error::Error + Into<time::error::Error>> From<T> for DbError { fn from(value: T) -> Self { DbError::Other(value.into().into()) } } impl std::error::Error for DbError {} pub type DbResult<T> = Result<T, DbError>; #[derive(Debug, PartialEq)] pub enum DbType { Postgres, Sqlite, Unknown, } #[derive(Clone, Deserialize, Serialize)] pub struct DbSettings { pub db_uri: String, /// Optional URI for read replicas. If set, read-only queries will use this connection. pub read_db_uri: Option<String>, } impl DbSettings { pub fn db_type(&self) -> DbType { if self.db_uri.starts_with("postgres://") || self.db_uri.starts_with("postgresql://") { DbType::Postgres } else if self.db_uri.starts_with("sqlite://") { DbType::Sqlite } else { DbType::Unknown } } } fn redact_db_uri(uri: &str) -> String { url::Url::parse(uri) .map(|mut url| { let _ = url.set_password(Some("****")); url.to_string() }) .unwrap_or_else(|_| uri.to_string()) } // Do our best to redact passwords so they're not logged in the event of an error. 
impl Debug for DbSettings { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.db_type() == DbType::Postgres { let redacted_uri = redact_db_uri(&self.db_uri); let redacted_read_uri = self.read_db_uri.as_ref().map(|uri| redact_db_uri(uri)); f.debug_struct("DbSettings") .field("db_uri", &redacted_uri) .field("read_db_uri", &redacted_read_uri) .finish() } else { f.debug_struct("DbSettings") .field("db_uri", &self.db_uri) .field("read_db_uri", &self.read_db_uri) .finish() } } } #[async_trait] pub trait Database: Sized + Clone + Send + Sync + 'static { async fn new(settings: &DbSettings) -> DbResult<Self>; async fn get_session(&self, token: &str) -> DbResult<Session>; async fn get_session_user(&self, token: &str) -> DbResult<User>; async fn add_session(&self, session: &NewSession) -> DbResult<()>; async fn get_user(&self, username: &str) -> DbResult<User>; async fn get_user_session(&self, u: &User) -> DbResult<Session>; async fn add_user(&self, user: &NewUser) -> DbResult<i64>; async fn user_verified(&self, id: i64) -> DbResult<bool>; async fn verify_user(&self, id: i64) -> DbResult<()>; async fn user_verification_token(&self, id: i64) -> DbResult<String>; async fn update_user_password(&self, u: &User) -> DbResult<()>; async fn total_history(&self) -> DbResult<i64>; async fn count_history(&self, user: &User) -> DbResult<i64>; async fn count_history_cached(&self, user: &User) -> DbResult<i64>; async fn delete_user(&self, u: &User) -> DbResult<()>; async fn delete_history(&self, user: &User, id: String) -> DbResult<()>; async fn deleted_history(&self, user: &User) -> DbResult<Vec<String>>; async fn delete_store(&self, user: &User) -> DbResult<()>; async fn add_records(&self, user: &User, record: &[Record<EncryptedData>]) -> DbResult<()>; async fn next_records( &self, user: &User, host: HostId, tag: String, start: Option<RecordIdx>, count: u64, ) -> DbResult<Vec<Record<EncryptedData>>>; // Return the tail record ID for each store, so (HostID, Tag, 
TailRecordID) async fn status(&self, user: &User) -> DbResult<RecordStatus>; async fn count_history_range(&self, user: &User, range: Range<OffsetDateTime>) -> DbResult<i64>; async fn list_history( &self, user: &User, created_after: OffsetDateTime, since: OffsetDateTime, host: &str, page_size: i64, ) -> DbResult<Vec<History>>; async fn add_history(&self, history: &[NewHistory]) -> DbResult<()>; async fn oldest_history(&self, user: &User) -> DbResult<History>; #[instrument(skip_all)] async fn calendar( &self, user: &User, period: TimePeriod, tz: UtcOffset, ) -> DbResult<HashMap<u64, TimePeriodInfo>> { let mut ret = HashMap::new(); let iter: Box<dyn Iterator<Item = DbResult<(u64, Range<Date>)>> + Send> = match period { TimePeriod::Year => { // First we need to work out how far back to calculate. Get the // oldest history item let oldest = self .oldest_history(user) .await? .timestamp .to_offset(tz) .year(); let current_year = OffsetDateTime::now_utc().to_offset(tz).year(); // All the years we need to get data for // The upper bound is exclusive, so include current +1 let years = oldest..current_year + 1; Box::new(years.map(|year| { let start = Date::from_calendar_date(year, time::Month::January, 1)?; let end = Date::from_calendar_date(year + 1, time::Month::January, 1)?; Ok((year as u64, start..end)) })) } TimePeriod::Month { year } => { let months = std::iter::successors(Some(Month::January), |m| Some(m.next())).take(12); Box::new(months.map(move |month| { let start = Date::from_calendar_date(year, month, 1)?; let days = start.month().length(year); let end = start + Duration::days(days as i64); Ok((month as u64, start..end)) })) } TimePeriod::Day { year, month } => { let days = 1..month.length(year); Box::new(days.map(move |day| { let start = Date::from_calendar_date(year, month, day)?; let end = start .next_day() .ok_or_else(|| DbError::Other(eyre::eyre!("no next day?")))?; Ok((day as u64, start..end)) })) } }; for x in iter { let (index, range) = x?; let start = 
range.start.with_time(Time::MIDNIGHT).assume_offset(tz); let end = range.end.with_time(Time::MIDNIGHT).assume_offset(tz); let count = self.count_history_range(user, start..end).await?; ret.insert( index, TimePeriodInfo { count: count as u64, hash: "".to_string(), }, ); } Ok(ret) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-database/src/calendar.rs
crates/atuin-server-database/src/calendar.rs
// Calendar data use serde::{Deserialize, Serialize}; use time::Month; pub enum TimePeriod { Year, Month { year: i32 }, Day { year: i32, month: Month }, } #[derive(Debug, Serialize, Deserialize)] pub struct TimePeriodInfo { pub count: u64, // TODO: Use this for merkle tree magic pub hash: String, }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-database/src/models.rs
crates/atuin-server-database/src/models.rs
use time::OffsetDateTime;

/// A history row as stored server-side. The command payload is opaque to the
/// server — it is encrypted by the client before upload.
pub struct History {
    pub id: i64,
    pub client_id: String, // a client generated ID
    pub user_id: i64,
    pub hostname: String,
    pub timestamp: OffsetDateTime,

    /// All the data we have about this command, encrypted.
    ///
    /// Currently this is an encrypted msgpack object, but this may change in the future.
    pub data: String,

    pub created_at: OffsetDateTime,
}

/// A history row to be inserted; the server assigns `id` and `created_at`.
pub struct NewHistory {
    pub client_id: String,
    pub user_id: i64,
    pub hostname: String,
    pub timestamp: OffsetDateTime,

    /// All the data we have about this command, encrypted.
    ///
    /// Currently this is an encrypted msgpack object, but this may change in the future.
    pub data: String,
}

/// A registered account. `password` is the stored (hashed) credential, not
/// plaintext — NOTE(review): hashing happens elsewhere; confirm at call sites.
pub struct User {
    pub id: i64,
    pub username: String,
    pub email: String,
    pub password: String,
    // When the account's email was verified; None if unverified.
    pub verified: Option<OffsetDateTime>,
}

/// An authenticated session token belonging to a user.
pub struct Session {
    pub id: i64,
    pub user_id: i64,
    pub token: String,
}

/// Payload for creating a new user; the server assigns the id.
pub struct NewUser {
    pub username: String,
    pub email: String,
    pub password: String,
}

/// Payload for creating a new session for an existing user.
pub struct NewSession {
    pub user_id: i64,
    pub token: String,
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-postgres/build.rs
crates/atuin-server-postgres/build.rs
// generated by `sqlx migrate build-script` fn main() { // trigger recompilation when a new migration is added println!("cargo:rerun-if-changed=migrations"); }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-postgres/src/wrappers.rs
crates/atuin-server-postgres/src/wrappers.rs
use ::sqlx::{FromRow, Result};
use atuin_common::record::{EncryptedData, Host, Record};
use atuin_server_database::models::{History, Session, User};
use sqlx::{Row, postgres::PgRow};
use time::PrimitiveDateTime;

// Newtype wrappers so we can implement `sqlx::FromRow` for models that are
// declared in the backend-agnostic `atuin_server_database` crate (orphan rule).
pub struct DbUser(pub User);
pub struct DbSession(pub Session);
pub struct DbHistory(pub History);
pub struct DbRecord(pub Record<EncryptedData>);

impl<'a> FromRow<'a, PgRow> for DbUser {
    fn from_row(row: &'a PgRow) -> Result<Self> {
        Ok(Self(User {
            id: row.try_get("id")?,
            username: row.try_get("username")?,
            email: row.try_get("email")?,
            password: row.try_get("password")?,
            verified: row.try_get("verified_at")?,
        }))
    }
}

// Consistency: use the imported `FromRow`/`Result` everywhere instead of
// mixing in fully-qualified `::sqlx::` paths as before.
impl<'a> FromRow<'a, PgRow> for DbSession {
    fn from_row(row: &'a PgRow) -> Result<Self> {
        Ok(Self(Session {
            id: row.try_get("id")?,
            user_id: row.try_get("user_id")?,
            token: row.try_get("token")?,
        }))
    }
}

impl<'a> FromRow<'a, PgRow> for DbHistory {
    fn from_row(row: &'a PgRow) -> Result<Self> {
        Ok(Self(History {
            id: row.try_get("id")?,
            client_id: row.try_get("client_id")?,
            user_id: row.try_get("user_id")?,
            hostname: row.try_get("hostname")?,
            // Stored without a zone in Postgres; treated as UTC on read.
            timestamp: row
                .try_get::<PrimitiveDateTime, _>("timestamp")?
                .assume_utc(),
            data: row.try_get("data")?,
            created_at: row
                .try_get::<PrimitiveDateTime, _>("created_at")?
                .assume_utc(),
        }))
    }
}

impl<'a> FromRow<'a, PgRow> for DbRecord {
    fn from_row(row: &'a PgRow) -> Result<Self> {
        // idx/timestamp are signed bigints in Postgres; the record model
        // uses u64, so cast after reading.
        let timestamp: i64 = row.try_get("timestamp")?;
        let idx: i64 = row.try_get("idx")?;

        let data = EncryptedData {
            data: row.try_get("data")?,
            content_encryption_key: row.try_get("cek")?,
        };

        Ok(Self(Record {
            id: row.try_get("client_id")?,
            host: Host::new(row.try_get("host")?),
            idx: idx as u64,
            timestamp: timestamp as u64,
            version: row.try_get("version")?,
            tag: row.try_get("tag")?,
            data,
        }))
    }
}

impl From<DbRecord> for Record<EncryptedData> {
    fn from(other: DbRecord) -> Record<EncryptedData> {
        // `Record { ..other.0 }` with no overrides was a no-op struct-update
        // expression; simply unwrap the newtype.
        other.0
    }
}
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-server-postgres/src/lib.rs
crates/atuin-server-postgres/src/lib.rs
use std::collections::HashMap; use std::ops::Range; use rand::Rng; use async_trait::async_trait; use atuin_common::record::{EncryptedData, HostId, Record, RecordIdx, RecordStatus}; use atuin_common::utils::crypto_random_string; use atuin_server_database::models::{History, NewHistory, NewSession, NewUser, Session, User}; use atuin_server_database::{Database, DbError, DbResult, DbSettings}; use futures_util::TryStreamExt; use sqlx::Row; use sqlx::postgres::PgPoolOptions; use time::{OffsetDateTime, PrimitiveDateTime, UtcOffset}; use tracing::{instrument, trace}; use uuid::Uuid; use wrappers::{DbHistory, DbRecord, DbSession, DbUser}; mod wrappers; const MIN_PG_VERSION: u32 = 14; #[derive(Clone)] pub struct Postgres { pool: sqlx::Pool<sqlx::postgres::Postgres>, /// Optional read replica pool for read-only queries read_pool: Option<sqlx::Pool<sqlx::postgres::Postgres>>, } impl Postgres { /// Returns the appropriate pool for read operations. /// Uses read_pool if available, otherwise falls back to the primary pool. fn read_pool(&self) -> &sqlx::Pool<sqlx::postgres::Postgres> { self.read_pool.as_ref().unwrap_or(&self.pool) } } fn fix_error(error: sqlx::Error) -> DbError { match error { sqlx::Error::RowNotFound => DbError::NotFound, error => DbError::Other(error.into()), } } #[async_trait] impl Database for Postgres { async fn new(settings: &DbSettings) -> DbResult<Self> { let pool = PgPoolOptions::new() .max_connections(100) .connect(settings.db_uri.as_str()) .await .map_err(fix_error)?; // Call server_version_num to get the DB server's major version number // The call returns None for servers older than 8.x. let pg_major_version: u32 = pool .acquire() .await .map_err(fix_error)? .server_version_num() .ok_or(DbError::Other(eyre::Report::msg( "could not get PostgreSQL version", )))? 
/ 10000; if pg_major_version < MIN_PG_VERSION { return Err(DbError::Other(eyre::Report::msg(format!( "unsupported PostgreSQL version {pg_major_version}, minimum required is {MIN_PG_VERSION}" )))); } sqlx::migrate!("./migrations") .run(&pool) .await .map_err(|error| DbError::Other(error.into()))?; // Create read replica pool if configured let read_pool = if let Some(read_db_uri) = &settings.read_db_uri { tracing::info!("Connecting to read replica database"); let read_pool = PgPoolOptions::new() .max_connections(100) .connect(read_db_uri.as_str()) .await .map_err(fix_error)?; // Verify the read replica is also a supported PostgreSQL version let read_pg_major_version: u32 = read_pool .acquire() .await .map_err(fix_error)? .server_version_num() .ok_or(DbError::Other(eyre::Report::msg( "could not get PostgreSQL version from read replica", )))? / 10000; if read_pg_major_version < MIN_PG_VERSION { return Err(DbError::Other(eyre::Report::msg(format!( "unsupported PostgreSQL version {read_pg_major_version} on read replica, minimum required is {MIN_PG_VERSION}" )))); } Some(read_pool) } else { None }; Ok(Self { pool, read_pool }) } #[instrument(skip_all)] async fn get_session(&self, token: &str) -> DbResult<Session> { sqlx::query_as("select id, user_id, token from sessions where token = $1") .bind(token) .fetch_one(self.read_pool()) .await .map_err(fix_error) .map(|DbSession(session)| session) } #[instrument(skip_all)] async fn get_user(&self, username: &str) -> DbResult<User> { sqlx::query_as( "select id, username, email, password, verified_at from users where username = $1", ) .bind(username) .fetch_one(self.read_pool()) .await .map_err(fix_error) .map(|DbUser(user)| user) } #[instrument(skip_all)] async fn user_verified(&self, id: i64) -> DbResult<bool> { let res: (bool,) = sqlx::query_as("select verified_at is not null from users where id = $1") .bind(id) .fetch_one(self.read_pool()) .await .map_err(fix_error)?; Ok(res.0) } #[instrument(skip_all)] async fn 
verify_user(&self, id: i64) -> DbResult<()> { sqlx::query( "update users set verified_at = (current_timestamp at time zone 'utc') where id=$1", ) .bind(id) .execute(&self.pool) .await .map_err(fix_error)?; Ok(()) } /// Return a valid verification token for the user /// If the user does not have any token, create one, insert it, and return /// If the user has a token, but it's invalid, delete it, create a new one, return /// If the user already has a valid token, return it #[instrument(skip_all)] async fn user_verification_token(&self, id: i64) -> DbResult<String> { const TOKEN_VALID_MINUTES: i64 = 15; // First we check if there is a verification token let token: Option<(String, sqlx::types::time::OffsetDateTime)> = sqlx::query_as( "select token, valid_until from user_verification_token where user_id = $1", ) .bind(id) .fetch_optional(&self.pool) .await .map_err(fix_error)?; let token = if let Some((token, valid_until)) = token { trace!("Token for user {id} valid until {valid_until}"); // We have a token, AND it's still valid if valid_until > time::OffsetDateTime::now_utc() { token } else { // token has expired. generate a new one, return it let token = crypto_random_string::<24>(); sqlx::query("update user_verification_token set token = $2, valid_until = $3 where user_id=$1") .bind(id) .bind(&token) .bind(time::OffsetDateTime::now_utc() + time::Duration::minutes(TOKEN_VALID_MINUTES)) .execute(&self.pool) .await .map_err(fix_error)?; token } } else { // No token in the database! 
Generate one, insert it let token = crypto_random_string::<24>(); sqlx::query("insert into user_verification_token (user_id, token, valid_until) values ($1, $2, $3)") .bind(id) .bind(&token) .bind(time::OffsetDateTime::now_utc() + time::Duration::minutes(TOKEN_VALID_MINUTES)) .execute(&self.pool) .await .map_err(fix_error)?; token }; Ok(token) } #[instrument(skip_all)] async fn get_session_user(&self, token: &str) -> DbResult<User> { sqlx::query_as( "select users.id, users.username, users.email, users.password, users.verified_at from users inner join sessions on users.id = sessions.user_id and sessions.token = $1", ) .bind(token) .fetch_one(self.read_pool()) .await .map_err(fix_error) .map(|DbUser(user)| user) } #[instrument(skip_all)] async fn count_history(&self, user: &User) -> DbResult<i64> { // The cache is new, and the user might not yet have a cache value. // They will have one as soon as they post up some new history, but handle that // edge case. let res: (i64,) = sqlx::query_as( "select count(1) from history where user_id = $1", ) .bind(user.id) .fetch_one(self.read_pool()) .await .map_err(fix_error)?; Ok(res.0) } #[instrument(skip_all)] async fn total_history(&self) -> DbResult<i64> { // The cache is new, and the user might not yet have a cache value. // They will have one as soon as they post up some new history, but handle that // edge case. let res: (i64,) = sqlx::query_as("select sum(total) from total_history_count_user") .fetch_optional(self.read_pool()) .await .map_err(fix_error)? 
.unwrap_or((0,)); Ok(res.0) } #[instrument(skip_all)] async fn count_history_cached(&self, user: &User) -> DbResult<i64> { let res: (i32,) = sqlx::query_as( "select total from total_history_count_user where user_id = $1", ) .bind(user.id) .fetch_one(self.read_pool()) .await .map_err(fix_error)?; Ok(res.0 as i64) } async fn delete_store(&self, user: &User) -> DbResult<()> { let mut tx = self.pool.begin().await.map_err(fix_error)?; sqlx::query( "delete from store where user_id = $1", ) .bind(user.id) .execute(&mut *tx) .await .map_err(fix_error)?; sqlx::query( "delete from store_idx_cache where user_id = $1", ) .bind(user.id) .execute(&mut *tx) .await .map_err(fix_error)?; tx.commit().await.map_err(fix_error)?; Ok(()) } async fn delete_history(&self, user: &User, id: String) -> DbResult<()> { sqlx::query( "update history set deleted_at = $3 where user_id = $1 and client_id = $2 and deleted_at is null", // don't just keep setting it ) .bind(user.id) .bind(id) .bind(OffsetDateTime::now_utc()) .fetch_all(&self.pool) .await .map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn deleted_history(&self, user: &User) -> DbResult<Vec<String>> { // The cache is new, and the user might not yet have a cache value. // They will have one as soon as they post up some new history, but handle that // edge case. 
let res = sqlx::query( "select client_id from history where user_id = $1 and deleted_at is not null", ) .bind(user.id) .fetch_all(self.read_pool()) .await .map_err(fix_error)?; let res = res .iter() .map(|row| row.get::<String, _>("client_id")) .collect(); Ok(res) } #[instrument(skip_all)] async fn count_history_range( &self, user: &User, range: Range<OffsetDateTime>, ) -> DbResult<i64> { let res: (i64,) = sqlx::query_as( "select count(1) from history where user_id = $1 and timestamp >= $2::date and timestamp < $3::date", ) .bind(user.id) .bind(into_utc(range.start)) .bind(into_utc(range.end)) .fetch_one(self.read_pool()) .await .map_err(fix_error)?; Ok(res.0) } #[instrument(skip_all)] async fn list_history( &self, user: &User, created_after: OffsetDateTime, since: OffsetDateTime, host: &str, page_size: i64, ) -> DbResult<Vec<History>> { let res = sqlx::query_as( "select id, client_id, user_id, hostname, timestamp, data, created_at from history where user_id = $1 and hostname != $2 and created_at >= $3 and timestamp >= $4 order by timestamp asc limit $5", ) .bind(user.id) .bind(host) .bind(into_utc(created_after)) .bind(into_utc(since)) .bind(page_size) .fetch(self.read_pool()) .map_ok(|DbHistory(h)| h) .try_collect() .await .map_err(fix_error)?; Ok(res) } #[instrument(skip_all)] async fn add_history(&self, history: &[NewHistory]) -> DbResult<()> { let mut tx = self.pool.begin().await.map_err(fix_error)?; for i in history { let client_id: &str = &i.client_id; let hostname: &str = &i.hostname; let data: &str = &i.data; sqlx::query( "insert into history (client_id, user_id, hostname, timestamp, data) values ($1, $2, $3, $4, $5) on conflict do nothing ", ) .bind(client_id) .bind(i.user_id) .bind(hostname) .bind(i.timestamp) .bind(data) .execute(&mut *tx) .await .map_err(fix_error)?; } tx.commit().await.map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn delete_user(&self, u: &User) -> DbResult<()> { sqlx::query("delete from sessions where user_id = $1") 
.bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; sqlx::query("delete from history where user_id = $1") .bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; sqlx::query("delete from store where user_id = $1") .bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; sqlx::query("delete from user_verification_token where user_id = $1") .bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; sqlx::query("delete from total_history_count_user where user_id = $1") .bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; sqlx::query("delete from users where id = $1") .bind(u.id) .execute(&self.pool) .await .map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn update_user_password(&self, user: &User) -> DbResult<()> { sqlx::query( "update users set password = $1 where id = $2", ) .bind(&user.password) .bind(user.id) .execute(&self.pool) .await .map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn add_user(&self, user: &NewUser) -> DbResult<i64> { let email: &str = &user.email; let username: &str = &user.username; let password: &str = &user.password; let res: (i64,) = sqlx::query_as( "insert into users (username, email, password) values($1, $2, $3) returning id", ) .bind(username) .bind(email) .bind(password) .fetch_one(&self.pool) .await .map_err(fix_error)?; Ok(res.0) } #[instrument(skip_all)] async fn add_session(&self, session: &NewSession) -> DbResult<()> { let token: &str = &session.token; sqlx::query( "insert into sessions (user_id, token) values($1, $2)", ) .bind(session.user_id) .bind(token) .execute(&self.pool) .await .map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn get_user_session(&self, u: &User) -> DbResult<Session> { sqlx::query_as("select id, user_id, token from sessions where user_id = $1") .bind(u.id) .fetch_one(self.read_pool()) .await .map_err(fix_error) .map(|DbSession(session)| session) } #[instrument(skip_all)] async fn oldest_history(&self, user: &User) -> DbResult<History> { 
sqlx::query_as( "select id, client_id, user_id, hostname, timestamp, data, created_at from history where user_id = $1 order by timestamp asc limit 1", ) .bind(user.id) .fetch_one(self.read_pool()) .await .map_err(fix_error) .map(|DbHistory(h)| h) } #[instrument(skip_all)] async fn add_records(&self, user: &User, records: &[Record<EncryptedData>]) -> DbResult<()> { let mut tx = self.pool.begin().await.map_err(fix_error)?; // We won't have uploaded this data if it wasn't the max. Therefore, we can deduce the max // idx without having to make further database queries. Doing the query on this small // amount of data should be much, much faster. // // Worst case, say we get this wrong. We end up caching data that isn't actually the max // idx, so clients upload again. The cache logic can be verified with a sql query anyway :) let mut heads = HashMap::<(HostId, &str), u64>::new(); for i in records { let id = atuin_common::utils::uuid_v7(); let result = sqlx::query( "insert into store (id, client_id, host, idx, timestamp, version, tag, data, cek, user_id) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) on conflict do nothing ", ) .bind(id) .bind(i.id) .bind(i.host.id) .bind(i.idx as i64) .bind(i.timestamp as i64) // throwing away some data, but i64 is still big in terms of time .bind(&i.version) .bind(&i.tag) .bind(&i.data.data) .bind(&i.data.content_encryption_key) .bind(user.id) .execute(&mut *tx) .await .map_err(fix_error)?; // Only update heads if we actually inserted the record if result.rows_affected() > 0 { heads .entry((i.host.id, &i.tag)) .and_modify(|e| { if i.idx > *e { *e = i.idx } }) .or_insert(i.idx); } } // we've built the map of heads for this push, so commit it to the database for ((host, tag), idx) in heads { sqlx::query( "insert into store_idx_cache (user_id, host, tag, idx) values ($1, $2, $3, $4) on conflict(user_id, host, tag) do update set idx = greatest(store_idx_cache.idx, $4) ", ) .bind(user.id) .bind(host) .bind(tag) .bind(idx as i64) 
.execute(&mut *tx) .await .map_err(fix_error)?; } tx.commit().await.map_err(fix_error)?; Ok(()) } #[instrument(skip_all)] async fn next_records( &self, user: &User, host: HostId, tag: String, start: Option<RecordIdx>, count: u64, ) -> DbResult<Vec<Record<EncryptedData>>> { tracing::debug!("{:?} - {:?} - {:?}", host, tag, start); let start = start.unwrap_or(0); let records: Result<Vec<DbRecord>, DbError> = sqlx::query_as( "select client_id, host, idx, timestamp, version, tag, data, cek from store where user_id = $1 and tag = $2 and host = $3 and idx >= $4 order by idx asc limit $5", ) .bind(user.id) .bind(tag.clone()) .bind(host) .bind(start as i64) .bind(count as i64) .fetch_all(self.read_pool()) .await .map_err(fix_error); let ret = match records { Ok(records) => { let records: Vec<Record<EncryptedData>> = records .into_iter() .map(|f| { let record: Record<EncryptedData> = f.into(); record }) .collect(); records } Err(DbError::NotFound) => { tracing::debug!("no records found in store: {:?}/{}", host, tag); return Ok(vec![]); } Err(e) => return Err(e), }; Ok(ret) } async fn status(&self, user: &User) -> DbResult<RecordStatus> { const STATUS_SQL: &str = "select host, tag, max(idx) from store where user_id = $1 group by host, tag"; // If IDX_CACHE_ROLLOUT is set, then we // 1. Read the value of the var, use it as a % chance of using the cache // 2. If we use the cache, just read from the cache table // 3. If we don't use the cache, read from the store table // IDX_CACHE_ROLLOUT should be between 0 and 100. 
let idx_cache_rollout = std::env::var("IDX_CACHE_ROLLOUT").unwrap_or("0".to_string()); let idx_cache_rollout = idx_cache_rollout.parse::<f64>().unwrap_or(0.0); let use_idx_cache = rand::thread_rng().gen_bool(idx_cache_rollout / 100.0); let mut res: Vec<(Uuid, String, i64)> = if use_idx_cache { tracing::debug!("using idx cache for user {}", user.id); sqlx::query_as("select host, tag, idx from store_idx_cache where user_id = $1") .bind(user.id) .fetch_all(self.read_pool()) .await .map_err(fix_error)? } else { tracing::debug!("using aggregate query for user {}", user.id); sqlx::query_as(STATUS_SQL) .bind(user.id) .fetch_all(self.read_pool()) .await .map_err(fix_error)? }; res.sort(); let mut status = RecordStatus::new(); for i in res.iter() { status.set_raw(HostId(i.0), i.1.clone(), i.2 as u64); } Ok(status) } } fn into_utc(x: OffsetDateTime) -> PrimitiveDateTime { let x = x.to_offset(UtcOffset::UTC); PrimitiveDateTime::new(x.date(), x.time()) } #[cfg(test)] mod tests { use time::macros::datetime; use crate::into_utc; #[test] fn utc() { let dt = datetime!(2023-09-26 15:11:02 +05:30); assert_eq!(into_utc(dt), datetime!(2023-09-26 09:41:02)); assert_eq!(into_utc(dt).assume_utc(), dt); let dt = datetime!(2023-09-26 15:11:02 -07:00); assert_eq!(into_utc(dt), datetime!(2023-09-26 22:11:02)); assert_eq!(into_utc(dt).assume_utc(), dt); let dt = datetime!(2023-09-26 15:11:02 +00:00); assert_eq!(into_utc(dt), datetime!(2023-09-26 15:11:02)); assert_eq!(into_utc(dt).assume_utc(), dt); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/lib.rs
crates/atuin-dotfiles/src/lib.rs
pub mod shell; pub mod store;
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/store.rs
crates/atuin-dotfiles/src/store.rs
use std::collections::BTreeMap; use atuin_client::record::sqlite_store::SqliteStore; // Sync aliases // This will be noticeable similar to the kv store, though I expect the two shall diverge // While we will support a range of shell config, I'd rather have a larger number of small records // + stores, rather than one mega config store. use atuin_common::record::{DecryptedData, Host, HostId}; use atuin_common::utils::unquote; use eyre::{Result, bail, ensure, eyre}; use atuin_client::record::encryption::PASETO_V4; use atuin_client::record::store::Store; use crate::shell::Alias; const CONFIG_SHELL_ALIAS_VERSION: &str = "v0"; const CONFIG_SHELL_ALIAS_TAG: &str = "config-shell-alias"; const CONFIG_SHELL_ALIAS_FIELD_MAX_LEN: usize = 20000; // 20kb max total len, way more than should be needed. mod alias; pub mod var; #[derive(Debug, Clone, PartialEq, Eq)] pub enum AliasRecord { Create(Alias), // create a full record Delete(String), // delete by name } impl AliasRecord { pub fn serialize(&self) -> Result<DecryptedData> { use rmp::encode; let mut output = vec![]; match self { AliasRecord::Create(alias) => { encode::write_u8(&mut output, 0)?; // create encode::write_array_len(&mut output, 2)?; // 2 fields encode::write_str(&mut output, alias.name.as_str())?; encode::write_str(&mut output, alias.value.as_str())?; } AliasRecord::Delete(name) => { encode::write_u8(&mut output, 1)?; // delete encode::write_array_len(&mut output, 1)?; // 1 field encode::write_str(&mut output, name.as_str())?; } } Ok(DecryptedData(output)) } pub fn deserialize(data: &DecryptedData, version: &str) -> Result<Self> { use rmp::decode; fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } match version { CONFIG_SHELL_ALIAS_VERSION => { let mut bytes = decode::Bytes::new(&data.0); let record_type = decode::read_u8(&mut bytes).map_err(error_report)?; match record_type { // create 0 => { let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?; ensure!( nfields 
== 2, "too many entries in v0 shell alias create record" ); let bytes = bytes.remaining_slice(); let (key, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (value, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; if !bytes.is_empty() { bail!("trailing bytes in encoded shell alias record. malformed") } Ok(AliasRecord::Create(Alias { name: key.to_owned(), value: value.to_owned(), })) } // delete 1 => { let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?; ensure!( nfields == 1, "too many entries in v0 shell alias delete record" ); let bytes = bytes.remaining_slice(); let (key, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; if !bytes.is_empty() { bail!("trailing bytes in encoded shell alias record. malformed") } Ok(AliasRecord::Delete(key.to_owned())) } n => { bail!("unknown AliasRecord type {n}") } } } _ => { bail!("unknown version {version:?}") } } } } #[derive(Debug, Clone)] pub struct AliasStore { pub store: SqliteStore, pub host_id: HostId, pub encryption_key: [u8; 32], } impl AliasStore { // will want to init the actual kv store when that is done pub fn new(store: SqliteStore, host_id: HostId, encryption_key: [u8; 32]) -> AliasStore { AliasStore { store, host_id, encryption_key, } } pub async fn posix(&self) -> Result<String> { let aliases = self.aliases().await?; Ok(Self::format_posix(&aliases)) } pub async fn xonsh(&self) -> Result<String> { let aliases = self.aliases().await?; Ok(Self::format_xonsh(&aliases)) } pub async fn powershell(&self) -> Result<String> { let aliases = self.aliases().await?; Ok(Self::format_powershell(&aliases)) } fn format_posix(aliases: &[Alias]) -> String { let mut config = String::new(); for alias in aliases { // If it's quoted, remove the quotes. If it's not quoted, do nothing. let value = unquote(alias.value.as_str()).unwrap_or(alias.value.clone()); // we're about to quote it ourselves anyway! 
config.push_str(&format!("alias {}='{}'\n", alias.name, value)); } config } fn format_xonsh(aliases: &[Alias]) -> String { let mut config = String::new(); for alias in aliases { config.push_str(&format!("aliases['{}'] ='{}'\n", alias.name, alias.value)); } config } fn format_powershell(aliases: &[Alias]) -> String { let mut config = String::new(); for alias in aliases { config.push_str(&crate::shell::powershell::format_alias(alias)); } config } pub async fn build(&self) -> Result<()> { let dir = atuin_common::utils::dotfiles_cache_dir(); tokio::fs::create_dir_all(dir.clone()).await?; let aliases = self.aliases().await?; // Build for all supported shells let posix = Self::format_posix(&aliases); let xonsh = Self::format_xonsh(&aliases); let powershell = Self::format_powershell(&aliases); // All the same contents, maybe optimize in the future or perhaps there will be quirks // per-shell // I'd prefer separation atm let zsh = dir.join("aliases.zsh"); let bash = dir.join("aliases.bash"); let fish = dir.join("aliases.fish"); let xsh = dir.join("aliases.xsh"); let ps1 = dir.join("aliases.ps1"); tokio::fs::write(zsh, &posix).await?; tokio::fs::write(bash, &posix).await?; tokio::fs::write(fish, &posix).await?; tokio::fs::write(xsh, &xonsh).await?; tokio::fs::write(ps1, &powershell).await?; Ok(()) } pub async fn set(&self, name: &str, value: &str) -> Result<()> { if name.len() + value.len() > CONFIG_SHELL_ALIAS_FIELD_MAX_LEN { return Err(eyre!( "alias record too large: max len {} bytes", CONFIG_SHELL_ALIAS_FIELD_MAX_LEN )); } let record = AliasRecord::Create(Alias { name: name.to_string(), value: value.to_string(), }); let bytes = record.serialize()?; let idx = self .store .last(self.host_id, CONFIG_SHELL_ALIAS_TAG) .await? 
.map_or(0, |entry| entry.idx + 1); let record = atuin_common::record::Record::builder() .host(Host::new(self.host_id)) .version(CONFIG_SHELL_ALIAS_VERSION.to_string()) .tag(CONFIG_SHELL_ALIAS_TAG.to_string()) .idx(idx) .data(bytes) .build(); self.store .push(&record.encrypt::<PASETO_V4>(&self.encryption_key)) .await?; // set mutates shell config, so build again self.build().await?; Ok(()) } pub async fn delete(&self, name: &str) -> Result<()> { if name.len() > CONFIG_SHELL_ALIAS_FIELD_MAX_LEN { return Err(eyre!( "alias record too large: max len {} bytes", CONFIG_SHELL_ALIAS_FIELD_MAX_LEN )); } let record = AliasRecord::Delete(name.to_string()); let bytes = record.serialize()?; let idx = self .store .last(self.host_id, CONFIG_SHELL_ALIAS_TAG) .await? .map_or(0, |entry| entry.idx + 1); let record = atuin_common::record::Record::builder() .host(Host::new(self.host_id)) .version(CONFIG_SHELL_ALIAS_VERSION.to_string()) .tag(CONFIG_SHELL_ALIAS_TAG.to_string()) .idx(idx) .data(bytes) .build(); self.store .push(&record.encrypt::<PASETO_V4>(&self.encryption_key)) .await?; // delete mutates shell config, so build again self.build().await?; Ok(()) } pub async fn aliases(&self) -> Result<Vec<Alias>> { let mut build = BTreeMap::new(); // this is sorted, oldest to newest let tagged = self.store.all_tagged(CONFIG_SHELL_ALIAS_TAG).await?; for record in tagged { let version = record.version.clone(); let decrypted = match version.as_str() { CONFIG_SHELL_ALIAS_VERSION => record.decrypt::<PASETO_V4>(&self.encryption_key)?, version => bail!("unknown version {version:?}"), }; let ar = AliasRecord::deserialize(&decrypted.data, version.as_str())?; match ar { AliasRecord::Create(a) => { build.insert(a.name.clone(), a); } AliasRecord::Delete(d) => { build.remove(&d); } } } Ok(build.into_values().collect()) } } #[cfg(test)] pub(crate) fn test_local_timeout() -> f64 { std::env::var("ATUIN_TEST_LOCAL_TIMEOUT") .ok() .and_then(|x| x.parse().ok()) // this hardcoded value should be replaced by a 
simple way to get the // default local_timeout of Settings if possible .unwrap_or(2.0) } #[cfg(test)] mod tests { use rand::rngs::OsRng; use atuin_client::record::sqlite_store::SqliteStore; use crate::shell::Alias; use super::{AliasRecord, AliasStore, CONFIG_SHELL_ALIAS_VERSION, test_local_timeout}; use crypto_secretbox::{KeyInit, XSalsa20Poly1305}; #[test] fn encode_decode() { let record = Alias { name: "k".to_owned(), value: "kubectl".to_owned(), }; let record = AliasRecord::Create(record); let snapshot = [204, 0, 146, 161, 107, 167, 107, 117, 98, 101, 99, 116, 108]; let encoded = record.serialize().unwrap(); let decoded = AliasRecord::deserialize(&encoded, CONFIG_SHELL_ALIAS_VERSION).unwrap(); assert_eq!(encoded.0, &snapshot); assert_eq!(decoded, record); } #[tokio::test] async fn build_aliases() { let store = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let key: [u8; 32] = XSalsa20Poly1305::generate_key(&mut OsRng).into(); let host_id = atuin_common::record::HostId(atuin_common::utils::uuid_v7()); let alias = AliasStore::new(store, host_id, key); alias.set("k", "kubectl").await.unwrap(); alias.set("gp", "git push").await.unwrap(); alias .set("kgap", "'kubectl get pods --all-namespaces'") .await .unwrap(); let mut aliases = alias.aliases().await.unwrap(); aliases.sort_by_key(|a| a.name.clone()); assert_eq!(aliases.len(), 3); assert_eq!( aliases[0], Alias { name: String::from("gp"), value: String::from("git push") } ); assert_eq!( aliases[1], Alias { name: String::from("k"), value: String::from("kubectl") } ); assert_eq!( aliases[2], Alias { name: String::from("kgap"), value: String::from("'kubectl get pods --all-namespaces'") } ); let build = alias.posix().await.expect("failed to build aliases"); assert_eq!( build, "alias gp='git push' alias k='kubectl' alias kgap='kubectl get pods --all-namespaces' " ) } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell.rs
crates/atuin-dotfiles/src/shell.rs
use eyre::{Result, ensure, eyre}; use rmp::{decode, encode}; use serde::Serialize; use atuin_common::shell::{Shell, ShellError}; use crate::store::AliasStore; pub mod bash; pub mod fish; pub mod powershell; pub mod xonsh; pub mod zsh; #[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub struct Alias { pub name: String, pub value: String, } #[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub struct Var { pub name: String, pub value: String, // False? This is a _shell var_ // True? This is an _env var_ pub export: bool, } impl Var { /// Serialize into the given vec /// This is intended to be called by the store pub fn serialize(&self, output: &mut Vec<u8>) -> Result<()> { encode::write_array_len(output, 3)?; // 3 fields encode::write_str(output, self.name.as_str())?; encode::write_str(output, self.value.as_str())?; encode::write_bool(output, self.export)?; Ok(()) } pub fn deserialize(bytes: &mut decode::Bytes) -> Result<Self> { fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } let nfields = decode::read_array_len(bytes).map_err(error_report)?; ensure!( nfields == 3, "too many entries in v0 dotfiles env create record, got {}, expected {}", nfields, 3 ); let bytes = bytes.remaining_slice(); let (key, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let (value, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; let mut bytes = decode::Bytes::new(bytes); let export = decode::read_bool(&mut bytes).map_err(error_report)?; ensure!( bytes.remaining_slice().is_empty(), "trailing bytes in encoded dotfiles env record, malformed" ); Ok(Var { name: key.to_owned(), value: value.to_owned(), export, }) } } pub fn parse_alias(line: &str) -> Option<Alias> { // consider the fact we might be importing a fish alias // 'alias' output // fish: alias foo bar // posix: foo=bar let is_fish = line.split(' ').next().unwrap_or("") == "alias"; let parts: Vec<&str> = if is_fish { line.split(' ') .enumerate() .filter_map(|(n, i)| 
if n == 0 { None } else { Some(i) }) .collect() } else { line.split('=').collect() }; if parts.len() <= 1 { return None; } let mut parts = parts.iter().map(|s| s.to_string()); let name = parts.next().unwrap(); let remaining = if is_fish { parts.collect::<Vec<String>>().join(" ") } else { parts.collect::<Vec<String>>().join("=") }; Some(Alias { name, value: remaining.trim().to_string(), }) } pub fn existing_aliases(shell: Option<Shell>) -> Result<Vec<Alias>, ShellError> { let shell = if let Some(shell) = shell { shell } else { Shell::current() }; // this only supports posix-y shells atm if !shell.is_posixish() { return Err(ShellError::NotSupported); } // This will return a list of aliases, each on its own line // They will be in the form foo=bar let aliases = shell.run_interactive(["alias"])?; let aliases: Vec<Alias> = aliases.lines().filter_map(parse_alias).collect(); Ok(aliases) } /// Import aliases from the current shell /// This will not import aliases already in the store /// Returns aliases that were set pub async fn import_aliases(store: &AliasStore) -> Result<Vec<Alias>> { let shell_aliases = existing_aliases(None)?; let store_aliases = store.aliases().await?; let mut res = Vec::new(); for alias in shell_aliases { // O(n), but n is small, and imports infrequent // can always make a map if store_aliases.contains(&alias) { continue; } res.push(alias.clone()); store.set(&alias.name, &alias.value).await?; } Ok(res) } #[cfg(test)] mod tests { use crate::shell::{Alias, parse_alias}; #[test] fn test_parse_simple_alias() { let alias = super::parse_alias("foo=bar").expect("failed to parse alias"); assert_eq!(alias.name, "foo"); assert_eq!(alias.value, "bar"); } #[test] fn test_parse_quoted_alias() { let alias = super::parse_alias("emacs='TERM=xterm-24bits emacs -nw'") .expect("failed to parse alias"); assert_eq!(alias.name, "emacs"); assert_eq!(alias.value, "'TERM=xterm-24bits emacs -nw'"); let git_alias = super::parse_alias("gwip='git add -A; git rm $(git ls-files 
--deleted) 2> /dev/null; git commit --no-verify --no-gpg-sign --message \"--wip-- [skip ci]\"'").expect("failed to parse alias"); assert_eq!(git_alias.name, "gwip"); assert_eq!( git_alias.value, "'git add -A; git rm $(git ls-files --deleted) 2> /dev/null; git commit --no-verify --no-gpg-sign --message \"--wip-- [skip ci]\"'" ); } #[test] fn test_parse_quoted_alias_equals() { let alias = super::parse_alias("emacs='TERM=xterm-24bits emacs -nw --foo=bar'") .expect("failed to parse alias"); assert_eq!(alias.name, "emacs"); assert_eq!(alias.value, "'TERM=xterm-24bits emacs -nw --foo=bar'"); } #[test] fn test_parse_fish() { let alias = super::parse_alias("alias foo bar").expect("failed to parse alias"); assert_eq!(alias.name, "foo"); assert_eq!(alias.value, "bar"); let alias = super::parse_alias("alias x 'exa --icons --git --classify --group-directories-first'") .expect("failed to parse alias"); assert_eq!(alias.name, "x"); assert_eq!( alias.value, "'exa --icons --git --classify --group-directories-first'" ); } #[test] fn test_parse_with_fortune() { // Because we run the alias command in an interactive subshell // there may be other output. // Ensure that the parser can handle it // Annoyingly not all aliases are picked up all the time if we use // a non-interactive subshell. Boo. let shell = " / In a consumer society there are \\ | inevitably two kinds of slaves: the | | prisoners of addiction and the | \\ prisoners of envy. / ------------------------------------- \\ ^__^ \\ (oo)\\_______ (__)\\ )\\/\\ ||----w | || || emacs='TERM=xterm-24bits emacs -nw --foo=bar' k=kubectl "; let aliases: Vec<Alias> = shell.lines().filter_map(parse_alias).collect(); assert_eq!(aliases[0].name, "emacs"); assert_eq!(aliases[0].value, "'TERM=xterm-24bits emacs -nw --foo=bar'"); assert_eq!(aliases[1].name, "k"); assert_eq!(aliases[1].value, "kubectl"); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/store/alias.rs
crates/atuin-dotfiles/src/store/alias.rs
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/store/var.rs
crates/atuin-dotfiles/src/store/var.rs
/// Store for shell vars /// I should abstract this and reuse code between the alias/env stores /// This is easier for now /// Once I have two implementations, building a common base is much easier. use std::collections::BTreeMap; use atuin_client::record::sqlite_store::SqliteStore; use atuin_common::record::{DecryptedData, Host, HostId}; use eyre::{Result, bail, ensure, eyre}; use atuin_client::record::encryption::PASETO_V4; use atuin_client::record::store::Store; use crate::shell::Var; const DOTFILES_VAR_VERSION: &str = "v0"; const DOTFILES_VAR_TAG: &str = "dotfiles-var"; const DOTFILES_VAR_LEN: usize = 20000; // 20kb max total len, way more than should be needed. #[derive(Debug, Clone, PartialEq, Eq)] pub enum VarRecord { Create(Var), // create a full record Delete(String), // delete by name } impl VarRecord { pub fn serialize(&self) -> Result<DecryptedData> { use rmp::encode; let mut output = vec![]; match self { VarRecord::Create(env) => { encode::write_u8(&mut output, 0)?; // create env.serialize(&mut output)?; } VarRecord::Delete(env) => { encode::write_u8(&mut output, 1)?; // delete encode::write_array_len(&mut output, 1)?; // 1 field encode::write_str(&mut output, env.as_str())?; } } Ok(DecryptedData(output)) } pub fn deserialize(data: &DecryptedData, version: &str) -> Result<Self> { use rmp::decode; fn error_report<E: std::fmt::Debug>(err: E) -> eyre::Report { eyre!("{err:?}") } match version { DOTFILES_VAR_VERSION => { let mut bytes = decode::Bytes::new(&data.0); let record_type = decode::read_u8(&mut bytes).map_err(error_report)?; match record_type { // create 0 => { let env = Var::deserialize(&mut bytes)?; Ok(VarRecord::Create(env)) } // delete 1 => { let nfields = decode::read_array_len(&mut bytes).map_err(error_report)?; ensure!( nfields == 1, "too many entries in v0 dotfiles var delete record" ); let bytes = bytes.remaining_slice(); let (key, bytes) = decode::read_str_from_slice(bytes).map_err(error_report)?; if !bytes.is_empty() { bail!("trailing 
bytes in encoded dotfiles var record. malformed") } Ok(VarRecord::Delete(key.to_owned())) } n => { bail!("unknown Dotfiles var record type {n}") } } } _ => { bail!("unknown version {version:?}") } } } } #[derive(Debug, Clone)] pub struct VarStore { pub store: SqliteStore, pub host_id: HostId, pub encryption_key: [u8; 32], } impl VarStore { // will want to init the actual kv store when that is done pub fn new(store: SqliteStore, host_id: HostId, encryption_key: [u8; 32]) -> VarStore { VarStore { store, host_id, encryption_key, } } /// Escape a value for use in POSIX shells (bash, zsh) /// This adds double quotes around the value and escapes any embedded double quotes fn escape_posix_value(value: &str) -> String { // If the value contains no special characters, we can use it unquoted if value .chars() .all(|c| c.is_alphanumeric() || c == '_' || c == '-' || c == '/' || c == '.') { value.to_string() } else { // Otherwise, wrap in double quotes and escape any special characters format!( "\"{}\"", value .replace('\\', "\\\\") .replace('"', "\\\"") .replace('$', "\\$") .replace('`', "\\`") ) } } /// Escape a value for use in fish shell /// Fish uses single quotes for literal strings, but we need to handle embedded single quotes fn escape_fish_value(value: &str) -> String { // If the value contains no special characters, we can use it unquoted if value .chars() .all(|c| c.is_alphanumeric() || c == '_' || c == '-' || c == '/' || c == '.') { value.to_string() } else { // Use single quotes and escape any embedded single quotes format!("'{}'", value.replace('\'', "\\'")) } } /// Escape a value for use in xonsh /// Xonsh uses Python-style string literals fn escape_xonsh_value(value: &str) -> String { // If the value contains no special characters, we can use it unquoted if value .chars() .all(|c| c.is_alphanumeric() || c == '_' || c == '-' || c == '/' || c == '.') { value.to_string() } else { // Use double quotes and escape appropriately for Python strings format!("\"{}\"", 
value.replace('\\', "\\\\").replace('"', "\\\"")) } } pub async fn xonsh(&self) -> Result<String> { let env = self.vars().await?; Ok(Self::format_xonsh(&env)) } pub async fn fish(&self) -> Result<String> { let env = self.vars().await?; Ok(Self::format_fish(&env)) } pub async fn posix(&self) -> Result<String> { let env = self.vars().await?; Ok(Self::format_posix(&env)) } pub async fn powershell(&self) -> Result<String> { let env = self.vars().await?; Ok(Self::format_powershell(&env)) } fn format_xonsh(env: &[Var]) -> String { let mut config = String::new(); for env in env { let escaped_value = Self::escape_xonsh_value(&env.value); config.push_str(&format!("${}={}\n", env.name, escaped_value)); } config } fn format_fish(env: &[Var]) -> String { let mut config = String::new(); for env in env { let escaped_value = Self::escape_fish_value(&env.value); config.push_str(&format!("set -gx {} {}\n", env.name, escaped_value)); } config } fn format_posix(env: &[Var]) -> String { let mut config = String::new(); for env in env { let escaped_value = Self::escape_posix_value(&env.value); if env.export { config.push_str(&format!("export {}={}\n", env.name, escaped_value)); } else { config.push_str(&format!("{}={}\n", env.name, escaped_value)); } } config } fn format_powershell(env: &[Var]) -> String { let mut config = String::new(); for var in env { config.push_str(&crate::shell::powershell::format_var(var)); } config } pub async fn build(&self) -> Result<()> { let dir = atuin_common::utils::dotfiles_cache_dir(); tokio::fs::create_dir_all(dir.clone()).await?; let env = self.vars().await?; // Build for all supported shells let posix = Self::format_posix(&env); let xonsh = Self::format_xonsh(&env); let fsh = Self::format_fish(&env); let powershell = Self::format_powershell(&env); // All the same contents, maybe optimize in the future or perhaps there will be quirks // per-shell // I'd prefer separation atm let zsh = dir.join("vars.zsh"); let bash = dir.join("vars.bash"); let fish = 
dir.join("vars.fish"); let xsh = dir.join("vars.xsh"); let ps1 = dir.join("vars.ps1"); tokio::fs::write(zsh, &posix).await?; tokio::fs::write(bash, &posix).await?; tokio::fs::write(fish, &fsh).await?; tokio::fs::write(xsh, &xonsh).await?; tokio::fs::write(ps1, &powershell).await?; Ok(()) } pub async fn set(&self, name: &str, value: &str, export: bool) -> Result<()> { if name.len() + value.len() > DOTFILES_VAR_LEN { return Err(eyre!( "var record too large: max len {} bytes", DOTFILES_VAR_LEN )); } let record = VarRecord::Create(Var { name: name.to_string(), value: value.to_string(), export, }); let bytes = record.serialize()?; let idx = self .store .last(self.host_id, DOTFILES_VAR_TAG) .await? .map_or(0, |entry| entry.idx + 1); let record = atuin_common::record::Record::builder() .host(Host::new(self.host_id)) .version(DOTFILES_VAR_VERSION.to_string()) .tag(DOTFILES_VAR_TAG.to_string()) .idx(idx) .data(bytes) .build(); self.store .push(&record.encrypt::<PASETO_V4>(&self.encryption_key)) .await?; // set mutates shell config, so build again self.build().await?; Ok(()) } pub async fn delete(&self, name: &str) -> Result<()> { if name.len() > DOTFILES_VAR_LEN { return Err(eyre!( "var record too large: max len {} bytes", DOTFILES_VAR_LEN, )); } let record = VarRecord::Delete(name.to_string()); let bytes = record.serialize()?; let idx = self .store .last(self.host_id, DOTFILES_VAR_TAG) .await? 
.map_or(0, |entry| entry.idx + 1); let record = atuin_common::record::Record::builder() .host(Host::new(self.host_id)) .version(DOTFILES_VAR_VERSION.to_string()) .tag(DOTFILES_VAR_TAG.to_string()) .idx(idx) .data(bytes) .build(); self.store .push(&record.encrypt::<PASETO_V4>(&self.encryption_key)) .await?; // delete mutates shell config, so build again self.build().await?; Ok(()) } pub async fn vars(&self) -> Result<Vec<Var>> { let mut build = BTreeMap::new(); // this is sorted, oldest to newest let tagged = self.store.all_tagged(DOTFILES_VAR_TAG).await?; for record in tagged { let version = record.version.clone(); let decrypted = match version.as_str() { DOTFILES_VAR_VERSION => record.decrypt::<PASETO_V4>(&self.encryption_key)?, version => bail!("unknown version {version:?}"), }; let ar = VarRecord::deserialize(&decrypted.data, version.as_str())?; match ar { VarRecord::Create(a) => { build.insert(a.name.clone(), a); } VarRecord::Delete(d) => { build.remove(&d); } } } Ok(build.into_values().collect()) } } #[cfg(test)] mod tests { use rand::rngs::OsRng; use atuin_client::record::sqlite_store::SqliteStore; use crate::{shell::Var, store::test_local_timeout}; use super::{DOTFILES_VAR_VERSION, VarRecord, VarStore}; use crypto_secretbox::{KeyInit, XSalsa20Poly1305}; #[test] fn encode_decode() { let record = Var { name: "BEEP".to_owned(), value: "boop".to_owned(), export: false, }; let record = VarRecord::Create(record); let snapshot = [ 204, 0, 147, 164, 66, 69, 69, 80, 164, 98, 111, 111, 112, 194, ]; let encoded = record.serialize().unwrap(); let decoded = VarRecord::deserialize(&encoded, DOTFILES_VAR_VERSION).unwrap(); assert_eq!(encoded.0, &snapshot); assert_eq!(decoded, record); } #[test] fn test_escape_posix_value() { // Simple values should not be quoted assert_eq!(VarStore::escape_posix_value("simple"), "simple"); assert_eq!(VarStore::escape_posix_value("path/to/file"), "path/to/file"); assert_eq!( VarStore::escape_posix_value("value_with_underscores"), 
"value_with_underscores" ); // Values with spaces should be quoted assert_eq!( VarStore::escape_posix_value("hello world"), "\"hello world\"" ); assert_eq!(VarStore::escape_posix_value("bar baz"), "\"bar baz\""); // Values with special characters should be quoted and escaped assert_eq!( VarStore::escape_posix_value("say \"hello\""), "\"say \\\"hello\\\"\"" ); assert_eq!( VarStore::escape_posix_value("path\\with\\backslashes"), "\"path\\\\with\\\\backslashes\"" ); assert_eq!( VarStore::escape_posix_value("say $hello"), "\"say \\$hello\"" ); assert_eq!( VarStore::escape_posix_value("see `example.md`"), "\"see \\`example.md\\`\"" ); } #[test] fn test_escape_fish_value() { // Simple values should not be quoted assert_eq!(VarStore::escape_fish_value("simple"), "simple"); assert_eq!(VarStore::escape_fish_value("path/to/file"), "path/to/file"); // Values with spaces should be single-quoted assert_eq!(VarStore::escape_fish_value("hello world"), "'hello world'"); assert_eq!(VarStore::escape_fish_value("bar baz"), "'bar baz'"); // Values with single quotes should be escaped assert_eq!(VarStore::escape_fish_value("don't"), "'don\\'t'"); } #[test] fn test_escape_xonsh_value() { // Simple values should not be quoted assert_eq!(VarStore::escape_xonsh_value("simple"), "simple"); assert_eq!(VarStore::escape_xonsh_value("path/to/file"), "path/to/file"); // Values with spaces should be quoted assert_eq!( VarStore::escape_xonsh_value("hello world"), "\"hello world\"" ); assert_eq!(VarStore::escape_xonsh_value("bar baz"), "\"bar baz\""); // Values with special characters should be quoted and escaped assert_eq!( VarStore::escape_xonsh_value("say \"hello\""), "\"say \\\"hello\\\"\"" ); assert_eq!( VarStore::escape_xonsh_value("path\\with\\backslashes"), "\"path\\\\with\\\\backslashes\"" ); } #[tokio::test] async fn build_vars() { let store = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let key: [u8; 32] = XSalsa20Poly1305::generate_key(&mut OsRng).into(); let 
host_id = atuin_common::record::HostId(atuin_common::utils::uuid_v7()); let env = VarStore::new(store, host_id, key); env.set("BEEP", "boop", false).await.unwrap(); env.set("HOMEBREW_NO_AUTO_UPDATE", "1", true).await.unwrap(); let mut env_vars = env.vars().await.unwrap(); env_vars.sort_by_key(|a| a.name.clone()); assert_eq!(env_vars.len(), 2); assert_eq!( env_vars[0], Var { name: String::from("BEEP"), value: String::from("boop"), export: false, } ); assert_eq!( env_vars[1], Var { name: String::from("HOMEBREW_NO_AUTO_UPDATE"), value: String::from("1"), export: true, } ); } #[tokio::test] async fn test_var_generation_with_spaces() { let store = SqliteStore::new(":memory:", test_local_timeout()) .await .unwrap(); let key: [u8; 32] = XSalsa20Poly1305::generate_key(&mut OsRng).into(); let host_id = atuin_common::record::HostId(atuin_common::utils::uuid_v7()); let env = VarStore::new(store, host_id, key); // Test the exact scenario from the bug report env.set("FOO", "bar baz", true).await.unwrap(); let posix_output = env.posix().await.unwrap(); let fish_output = env.fish().await.unwrap(); let xonsh_output = env.xonsh().await.unwrap(); // POSIX should quote the value assert_eq!(posix_output, "export FOO=\"bar baz\"\n"); // Fish should quote the value assert_eq!(fish_output, "set -gx FOO 'bar baz'\n"); // Xonsh should quote the value assert_eq!(xonsh_output, "$FOO=\"bar baz\"\n"); } }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell/bash.rs
crates/atuin-dotfiles/src/shell/bash.rs
use std::path::PathBuf; use crate::store::{AliasStore, var::VarStore}; async fn cached_aliases(path: PathBuf, store: &AliasStore) -> String { match tokio::fs::read_to_string(path).await { Ok(aliases) => aliases, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new aliases on the fly store.posix().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",) }) } } } async fn cached_vars(path: PathBuf, store: &VarStore) -> String { match tokio::fs::read_to_string(path).await { Ok(vars) => vars, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new vars on the fly store.posix().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate vars: \n{r}\n{e}'",) }) } } } /// Return bash dotfile config /// /// Do not return an error. We should not prevent the shell from starting. /// /// In the worst case, Atuin should not function but the shell should start correctly. /// /// While currently this only returns aliases, it will be extended to also return other synced dotfiles pub async fn alias_config(store: &AliasStore) -> String { // First try to read the cached config let aliases = atuin_common::utils::dotfiles_cache_dir().join("aliases.bash"); if aliases.exists() { return cached_aliases(aliases, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate aliases: {e}'"); } cached_aliases(aliases, store).await } pub async fn var_config(store: &VarStore) -> String { // First try to read the cached config let vars = atuin_common::utils::dotfiles_cache_dir().join("vars.bash"); if vars.exists() { return cached_vars(vars, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate vars: {e}'"); } cached_vars(vars, store).await }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell/xonsh.rs
crates/atuin-dotfiles/src/shell/xonsh.rs
use std::path::PathBuf; use crate::store::{AliasStore, var::VarStore}; async fn cached_aliases(path: PathBuf, store: &AliasStore) -> String { match tokio::fs::read_to_string(path).await { Ok(aliases) => aliases, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new aliases on the fly store.xonsh().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",) }) } } } async fn cached_vars(path: PathBuf, store: &VarStore) -> String { match tokio::fs::read_to_string(path).await { Ok(vars) => vars, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new vars on the fly store.xonsh().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate vars: \n{r}\n{e}'",) }) } } } /// Return xonsh dotfile config /// /// Do not return an error. We should not prevent the shell from starting. /// /// In the worst case, Atuin should not function but the shell should start correctly. /// /// While currently this only returns aliases, it will be extended to also return other synced dotfiles pub async fn alias_config(store: &AliasStore) -> String { // First try to read the cached config let aliases = atuin_common::utils::dotfiles_cache_dir().join("aliases.xsh"); if aliases.exists() { return cached_aliases(aliases, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate aliases: {e}'"); } cached_aliases(aliases, store).await } pub async fn var_config(store: &VarStore) -> String { // First try to read the cached config let vars = atuin_common::utils::dotfiles_cache_dir().join("vars.xsh"); if vars.exists() { return cached_vars(vars, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate vars: {e}'"); } cached_vars(vars, store).await }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false
atuinsh/atuin
https://github.com/atuinsh/atuin/blob/8a010fed33ce19a9ddc589196c73c07ba7ba88e7/crates/atuin-dotfiles/src/shell/zsh.rs
crates/atuin-dotfiles/src/shell/zsh.rs
use std::path::PathBuf; use crate::store::{AliasStore, var::VarStore}; async fn cached_aliases(path: PathBuf, store: &AliasStore) -> String { match tokio::fs::read_to_string(path).await { Ok(aliases) => aliases, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new aliases on the fly store.posix().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",) }) } } } async fn cached_vars(path: PathBuf, store: &VarStore) -> String { match tokio::fs::read_to_string(path).await { Ok(aliases) => aliases, Err(r) => { // we failed to read the file for some reason, but the file does exist // fallback to generating new vars on the fly store.posix().await.unwrap_or_else(|e| { format!("echo 'Atuin: failed to read and generate aliases: \n{r}\n{e}'",) }) } } } /// Return zsh dotfile config /// /// Do not return an error. We should not prevent the shell from starting. /// /// In the worst case, Atuin should not function but the shell should start correctly. /// /// While currently this only returns aliases, it will be extended to also return other synced dotfiles pub async fn alias_config(store: &AliasStore) -> String { // First try to read the cached config let aliases = atuin_common::utils::dotfiles_cache_dir().join("aliases.zsh"); if aliases.exists() { return cached_aliases(aliases, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate aliases: {e}'"); } cached_aliases(aliases, store).await } pub async fn var_config(store: &VarStore) -> String { // First try to read the cached config let vars = atuin_common::utils::dotfiles_cache_dir().join("vars.zsh"); if vars.exists() { return cached_vars(vars, store).await; } if let Err(e) = store.build().await { return format!("echo 'Atuin: failed to generate aliases: {e}'"); } cached_vars(vars, store).await }
rust
MIT
8a010fed33ce19a9ddc589196c73c07ba7ba88e7
2026-01-04T15:36:14.139439Z
false