repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/task.rs | src/task/task.rs | use super::tag::{SyntheticTag, TagInner};
use super::{utc_timestamp, Annotation, Status, Tag, Timestamp};
use crate::depmap::DependencyMap;
use crate::errors::{Error, Result};
use crate::storage::TaskMap;
use crate::{Operations, TaskData};
use chrono::prelude::*;
use log::trace;
use std::convert::AsRef;
use std::convert::TryInto;
use std::str::FromStr;
use std::sync::Arc;
use uuid::Uuid;
/// A task, with a high-level interface.
///
/// Building on [`crate::TaskData`], this type implements the task model, with ergonomic APIs to
/// manipulate tasks without deep familiarity with the [task
/// model](https://gothenburgbitfactory.org/taskchampion/tasks.html#keys).
///
/// Note that Task objects represent a snapshot of the task at a moment in time, and are not
/// protected by the atomicity of the backend storage. Concurrent modifications are safe,
/// but a Task that is cached for more than a few seconds may cause the user to see stale
/// data. Fetch, use, and drop Tasks quickly.
///
/// See the documentation for [`crate::Replica`] for background on the `ops` arguments to methods
/// on this type.
#[derive(Debug, Clone)]
pub struct Task {
    // The underlying task data (key/value map plus UUID).
    data: TaskData,
    // The dependency map for this replica, for rapidly computing synthetic tags.
    depmap: Arc<DependencyMap>,
    // True if an operation has already been emitted to update the `modified` property.
    updated_modified: bool,
}
impl PartialEq for Task {
    fn eq(&self, other: &Task) -> bool {
        // compare only the task data; depmap is just present for reference
        // (it is replica-wide state, not part of this task's identity) and
        // `updated_modified` is transient bookkeeping for in-flight edits.
        self.data == other.data
    }
}
/// An enum containing all of the key names defined in the data model, with the exception
/// of the properties containing data (`tag_..`, etc.)
///
/// `as_ref()` yields the lower-case taskmap key for each variant (e.g.
/// `Prop::Status.as_ref() == "status"`), and `Prop::from_str` parses a key
/// back into a variant (used by `Task::is_known_key`).
#[derive(strum_macros::AsRefStr, strum_macros::EnumString)]
#[strum(serialize_all = "kebab-case")]
enum Prop {
    Description,
    Due,
    Modified,
    Start,
    Status,
    Priority,
    Wait,
    End,
    Entry,
}
/// Split a legacy UDA key into a `(namespace, key)` pair on the first `.`.
///
/// Keys without a `.` are treated as having an empty namespace and are
/// returned as `("", key)`.
fn uda_string_to_tuple(key: &str) -> (&str, &str) {
    // `split_once` returns `None` when there is no `.`, in which case the
    // whole string is the (un-namespaced) key. The `#[allow(clippy::ptr_arg)]`
    // previously present here was vestigial: the parameter is `&str`.
    match key.split_once('.') {
        Some((namespace, key)) => (namespace, key),
        None => ("", key),
    }
}
/// Join a `(namespace, key)` pair into a single legacy UDA key.
///
/// An empty namespace yields the bare key; otherwise the two parts are
/// joined with a `.` separator.
fn uda_tuple_to_string(namespace: impl AsRef<str>, key: impl AsRef<str>) -> String {
    match (namespace.as_ref(), key.as_ref()) {
        ("", key) => key.to_owned(),
        (namespace, key) => format!("{namespace}.{key}"),
    }
}
impl Task {
    /// Wrap a [`TaskData`] and the replica's dependency map into a high-level `Task`.
    pub(crate) fn new(data: TaskData, depmap: Arc<DependencyMap>) -> Task {
        Task {
            data,
            depmap,
            updated_modified: false,
        }
    }

    /// Convert this Task into a TaskData.
    pub fn into_task_data(self) -> TaskData {
        self.data
    }

    /// Get this task's UUID.
    pub fn get_uuid(&self) -> Uuid {
        self.data.get_uuid()
    }

    #[deprecated(since = "0.7.0", note = "please use TaskData::properties")]
    pub fn get_taskmap(&self) -> &TaskMap {
        self.data.get_taskmap()
    }

    /// Get the task's status, defaulting to [`Status::Pending`] when unset.
    pub fn get_status(&self) -> Status {
        self.data
            .get(Prop::Status.as_ref())
            .map(Status::from_taskmap)
            .unwrap_or(Status::Pending)
    }

    /// Get the task's description, or `""` when unset.
    pub fn get_description(&self) -> &str {
        self.data.get(Prop::Description.as_ref()).unwrap_or("")
    }

    /// Get the entry (creation) time for this task, if set.
    pub fn get_entry(&self) -> Option<Timestamp> {
        self.get_timestamp(Prop::Entry.as_ref())
    }

    /// Get the task's priority, or `""` when unset.
    pub fn get_priority(&self) -> &str {
        self.data.get(Prop::Priority.as_ref()).unwrap_or("")
    }

    /// Get the wait time. If this value is set, it will be returned, even
    /// if it is in the past.
    pub fn get_wait(&self) -> Option<Timestamp> {
        self.get_timestamp(Prop::Wait.as_ref())
    }

    /// Determine whether this task is waiting now.
    pub fn is_waiting(&self) -> bool {
        if let Some(ts) = self.get_wait() {
            return ts > Utc::now();
        }
        false
    }

    /// Determine whether this task is active -- that is, that it has been started
    /// and not stopped.
    pub fn is_active(&self) -> bool {
        self.data.has(Prop::Start.as_ref())
    }

    /// Determine whether this task is blocked -- that is, has at least one unresolved dependency.
    pub fn is_blocked(&self) -> bool {
        self.depmap.dependencies(self.get_uuid()).next().is_some()
    }

    /// Determine whether this task is blocking -- that is, has at least one unresolved dependent.
    pub fn is_blocking(&self) -> bool {
        self.depmap.dependents(self.get_uuid()).next().is_some()
    }

    /// Determine whether a given synthetic tag is present on this task. All other
    /// synthetic tag calculations are based on this one.
    fn has_synthetic_tag(&self, synth: &SyntheticTag) -> bool {
        match synth {
            SyntheticTag::Waiting => self.is_waiting(),
            SyntheticTag::Active => self.is_active(),
            SyntheticTag::Pending => self.get_status() == Status::Pending,
            SyntheticTag::Completed => self.get_status() == Status::Completed,
            SyntheticTag::Deleted => self.get_status() == Status::Deleted,
            SyntheticTag::Blocked => self.is_blocked(),
            SyntheticTag::Unblocked => !self.is_blocked(),
            SyntheticTag::Blocking => self.is_blocking(),
        }
    }

    /// Check if this task has the given tag
    pub fn has_tag(&self, tag: &Tag) -> bool {
        match tag.inner() {
            // user tags are stored as presence of a `tag_<name>` property
            TagInner::User(s) => self.data.has(format!("tag_{s}")),
            // synthetic tags are computed from the task's state
            TagInner::Synthetic(st) => self.has_synthetic_tag(st),
        }
    }

    /// Iterate over the task's tags
    pub fn get_tags(&self) -> impl Iterator<Item = Tag> + '_ {
        use strum::IntoEnumIterator;
        self.data
            .properties()
            .filter_map(|k| {
                if let Some(tag) = k.strip_prefix("tag_") {
                    if let Ok(tag) = tag.try_into() {
                        trace!("success with tag {tag}");
                        return Some(tag);
                    }
                    // note that invalid "tag_*" are ignored
                    trace!("skipped tag {tag}");
                }
                None
            })
            .chain(
                SyntheticTag::iter()
                    .filter(move |st| self.has_synthetic_tag(st))
                    .map(|st| Tag::from_inner(TagInner::Synthetic(st))),
            )
    }

    /// Iterate over the task's annotations, in arbitrary order.
    pub fn get_annotations(&self) -> impl Iterator<Item = Annotation> + '_ {
        self.data.iter().filter_map(|(k, v)| {
            if let Some(ts) = k.strip_prefix("annotation_") {
                if let Ok(ts) = ts.parse::<i64>() {
                    return Some(Annotation {
                        entry: utc_timestamp(ts),
                        description: v.to_owned(),
                    });
                }
                // note that invalid "annotation_*" are ignored
            }
            None
        })
    }

    /// Get the named user defined attributes (UDA). This will return None
    /// for any key defined in the Task data model, regardless of whether
    /// it is set or not.
    #[deprecated(note = "namespaced UDAs will not be supported in the future")]
    pub fn get_uda(&self, namespace: &str, key: &str) -> Option<&str> {
        #[allow(deprecated)]
        self.get_legacy_uda(uda_tuple_to_string(namespace, key).as_ref())
    }

    /// Get the user defined attributes (UDAs) of this task, in arbitrary order. Each key is split
    /// on the first `.` character. Legacy keys that do not contain `.` are represented as `("",
    /// key)`.
    #[deprecated(note = "namespaced UDAs will not be supported in the future")]
    pub fn get_udas(&self) -> impl Iterator<Item = ((&str, &str), &str)> + '_ {
        self.data
            .iter()
            .filter(|(k, _)| !Task::is_known_key(k))
            .map(|(k, v)| (uda_string_to_tuple(k), v.as_ref()))
    }

    /// Get the named user defined attribute (UDA) in a legacy format. This will return None for
    /// any key defined in the Task data model, regardless of whether it is set or not.
    #[deprecated(note = "please use Task::get_user_defined_attribute")]
    pub fn get_legacy_uda(&self, key: &str) -> Option<&str> {
        self.get_user_defined_attribute(key)
    }

    /// Get the named user defined attribute (UDA). This will return None for any key
    /// defined in the Task data model, regardless of whether it is set or not.
    pub fn get_user_defined_attribute(&self, key: &str) -> Option<&str> {
        if Task::is_known_key(key) {
            return None;
        }
        self.data.get(key)
    }

    /// Like `get_udas`, but returning each UDA key as a single string.
    #[deprecated(note = "please use Task::get_user_defined_attributes")]
    pub fn get_legacy_udas(&self) -> impl Iterator<Item = (&str, &str)> + '_ {
        self.get_user_defined_attributes()
    }

    /// Return each UDA key as a single string.
    pub fn get_user_defined_attributes(&self) -> impl Iterator<Item = (&str, &str)> + '_ {
        self.data
            .iter()
            .filter(|(p, _)| !Task::is_known_key(p))
            .map(|(p, v)| (p.as_ref(), v.as_ref()))
    }

    /// Get the modification time for this task.
    pub fn get_modified(&self) -> Option<Timestamp> {
        self.get_timestamp(Prop::Modified.as_ref())
    }

    /// Get the due time for this task.
    pub fn get_due(&self) -> Option<Timestamp> {
        self.get_timestamp(Prop::Due.as_ref())
    }

    /// Get the UUIDs of tasks on which this task depends.
    ///
    /// This includes all dependencies, regardless of their status. In fact, it may include
    /// dependencies that do not exist.
    pub fn get_dependencies(&self) -> impl Iterator<Item = Uuid> + '_ {
        self.data.properties().filter_map(|p| {
            if let Some(dep_str) = p.strip_prefix("dep_") {
                if let Ok(u) = Uuid::parse_str(dep_str) {
                    return Some(u);
                }
                // (un-parseable dep_.. properties are ignored)
            }
            None
        })
    }

    /// Get task's property value by name.
    pub fn get_value<S: Into<String>>(&self, property: S) -> Option<&str> {
        let property = property.into();
        self.data.get(property)
    }

    /// Set the task's status.
    ///
    /// This also updates the task's "end" property appropriately.
    pub fn set_status(&mut self, status: Status, ops: &mut Operations) -> Result<()> {
        match status {
            Status::Pending | Status::Recurring => {
                // clear "end" when a task becomes "pending" or "recurring"
                if self.data.has(Prop::End.as_ref()) {
                    self.set_timestamp(Prop::End.as_ref(), None, ops)?;
                }
            }
            Status::Completed | Status::Deleted => {
                // set "end" when a task is deleted or completed
                if !self.data.has(Prop::End.as_ref()) {
                    self.set_timestamp(Prop::End.as_ref(), Some(Utc::now()), ops)?;
                }
            }
            // other statuses leave "end" untouched
            _ => {}
        }
        self.set_value(
            Prop::Status.as_ref(),
            Some(String::from(status.to_taskmap())),
            ops,
        )
    }

    /// Set the task's description.
    pub fn set_description(&mut self, description: String, ops: &mut Operations) -> Result<()> {
        self.set_value(Prop::Description.as_ref(), Some(description), ops)
    }

    /// Set the task's priority.
    pub fn set_priority(&mut self, priority: String, ops: &mut Operations) -> Result<()> {
        self.set_value(Prop::Priority.as_ref(), Some(priority), ops)
    }

    /// Set or clear the task's entry (creation) time.
    pub fn set_entry(&mut self, entry: Option<Timestamp>, ops: &mut Operations) -> Result<()> {
        self.set_timestamp(Prop::Entry.as_ref(), entry, ops)
    }

    /// Set or clear the task's wait time.
    pub fn set_wait(&mut self, wait: Option<Timestamp>, ops: &mut Operations) -> Result<()> {
        self.set_timestamp(Prop::Wait.as_ref(), wait, ops)
    }

    /// Set the task's modification time explicitly; this suppresses the automatic
    /// `modified` update performed by `set_value`.
    pub fn set_modified(&mut self, modified: Timestamp, ops: &mut Operations) -> Result<()> {
        self.set_timestamp(Prop::Modified.as_ref(), Some(modified), ops)
    }

    /// Set a task's property by name.
    ///
    /// This will automatically update the `modified` timestamp if it has not already been
    /// modified, but will recognize modifications of the `modified` property and not make further
    /// updates to it. Use [`TaskData::update`] to modify the task without this behavior.
    pub fn set_value<S: Into<String>>(
        &mut self,
        property: S,
        value: Option<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        let property = property.into();
        // update the modified timestamp unless we are setting it explicitly
        if property != Prop::Modified.as_ref() && !self.updated_modified {
            let now = format!("{}", Utc::now().timestamp());
            trace!("task {}: set property modified={:?}", self.get_uuid(), now);
            self.data.update(Prop::Modified.as_ref(), Some(now), ops);
        }
        // Whether `modified` was just updated automatically or is being set
        // explicitly by this call, it needs no further automatic updates.
        self.updated_modified = true;
        if let Some(ref v) = value {
            trace!(
                "task {}: set property {}={:?}",
                self.get_uuid(),
                property,
                v
            );
        } else {
            trace!("task {}: remove property {}", self.get_uuid(), property);
        }
        self.data.update(property, value, ops);
        Ok(())
    }

    /// Start the task by setting "start" to the current timestamp, if the task is not already
    /// active.
    pub fn start(&mut self, ops: &mut Operations) -> Result<()> {
        if self.is_active() {
            return Ok(());
        }
        self.set_timestamp(Prop::Start.as_ref(), Some(Utc::now()), ops)
    }

    /// Stop the task by removing the `start` key
    pub fn stop(&mut self, ops: &mut Operations) -> Result<()> {
        self.set_timestamp(Prop::Start.as_ref(), None, ops)
    }

    /// Mark this task as complete
    pub fn done(&mut self, ops: &mut Operations) -> Result<()> {
        self.set_status(Status::Completed, ops)
    }

    /// Mark this task as deleted.
    ///
    /// Note that this does not delete the task. It merely marks the task as
    /// deleted.
    #[deprecated(
        since = "0.7.0",
        note = "please call `Task::set_status` with `Status::Deleted`"
    )]
    pub fn delete(&mut self, ops: &mut Operations) -> Result<()> {
        self.set_status(Status::Deleted, ops)
    }

    /// Add a tag to this task. Does nothing if the tag is already present.
    pub fn add_tag(&mut self, tag: &Tag, ops: &mut Operations) -> Result<()> {
        if tag.is_synthetic() {
            return Err(Error::Usage(String::from(
                "Synthetic tags cannot be modified",
            )));
        }
        self.set_value(format!("tag_{tag}"), Some("".to_owned()), ops)
    }

    /// Remove a tag from this task. Does nothing if the tag is not present.
    pub fn remove_tag(&mut self, tag: &Tag, ops: &mut Operations) -> Result<()> {
        if tag.is_synthetic() {
            return Err(Error::Usage(String::from(
                "Synthetic tags cannot be modified",
            )));
        }
        self.set_value(format!("tag_{tag}"), None, ops)
    }

    /// Add a new annotation. Note that annotations with the same entry time
    /// will overwrite one another.
    pub fn add_annotation(&mut self, ann: Annotation, ops: &mut Operations) -> Result<()> {
        self.set_value(
            format!("annotation_{}", ann.entry.timestamp()),
            Some(ann.description),
            ops,
        )
    }

    /// Remove an annotation, based on its entry time.
    pub fn remove_annotation(&mut self, entry: Timestamp, ops: &mut Operations) -> Result<()> {
        self.set_value(format!("annotation_{}", entry.timestamp()), None, ops)
    }

    /// Set or clear the task's due time.
    pub fn set_due(&mut self, due: Option<Timestamp>, ops: &mut Operations) -> Result<()> {
        self.set_timestamp(Prop::Due.as_ref(), due, ops)
    }

    /// Set a user-defined attribute (UDA). This will fail if the key is defined by the data
    /// model.
    #[deprecated(note = "namespaced UDAs will not be supported in the future")]
    pub fn set_uda(
        &mut self,
        namespace: impl AsRef<str>,
        key: impl AsRef<str>,
        value: impl Into<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        let key = uda_tuple_to_string(namespace, key);
        #[allow(deprecated)]
        self.set_legacy_uda(key, value, ops)
    }

    /// Remove a user-defined attribute (UDA). This will fail if the key is defined by the data
    /// model.
    #[deprecated(note = "namespaced UDAs will not be supported in the future")]
    pub fn remove_uda(
        &mut self,
        namespace: impl AsRef<str>,
        key: impl AsRef<str>,
        ops: &mut Operations,
    ) -> Result<()> {
        let key = uda_tuple_to_string(namespace, key);
        #[allow(deprecated)]
        self.remove_legacy_uda(key, ops)
    }

    /// Set a user-defined attribute (UDA), where the key is a legacy key.
    #[deprecated(note = "please use Task::set_user_defined_attribute")]
    pub fn set_legacy_uda(
        &mut self,
        key: impl Into<String>,
        value: impl Into<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        self.set_user_defined_attribute(key, value, ops)
    }

    /// Set a user-defined attribute (UDA).
    pub fn set_user_defined_attribute(
        &mut self,
        key: impl Into<String>,
        value: impl Into<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        let key = key.into();
        if Task::is_known_key(&key) {
            return Err(Error::Usage(format!(
                "Property name {key} has special meaning in a task and cannot be used as a UDA"
            )));
        }
        self.set_value(key, Some(value.into()), ops)
    }

    /// Remove a user-defined attribute (UDA), where the key is a legacy key.
    #[deprecated(note = "please use Task::remove_user_defined_attribute")]
    pub fn remove_legacy_uda(
        &mut self,
        key: impl Into<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        self.remove_user_defined_attribute(key, ops)
    }

    /// Remove a user-defined attribute (UDA).
    pub fn remove_user_defined_attribute(
        &mut self,
        key: impl Into<String>,
        ops: &mut Operations,
    ) -> Result<()> {
        let key = key.into();
        if Task::is_known_key(&key) {
            return Err(Error::Usage(format!(
                "Property name {key} has special meaning in a task and cannot be used as a UDA"
            )));
        }
        self.set_value(key, None, ops)
    }

    /// Add a dependency.
    pub fn add_dependency(&mut self, dep: Uuid, ops: &mut Operations) -> Result<()> {
        let key = format!("dep_{dep}");
        self.set_value(key, Some("".to_string()), ops)
    }

    /// Remove a dependency.
    pub fn remove_dependency(&mut self, dep: Uuid, ops: &mut Operations) -> Result<()> {
        let key = format!("dep_{dep}");
        self.set_value(key, None, ops)
    }

    /// Get the given timestamp property.
    ///
    /// This will return `None` if the property is not set, or if it is not a valid
    /// timestamp. Otherwise, a correctly parsed Timestamp is returned.
    pub fn get_timestamp(&self, property: &str) -> Option<Timestamp> {
        if let Some(ts) = self.data.get(property) {
            if let Ok(ts) = ts.parse() {
                return Some(utc_timestamp(ts));
            }
            // if the value does not parse as an integer, default to None
        }
        None
    }

    /// Set the given timestamp property, mapping the value correctly.
    pub fn set_timestamp(
        &mut self,
        property: &str,
        value: Option<Timestamp>,
        ops: &mut Operations,
    ) -> Result<()> {
        // timestamps are stored as stringified epoch seconds
        self.set_value(property, value.map(|v| v.timestamp().to_string()), ops)
    }

    // -- utility functions

    /// True if `key` is reserved by the data model: a `Prop` name or one of the
    /// structured prefixes (tags, annotations, dependencies).
    fn is_known_key(key: &str) -> bool {
        Prop::from_str(key).is_ok()
            || key.starts_with("tag_")
            || key.starts_with("annotation_")
            || key.starts_with("dep_")
    }
}
#[cfg(test)]
#[allow(deprecated)]
mod test {
use super::*;
use crate::{storage::inmemory::InMemoryStorage, Replica};
use pretty_assertions::assert_eq;
use std::collections::HashSet;
fn dm() -> Arc<DependencyMap> {
Arc::new(DependencyMap::new())
}
// Test task mutation by modifying a task and checking the assertions both on the
// modified task and on a re-loaded task after the operations are committed. Then,
// apply the same operations again and check that the result is the same.
async fn with_mut_task<MODIFY: Fn(&mut Task, &mut Operations), ASSERT: Fn(&Task)>(
modify: MODIFY,
assert: ASSERT,
) {
let mut replica = Replica::new(InMemoryStorage::new());
let mut ops = Operations::new();
let uuid = Uuid::new_v4();
let mut task = replica.create_task(uuid, &mut ops).await.unwrap();
// Modify the task
modify(&mut task, &mut ops);
// Check assertions about the task before committing it.
assert(&task);
println!("commiting operations from first call to modify function");
replica.commit_operations(ops).await.unwrap();
// Check assertions on task loaded from storage
let mut task = replica.get_task(uuid).await.unwrap().unwrap();
assert(&task);
// Apply the operations again, checking that they do not fail.
let mut ops = Operations::new();
modify(&mut task, &mut ops);
// Changes should still be as expected before commit.
assert(&task);
println!("commiting operations from second call to modify function");
replica.commit_operations(ops).await.unwrap();
// Changes should still be as expected when loaded from storage.
let task = replica.get_task(uuid).await.unwrap().unwrap();
assert(&task);
}
/// Create a user tag, without checking its validity
fn utag(name: &'static str) -> Tag {
Tag::from_inner(TagInner::User(name.into()))
}
/// Create a synthetic tag
fn stag(synth: SyntheticTag) -> Tag {
Tag::from_inner(TagInner::Synthetic(synth))
}
#[test]
fn test_is_active_never_started() {
let task = Task::new(TaskData::new(Uuid::new_v4(), TaskMap::new()), dm());
assert!(!task.is_active());
}
#[test]
fn test_is_active_active() {
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("start"), String::from("1234"))]
.drain(..)
.collect(),
),
dm(),
);
assert!(task.is_active());
}
#[test]
fn test_is_active_inactive() {
let task = Task::new(TaskData::new(Uuid::new_v4(), Default::default()), dm());
assert!(!task.is_active());
}
#[test]
fn test_entry_not_set() {
let task = Task::new(TaskData::new(Uuid::new_v4(), TaskMap::new()), dm());
assert_eq!(task.get_entry(), None);
}
#[test]
fn test_entry_set() {
let ts = Utc.with_ymd_and_hms(1980, 1, 1, 0, 0, 0).unwrap();
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("entry"), format!("{}", ts.timestamp()))]
.drain(..)
.collect(),
),
dm(),
);
assert_eq!(task.get_entry(), Some(ts));
}
#[test]
fn test_wait_not_set() {
let task = Task::new(TaskData::new(Uuid::new_v4(), TaskMap::new()), dm());
assert!(!task.is_waiting());
assert_eq!(task.get_wait(), None);
}
#[test]
fn test_wait_in_past() {
let ts = Utc.with_ymd_and_hms(1970, 1, 1, 0, 0, 0).unwrap();
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("wait"), format!("{}", ts.timestamp()))]
.drain(..)
.collect(),
),
dm(),
);
assert!(!task.is_waiting());
assert_eq!(task.get_wait(), Some(ts));
}
#[test]
fn test_wait_in_future() {
let ts = Utc.with_ymd_and_hms(3000, 1, 1, 0, 0, 0).unwrap();
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("wait"), format!("{}", ts.timestamp()))]
.drain(..)
.collect(),
),
dm(),
);
assert!(task.is_waiting());
assert_eq!(task.get_wait(), Some(ts));
}
#[test]
fn test_has_tag() {
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![
(String::from("tag_abc"), String::from("")),
(String::from("start"), String::from("1234")),
]
.drain(..)
.collect(),
),
dm(),
);
assert!(task.has_tag(&utag("abc")));
assert!(!task.has_tag(&utag("def")));
assert!(task.has_tag(&stag(SyntheticTag::Active)));
assert!(task.has_tag(&stag(SyntheticTag::Pending)));
assert!(!task.has_tag(&stag(SyntheticTag::Waiting)));
}
#[test]
fn test_get_tags() {
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![
(String::from("tag_abc"), String::from("")),
(String::from("tag_def"), String::from("")),
// set `wait` so the synthetic tag WAITING is present
(String::from("wait"), String::from("33158909732")),
]
.drain(..)
.collect(),
),
dm(),
);
let tags: HashSet<_> = task.get_tags().collect();
let exp = HashSet::from([
utag("abc"),
utag("def"),
stag(SyntheticTag::Pending),
stag(SyntheticTag::Waiting),
stag(SyntheticTag::Unblocked),
]);
assert_eq!(tags, exp);
}
#[test]
fn test_get_tags_invalid_tags() {
let taskdata = TaskData::new(
Uuid::new_v4(),
vec![
(String::from("tag_ok"), String::from("")),
(String::from("tag_"), String::from("")),
(String::from("tag_123"), String::from("")),
(String::from("tag_!!a"), String::from("")),
(String::from("tag_a!!"), String::from("")),
(String::from("tag_\u{1f980}a"), String::from("")),
(String::from("tag_\u{1f980}"), String::from("")),
]
.drain(..)
.collect(),
);
trace!("{:?}", taskdata);
let task = Task::new(taskdata, dm());
// only "ok" is OK
let tags: HashSet<_> = task.get_tags().collect();
assert_eq!(
tags,
HashSet::from([
stag(SyntheticTag::Pending),
utag("a!!"),
utag("\u{1f980}a"),
utag("\u{1f980}"),
stag(SyntheticTag::Unblocked),
utag("ok"),
])
);
}
#[test]
fn test_get_due() {
let test_time = Utc.with_ymd_and_hms(2033, 1, 1, 0, 0, 0).unwrap();
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("due"), format!("{}", test_time.timestamp()))]
.drain(..)
.collect(),
),
dm(),
);
assert_eq!(task.get_due(), Some(test_time))
}
#[test]
fn test_get_invalid_due() {
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![(String::from("due"), String::from("invalid"))]
.drain(..)
.collect(),
),
dm(),
);
assert_eq!(task.get_due(), None);
}
#[tokio::test]
async fn test_due_new_task() {
with_mut_task(|_task, _ops| {}, |task| assert_eq!(task.get_due(), None)).await;
}
#[tokio::test]
async fn test_add_due() {
let test_time = Utc.with_ymd_and_hms(2033, 1, 1, 0, 0, 0).unwrap();
with_mut_task(
|task, ops| {
task.set_due(Some(test_time), ops).unwrap();
},
|task| assert_eq!(task.get_due(), Some(test_time)),
)
.await;
}
#[tokio::test]
async fn test_remove_due() {
with_mut_task(
|task, ops| {
task.data.update("due", Some("some-time".into()), ops);
assert!(task.data.has("due"));
task.set_due(None, ops).unwrap();
},
|task| {
assert!(!task.data.has("due"));
},
)
.await;
}
#[test]
fn test_get_priority_default() {
let task = Task::new(TaskData::new(Uuid::new_v4(), TaskMap::new()), dm());
assert_eq!(task.get_priority(), "");
}
#[test]
fn test_get_annotations() {
let task = Task::new(
TaskData::new(
Uuid::new_v4(),
vec![
(
String::from("annotation_1635301873"),
String::from("left message"),
),
(
String::from("annotation_1635301883"),
String::from("left another message"),
),
(String::from("annotation_"), String::from("invalid")),
(String::from("annotation_abcde"), String::from("invalid")),
]
.drain(..)
.collect(),
),
dm(),
);
let mut anns: Vec<_> = task.get_annotations().collect();
anns.sort();
assert_eq!(
anns,
vec![
Annotation {
entry: Utc.timestamp_opt(1635301873, 0).unwrap(),
description: "left message".into()
},
Annotation {
entry: Utc.timestamp_opt(1635301883, 0).unwrap(),
description: "left another message".into()
}
]
);
}
#[tokio::test]
async fn test_add_annotation() {
with_mut_task(
|task, ops| {
task.add_annotation(
Annotation {
entry: Utc.timestamp_opt(1635301900, 0).unwrap(),
description: "right message".into(),
},
ops,
)
.unwrap();
},
|task| {
let k = "annotation_1635301900";
assert_eq!(task.data.get(k).unwrap(), "right message".to_owned());
},
)
.await;
}
#[tokio::test]
async fn test_add_annotation_overwrite() {
with_mut_task(
|task, ops| {
task.add_annotation(
Annotation {
entry: Utc.timestamp_opt(1635301900, 0).unwrap(),
description: "right message".into(),
},
ops,
)
.unwrap();
task.add_annotation(
Annotation {
entry: Utc.timestamp_opt(1635301900, 0).unwrap(),
description: "right message 2".into(),
},
ops,
)
.unwrap();
},
|task| {
let k = "annotation_1635301900";
assert_eq!(task.data.get(k).unwrap(), "right message 2".to_owned());
},
)
.await;
}
#[tokio::test]
async fn test_remove_annotation() {
with_mut_task(
|task, ops| {
task.data
.update("annotation_1635301883", Some("left message".into()), ops);
task.set_value(
"annotation_1635301883",
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | true |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/data.rs | src/task/data.rs | use crate::{storage::TaskMap, Operation, Operations};
use chrono::Utc;
use uuid::Uuid;
/// A task.
///
/// This type presents a low-level interface consisting only of a key/value map. Interpretation of
/// fields is up to the user, and modifications both modify the [`TaskData`] and create one or
/// more [`Operation`](crate::Operation) values that can later be committed to the replica.
///
/// This interface is intended for sophisticated applications like Taskwarrior which give meaning
/// to key and values themselves. Use [`Task`](crate::Task) for a higher-level interface with
/// methods to update status, set tags, and so on.
///
/// See the documentation for [`crate::Replica`] for background on the `ops` arguments to methods
/// on this type.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct TaskData {
    // The task's immutable identifier.
    uuid: Uuid,
    // Key/value properties of the task; interpretation of keys and values is
    // left entirely to the caller.
    taskmap: TaskMap,
}
impl TaskData {
    /// Constructor for a TaskData representing an existing task.
    pub(crate) fn new(uuid: Uuid, taskmap: TaskMap) -> Self {
        Self { uuid, taskmap }
    }

    /// Create a new, empty task with the given UUID.
    pub fn create(uuid: Uuid, ops: &mut Operations) -> Self {
        ops.push(Operation::Create { uuid });
        Self {
            uuid,
            taskmap: TaskMap::new(),
        }
    }

    /// Get this task's UUID.
    pub fn get_uuid(&self) -> Uuid {
        self.uuid
    }

    /// Get the taskmap (used only for deprecated `Task::get_taskmap`).
    pub(in crate::task) fn get_taskmap(&self) -> &TaskMap {
        &self.taskmap
    }

    /// Get a value on this task.
    pub fn get(&self, property: impl AsRef<str>) -> Option<&str> {
        self.taskmap.get(property.as_ref()).map(|v| v.as_str())
    }

    /// Check if the given property is set.
    pub fn has(&self, property: impl AsRef<str>) -> bool {
        self.taskmap.contains_key(property.as_ref())
    }

    /// Enumerate all properties on this task, in arbitrary order.
    pub fn properties(&self) -> impl Iterator<Item = &String> {
        self.taskmap.keys()
    }

    /// Enumerate all properties and their values on this task, in arbitrary order.
    pub fn iter(&self) -> impl Iterator<Item = (&String, &String)> {
        self.taskmap.iter()
    }

    /// Set or remove a value on this task, adding an Update operation to the
    /// set of operations.
    ///
    /// Setting a value to `None` removes that value from the task.
    ///
    /// This method does not have any special handling of the `modified` property.
    pub fn update(
        &mut self,
        property: impl Into<String>,
        value: Option<String>,
        ops: &mut Operations,
    ) {
        let property = property.into();
        // `insert` and `remove` both return the previous value, so a single
        // map operation both mutates the map and captures `old_value` (no
        // separate lookup-and-clone needed).
        let old_value = match &value {
            Some(v) => self.taskmap.insert(property.clone(), v.clone()),
            None => self.taskmap.remove(&property),
        };
        ops.push(Operation::Update {
            uuid: self.uuid,
            property,
            old_value,
            value,
            timestamp: Utc::now(),
        });
    }

    /// Delete this task.
    ///
    /// Note that this is different from setting status to [`Deleted`](crate::Status::Deleted):
    /// the resulting operation removes the task from the database.
    ///
    /// Deletion may interact poorly with modifications to the same task on other replicas. For
    /// example, if a task is deleted on replica 1 and its description modified on replica 2, then
    /// after both replicas have fully synced, the resulting task will only have a `description`
    /// property.
    ///
    /// After this call, the `TaskData` value still exists but has no properties and should be
    /// dropped.
    pub fn delete(&mut self, ops: &mut Operations) {
        ops.push(Operation::Delete {
            uuid: self.uuid,
            // `mem::take` moves the map into the operation, leaving an empty map behind
            old_task: std::mem::take(&mut self.taskmap),
        });
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use chrono::DateTime;
    use pretty_assertions::assert_eq;

    // Fixed UUID so expected Operation values can be written literally.
    const TEST_UUID: Uuid = Uuid::from_u128(1234);

    /// Build an `Operations` list from a slice of operations, for comparison
    /// against the operations a method emitted.
    fn make_ops(ops: &[Operation]) -> Operations {
        let mut res = Operations::new();
        for op in ops {
            res.push(op.clone());
        }
        res
    }

    /// Set all operations' timestamps to the given timestamp, to ease use of
    /// `assert_eq!`.
    fn set_all_timestamps(ops: &mut Operations, set_to: DateTime<Utc>) {
        for op in ops {
            if let Operation::Update { timestamp, .. } = op {
                *timestamp = set_to;
            }
        }
    }

    #[test]
    fn create() {
        let mut ops = Operations::new();
        let t = TaskData::create(TEST_UUID, &mut ops);
        assert_eq!(t.uuid, TEST_UUID);
        assert_eq!(t.get_uuid(), TEST_UUID);
        assert_eq!(t.taskmap, TaskMap::new());
        assert_eq!(ops, make_ops(&[Operation::Create { uuid: TEST_UUID }]));
    }

    #[test]
    fn get_uuid() {
        let t = TaskData::new(TEST_UUID, TaskMap::new());
        assert_eq!(t.get_uuid(), TEST_UUID);
    }

    #[test]
    fn get() {
        let t = TaskData::new(TEST_UUID, [("prop".to_string(), "val".to_string())].into());
        assert_eq!(t.get("prop"), Some("val"));
        assert_eq!(t.get("nosuch"), None)
    }

    #[test]
    fn has() {
        let t = TaskData::new(TEST_UUID, [("prop".to_string(), "val".to_string())].into());
        assert!(t.has("prop"));
        assert!(!t.has("nosuch"));
    }

    #[test]
    fn properties() {
        let t = TaskData::new(
            TEST_UUID,
            [
                ("prop1".to_string(), "val".to_string()),
                ("prop2".to_string(), "val".to_string()),
            ]
            .into(),
        );
        // iteration order is arbitrary, so sort before comparing
        let mut props: Vec<_> = t.properties().collect();
        props.sort();
        assert_eq!(props, vec!["prop1", "prop2"]);
    }

    #[test]
    fn iter() {
        let t = TaskData::new(
            TEST_UUID,
            [
                ("prop1".to_string(), "val1".to_string()),
                ("prop2".to_string(), "val2".to_string()),
            ]
            .into(),
        );
        // iteration order is arbitrary, so sort before comparing
        let mut props: Vec<_> = t.iter().map(|(p, v)| (p.as_str(), v.as_str())).collect();
        props.sort();
        assert_eq!(props, vec![("prop1", "val1"), ("prop2", "val2")]);
    }

    #[test]
    fn update_new_prop() {
        let mut ops = Operations::new();
        let mut t = TaskData::new(TEST_UUID, TaskMap::new());
        t.update("prop1", Some("val1".into()), &mut ops);
        let now = Utc::now();
        set_all_timestamps(&mut ops, now);
        assert_eq!(
            ops,
            make_ops(&[Operation::Update {
                uuid: TEST_UUID,
                property: "prop1".into(),
                old_value: None,
                value: Some("val1".into()),
                timestamp: now,
            }])
        );
        assert_eq!(t.get("prop1"), Some("val1"));
    }

    #[test]
    fn update_existing_prop() {
        let mut ops = Operations::new();
        let mut t = TaskData::new(TEST_UUID, [("prop1".to_string(), "val".to_string())].into());
        t.update("prop1", Some("new".into()), &mut ops);
        let now = Utc::now();
        set_all_timestamps(&mut ops, now);
        assert_eq!(
            ops,
            make_ops(&[Operation::Update {
                uuid: TEST_UUID,
                property: "prop1".into(),
                old_value: Some("val".into()),
                value: Some("new".into()),
                timestamp: now,
            }])
        );
        assert_eq!(t.get("prop1"), Some("new"));
    }

    #[test]
    fn update_remove_prop() {
        let mut ops = Operations::new();
        let mut t = TaskData::new(TEST_UUID, [("prop1".to_string(), "val".to_string())].into());
        t.update("prop1", None, &mut ops);
        let now = Utc::now();
        set_all_timestamps(&mut ops, now);
        assert_eq!(
            ops,
            make_ops(&[Operation::Update {
                uuid: TEST_UUID,
                property: "prop1".into(),
                old_value: Some("val".into()),
                value: None,
                timestamp: now,
            }])
        );
        assert_eq!(t.get("prop1"), None);
    }

    #[test]
    fn delete() {
        let mut ops = Operations::new();
        let mut t = TaskData::new(TEST_UUID, [("prop1".to_string(), "val".to_string())].into());
        t.delete(&mut ops);
        assert_eq!(
            ops,
            make_ops(&[Operation::Delete {
                uuid: TEST_UUID,
                old_task: [("prop1".to_string(), "val".to_string())].into(),
            }])
        );
    }
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/tag.rs | src/task/tag.rs | use std::convert::TryFrom;
use std::fmt;
use std::str::FromStr;
/// A Tag is a descriptor for a task, that is either present or absent, and can be used for
/// filtering. Tags composed of all uppercase letters are reserved for synthetic tags.
///
/// Valid tags must not contain whitespace.
/// The first characters cannot be any of the characters in `+-*/()<>^!%=~`.
/// The first characters additionally cannot be a digit.
/// Subsequent characters cannot be `:`.
/// This definition is based on that of TaskWarrior src/Lexer.cpp isSingleCharOperator() isTag()
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub struct Tag(TagInner);
/// Inner type to hide the implementation
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub(super) enum TagInner {
User(String),
Synthetic(SyntheticTag),
}
// see doc comment for Tag, above
pub(crate) const INVALID_TAG_CHARACTERS: &str = "+-*/()<>^!%=~";
impl Tag {
/// True if this tag is a synthetic tag
pub fn is_synthetic(&self) -> bool {
matches!(self.0, TagInner::Synthetic(_))
}
/// True if this tag is a user-provided tag (not synthetic)
pub fn is_user(&self) -> bool {
matches!(self.0, TagInner::User(_))
}
pub(super) fn inner(&self) -> &TagInner {
&self.0
}
pub(super) fn from_inner(inner: TagInner) -> Self {
Self(inner)
}
}
impl FromStr for Tag {
type Err = anyhow::Error;
fn from_str(value: &str) -> Result<Tag, anyhow::Error> {
fn err(value: &str) -> Result<Tag, anyhow::Error> {
anyhow::bail!("invalid tag {:?}", value)
}
// first, look for synthetic tags
if value.chars().all(|c| c.is_ascii_uppercase()) {
if let Ok(st) = SyntheticTag::from_str(value) {
return Ok(Self(TagInner::Synthetic(st)));
}
// all uppercase, but not a valid synthetic tag
return err(value);
}
if let Some(c) = value.chars().next() {
if c.is_whitespace() || c.is_ascii_digit() || INVALID_TAG_CHARACTERS.contains(c) {
return err(value);
}
} else {
return err(value);
}
if !value
.chars()
.skip(1)
.all(|c| !(c.is_whitespace() || c == ':'))
{
return err(value);
}
Ok(Self(TagInner::User(String::from(value))))
}
}
impl TryFrom<&str> for Tag {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Tag, Self::Error> {
Self::from_str(value)
}
}
impl TryFrom<&String> for Tag {
type Error = anyhow::Error;
fn try_from(value: &String) -> Result<Tag, Self::Error> {
Self::from_str(&value[..])
}
}
impl fmt::Display for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.0 {
TagInner::User(s) => s.fmt(f),
TagInner::Synthetic(st) => st.as_ref().fmt(f),
}
}
}
impl AsRef<str> for Tag {
fn as_ref(&self) -> &str {
match &self.0 {
TagInner::User(s) => s.as_ref(),
TagInner::Synthetic(st) => st.as_ref(),
}
}
}
/// A synthetic tag, represented as an `enum`. This type is used directly by
/// [`taskchampion::task::task`] for efficiency.
#[derive(
Debug,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
strum_macros::EnumString,
strum_macros::AsRefStr,
strum_macros::EnumIter,
)]
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
pub(super) enum SyntheticTag {
// When adding items here, also implement and test them in `task.rs` and document them in
// `docs/src/tags.md`.
Waiting,
Active,
Pending,
Completed,
Deleted,
Blocked,
Unblocked,
Blocking,
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
use rstest::rstest;
use std::convert::TryInto;
#[rstest]
#[case::simple_single("z")]
#[case::simple_word("abc")]
#[case::colon_prefix(":abc")]
#[case::glyph_single("\u{1f980}")]
#[case::glyph_prefix("\u{1f980}testing")]
#[case::letters_and_numbers("a123_456")]
#[case::synthetic("WAITING")]
fn test_tag_try_into_success(#[case] s: &'static str) {
let tag: Tag = s.try_into().unwrap();
// check Display (via to_string) and AsRef while we're here
assert_eq!(tag.to_string(), s.to_owned());
assert_eq!(tag.as_ref(), s);
}
#[rstest]
#[case::empty("")]
#[case::colon_infix("a:b")]
#[case::digits("999")]
#[case::initial_plus("+testing")]
#[case::initial_dash_prefix("-testing")]
#[case::initial_dash_single("-")]
#[case::initial_white(" abcfoobar")]
#[case::subsequent_white("abc foobar")]
#[case::no_such_synthetic("NOSUCH")]
fn test_tag_try_into_err(#[case] s: &'static str) {
let tag: Result<Tag, _> = s.try_into();
assert_eq!(
tag.unwrap_err().to_string(),
format!("invalid tag \"{}\"", s)
);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/sync.rs | src/taskdb/sync.rs | use super::{apply, snapshot};
use crate::errors::Result;
use crate::server::{AddVersionResult, GetVersionResult, Server, SnapshotUrgency, SyncOp};
use crate::storage::StorageTxn;
use crate::Error;
use log::{info, trace, warn};
use serde::{Deserialize, Serialize};
use std::str;
#[derive(Serialize, Deserialize, Debug)]
struct Version {
operations: Vec<SyncOp>,
}
/// Sync to the given server, pulling remote changes and pushing local changes.
pub(super) async fn sync(
server: &mut Box<dyn Server>,
txn: &mut dyn StorageTxn,
avoid_snapshots: bool,
) -> Result<()> {
// if this taskdb is entirely empty, then start by getting and applying a snapshot
if txn.is_empty().await? {
trace!("storage is empty; attempting to apply a snapshot");
if let Some((version, snap)) = server.get_snapshot().await? {
snapshot::apply_snapshot(txn, version, snap.as_ref()).await?;
trace!("applied snapshot for version {}", version);
}
}
// For historical purposes, we keep transformed server operations in storage as synced
// operations. These will be added at the end of the sync process, when the outer loop is
// complete.
let mut transformed_server_ops = Vec::new();
// retry synchronizing until the server accepts our version (this allows for races between
// replicas trying to sync to the same server). If the server insists on the same base
// version twice, then we have diverged.
let mut requested_parent_version_id = None;
'outer: loop {
trace!("beginning sync outer loop");
let mut base_version_id = txn.base_version().await?;
let mut local_ops = txn.unsynced_operations().await?;
let sync_ops = local_ops.drain(..).filter_map(SyncOp::from_op);
let mut sync_ops_peekable = sync_ops.peekable();
// batch operations into versions of no more than a million bytes to avoid excessively large http requests.
let sync_ops_batched = std::iter::from_fn(|| {
let mut batch_size = 0;
let mut batch = Vec::new();
while let Some(op) = sync_ops_peekable.next_if(|op| {
batch_size += serde_json::to_string(&op).unwrap().len();
// include if the batch is empty or if the batch size limit is not exceeded.
batch.is_empty() || batch_size <= 1000000
}) {
batch.push(op);
}
Some(batch)
});
for mut sync_ops_batch in sync_ops_batched {
// first pull changes and "rebase" on top of them
loop {
trace!("beginning sync inner loop");
if let GetVersionResult::Version {
version_id,
history_segment,
..
} = server.get_child_version(base_version_id).await?
{
let version_str = str::from_utf8(&history_segment).unwrap();
let version: Version = serde_json::from_str(version_str).unwrap();
// apply this version and update base_version in storage
info!("applying version {:?} from server", version_id);
apply_version(
txn,
&mut sync_ops_batch,
&mut transformed_server_ops,
version,
)
.await?;
txn.set_base_version(version_id).await?;
base_version_id = version_id;
} else {
info!("no child versions of {:?}", base_version_id);
// at the moment, no more child versions, so we can try adding our own
break;
}
}
if sync_ops_batch.is_empty() {
info!("no changes to push to server");
// nothing to sync back to the server..
break 'outer;
}
trace!("sending {} operations to the server", sync_ops_batch.len());
// now make a version of our local changes and push those
let new_version = Version {
operations: sync_ops_batch,
};
let history_segment = serde_json::to_string(&new_version).unwrap().into();
info!("sending new version to server");
let (res, snapshot_urgency) =
server.add_version(base_version_id, history_segment).await?;
match res {
AddVersionResult::Ok(new_version_id) => {
info!("version {:?} received by server", new_version_id);
txn.set_base_version(new_version_id).await?;
base_version_id = new_version_id;
// make a snapshot if the server indicates it is urgent enough
let base_urgency = if avoid_snapshots {
SnapshotUrgency::High
} else {
SnapshotUrgency::Low
};
if snapshot_urgency >= base_urgency {
let snapshot = snapshot::make_snapshot(txn).await?;
server.add_snapshot(new_version_id, snapshot).await?;
}
}
AddVersionResult::ExpectedParentVersion(parent_version_id) => {
info!(
"new version rejected; must be based on {:?}",
parent_version_id
);
if let Some(requested) = requested_parent_version_id {
if parent_version_id == requested {
return Err(Error::OutOfSync);
}
}
requested_parent_version_id = Some(parent_version_id);
break;
}
}
}
}
// Add the transformed server ops to the DB. Critically, these are immediately marked as synced
// (via `txn.sync_complete`) and thus not subject to any of the invariants around operations
// and task state.
for op in transformed_server_ops {
txn.add_operation(op.into_op()).await?;
}
txn.sync_complete().await?;
txn.commit().await?;
Ok(())
}
async fn apply_version(
txn: &mut dyn StorageTxn,
local_ops: &mut Vec<SyncOp>,
transformed_server_ops: &mut Vec<SyncOp>,
mut version: Version,
) -> Result<()> {
// The situation here is that the server has already applied all server operations, and we
// have already applied all local operations, so states have diverged by several
// operations. We need to figure out what operations to apply locally and on the server in
// order to return to the same state.
//
// Operational transforms provide this on an operation-by-operation basis. To break this
// down, we treat each server operation individually, in order. For each such operation,
// we start in this state:
//
//
// base state-*
// / \-server op
// * *
// local / \ /
// ops * *
// / \ / new
// * * local
// local / \ / ops
// state-* *
// new-\ /
// server op *-new local state
//
// This is slightly complicated by the fact that the transform function can return None,
// indicating no operation is required. If this happens for a local op, we can just omit
// it. If it happens for server op, then we must copy the remaining local ops.
for server_op in version.operations.drain(..) {
trace!(
"rebasing local operations onto server operation {:?}",
server_op
);
let mut new_local_ops = Vec::with_capacity(local_ops.len());
let mut svr_op = Some(server_op);
for local_op in local_ops.drain(..) {
if let Some(o) = svr_op {
let (new_server_op, new_local_op) = SyncOp::transform(o, local_op.clone());
trace!("local operation {:?} -> {:?}", local_op, new_local_op);
svr_op = new_server_op;
if let Some(o) = new_local_op {
new_local_ops.push(o);
}
} else {
trace!(
"local operation {:?} unchanged (server operation consumed)",
local_op
);
new_local_ops.push(local_op);
}
}
if let Some(o) = svr_op {
if let Err(e) = apply::apply_op(txn, &o).await {
warn!("Invalid operation when syncing: {} (ignored)", e);
}
transformed_server_ops.push(o);
}
*local_ops = new_local_ops;
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::server::test::TestServer;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::{Storage, TaskMap};
use crate::taskdb::snapshot::SnapshotTasks;
use crate::taskdb::TaskDb;
use crate::{Operation, Operations};
use chrono::Utc;
use pretty_assertions::assert_eq;
use uuid::Uuid;
fn expect_operations(mut got: Vec<Operation>, mut exp: Vec<Operation>) {
got.sort();
exp.sort();
assert_eq!(got, exp);
}
#[tokio::test]
async fn test_sync() -> Result<()> {
let mut server: Box<dyn Server> = TestServer::new().server();
let mut db1 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
let mut db2 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
// make some changes in parallel to db1 and db2..
let uuid1 = Uuid::new_v4();
let mut ops = Operations::new();
let now1 = Utc::now();
ops.push(Operation::Create { uuid: uuid1 });
ops.push(Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
});
let uuid2 = Uuid::new_v4();
ops.push(Operation::Create { uuid: uuid2 });
ops.push(Operation::Update {
uuid: uuid2,
property: "title".into(),
value: Some("my second task".into()),
old_value: None,
timestamp: now1,
});
db1.commit_operations(ops, |_| false).await?;
// and synchronize those around
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
// now make updates to the same task on both sides
let mut ops = Operations::new();
let now2 = now1 + chrono::Duration::seconds(1);
ops.push(Operation::Update {
uuid: uuid2,
property: "priority".into(),
value: Some("H".into()),
old_value: None,
timestamp: now2,
});
db1.commit_operations(ops, |_| false).await?;
let mut ops = Operations::new();
let now3 = now2 + chrono::Duration::seconds(1);
ops.push(Operation::Update {
uuid: uuid2,
property: "project".into(),
value: Some("personal".into()),
old_value: None,
timestamp: now3,
});
db1.commit_operations(ops, |_| false).await?;
// and synchronize those around
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
for (dbnum, db) in [(1, &mut db1), (2, &mut db2)] {
eprintln!("checking db{dbnum}");
expect_operations(
db.get_task_operations(uuid1).await?,
vec![
Operation::Create { uuid: uuid1 },
Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
},
],
);
expect_operations(
db.get_task_operations(uuid2).await?,
vec![
Operation::Create { uuid: uuid2 },
Operation::Update {
uuid: uuid2,
property: "title".into(),
value: Some("my second task".into()),
old_value: None,
timestamp: now1,
},
Operation::Update {
uuid: uuid2,
property: "priority".into(),
value: Some("H".into()),
old_value: None,
timestamp: now2,
},
Operation::Update {
uuid: uuid2,
property: "project".into(),
value: Some("personal".into()),
old_value: None,
timestamp: now3,
},
],
);
}
Ok(())
}
#[tokio::test]
async fn test_sync_create_delete() -> Result<()> {
let mut server: Box<dyn Server> = TestServer::new().server();
let mut db1 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
let mut db2 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
// create and update a task..
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
let now1 = Utc::now();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
});
db1.commit_operations(ops, |_| false).await?;
// and synchronize those around
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
// delete and re-create the task on db1
let mut ops = Operations::new();
let now2 = now1 + chrono::Duration::seconds(1);
ops.push(Operation::Delete {
uuid,
old_task: TaskMap::new(),
});
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("my second task".into()),
old_value: None,
timestamp: now2,
});
db1.commit_operations(ops, |_| false).await?;
// and on db2, update a property of the task
let mut ops = Operations::new();
let now3 = now2 + chrono::Duration::seconds(1);
ops.push(Operation::Update {
uuid,
property: "project".into(),
value: Some("personal".into()),
old_value: None,
timestamp: now3,
});
db2.commit_operations(ops, |_| false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
// This is a case where the task operations appear different on the replicas,
// because the update to "project" on db2 loses to the delete.
expect_operations(
db1.get_task_operations(uuid).await?,
vec![
Operation::Create { uuid },
Operation::Create { uuid },
Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
},
Operation::Update {
uuid,
property: "title".into(),
value: Some("my second task".into()),
old_value: None,
timestamp: now2,
},
Operation::Delete {
uuid,
old_task: TaskMap::new(),
},
],
);
expect_operations(
db2.get_task_operations(uuid).await?,
vec![
Operation::Create { uuid },
Operation::Create { uuid },
Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
},
Operation::Update {
uuid,
property: "title".into(),
value: Some("my second task".into()),
old_value: None,
timestamp: now2,
},
// This operation is not visible on db1 because the task is already deleted there
// when this update is synced in.
Operation::Update {
uuid,
property: "project".into(),
value: Some("personal".into()),
old_value: None,
timestamp: now3,
},
Operation::Delete {
uuid,
old_task: TaskMap::new(),
},
],
);
Ok(())
}
#[tokio::test]
async fn test_sync_conflicting_updates() -> Result<()> {
let mut server: Box<dyn Server> = TestServer::new().server();
let mut db1 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
let mut db2 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
// create and update a task..
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
let now1 = Utc::now();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
});
db1.commit_operations(ops, |_| false).await?;
// and synchronize those around
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
// add different updates on db1 and db2
let mut ops = Operations::new();
let now2 = now1 + chrono::Duration::seconds(1);
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("from db1".into()),
old_value: None,
timestamp: now2,
});
db1.commit_operations(ops, |_| false).await?;
// and on db2, update a property of the task
let mut ops = Operations::new();
let now3 = now2 + chrono::Duration::seconds(1);
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("from db2".into()),
old_value: None,
timestamp: now3,
});
db2.commit_operations(ops, |_| false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
expect_operations(
db1.get_task_operations(uuid).await?,
vec![
Operation::Create { uuid },
Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
},
// This operation is not visible on db2 because the "from db2" update has a later
// timestamp and thus wins over this one.
Operation::Update {
uuid,
property: "title".into(),
value: Some("from db1".into()),
old_value: None,
timestamp: now2,
},
Operation::Update {
uuid,
property: "title".into(),
value: Some("from db2".into()),
old_value: None,
timestamp: now3,
},
],
);
expect_operations(
db2.get_task_operations(uuid).await?,
vec![
Operation::Create { uuid },
Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: now1,
},
Operation::Update {
uuid,
property: "title".into(),
value: Some("from db2".into()),
old_value: None,
timestamp: now3,
},
],
);
Ok(())
}
#[tokio::test]
async fn test_sync_add_snapshot_start_with_snapshot() -> Result<()> {
let mut test_server = TestServer::new();
let mut server: Box<dyn Server> = test_server.server();
let mut db1 = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: Utc::now(),
});
db1.commit_operations(ops, |_| false).await?;
test_server.set_snapshot_urgency(SnapshotUrgency::High);
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
// assert that a snapshot was added
let base_version = db1.storage.txn().await?.base_version().await?;
let (v, s) = test_server
.snapshot()
.ok_or_else(|| anyhow::anyhow!("no snapshot"))?;
assert_eq!(v, base_version);
let tasks = SnapshotTasks::decode(&s)?.into_inner();
assert_eq!(tasks[0].0, uuid);
// update the taskdb and sync again
let mut ops = Operations::new();
ops.push(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task, updated".into()),
old_value: None,
timestamp: Utc::now(),
});
db1.commit_operations(ops, |_| false).await?;
sync(&mut server, db1.storage.txn().await?.as_mut(), false).await?;
// delete the first version, so that db2 *must* initialize from
// the snapshot
test_server.delete_version(Uuid::nil());
// sync to a new DB and check that we got the expected results
let mut db2 = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db2.storage.txn().await?.as_mut(), false).await?;
let task = db2.get_task(uuid).await?.unwrap();
assert_eq!(task.get("title").unwrap(), "my first task, updated");
Ok(())
}
#[tokio::test]
async fn test_sync_avoids_snapshot() -> Result<()> {
let test_server = TestServer::new();
let mut server: Box<dyn Server> = test_server.server();
let mut db1 = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
db1.commit_operations(ops, |_| false).await?;
test_server.set_snapshot_urgency(SnapshotUrgency::Low);
sync(&mut server, db1.storage.txn().await?.as_mut(), true).await?;
// assert that a snapshot was not added, because we indicated
// we wanted to avoid snapshots and it was only low urgency
assert_eq!(test_server.snapshot(), None);
Ok(())
}
#[tokio::test]
async fn test_sync_batched() -> Result<()> {
let test_server = TestServer::new();
let mut server: Box<dyn Server> = test_server.server();
let mut db = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db.storage.txn().await?.as_mut(), false).await?;
// add a task to db
let uuid1 = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid: uuid1 });
ops.push(Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: Utc::now(),
});
db.commit_operations(ops, |_| false).await?;
sync(&mut server, db.storage.txn().await?.as_mut(), true).await?;
assert_eq!(test_server.versions_len(), 1);
// chars are four bytes, but they're only one when converted to a String
let data = vec!['a'; 400000];
// add some large operations to db
let mut ops = Operations::new();
for _ in 0..3 {
ops.push(Operation::Update {
uuid: uuid1,
property: "description".into(),
value: Some(data.iter().collect()),
old_value: None,
timestamp: Utc::now(),
});
}
db.commit_operations(ops, |_| false).await?;
// this sync batches the operations into two versions.
sync(&mut server, db.storage.txn().await?.as_mut(), true).await?;
assert_eq!(test_server.versions_len(), 3);
Ok(())
}
#[tokio::test]
async fn test_sync_batches_at_least_one_op() -> Result<()> {
let test_server = TestServer::new();
let mut server: Box<dyn Server> = test_server.server();
let mut db = TaskDb::new(InMemoryStorage::new());
sync(&mut server, db.storage.txn().await?.as_mut(), false).await?;
// add a task to db
let uuid1 = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid: uuid1 });
ops.push(Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
old_value: None,
timestamp: Utc::now(),
});
db.commit_operations(ops, |_| false).await?;
sync(&mut server, db.storage.txn().await?.as_mut(), true).await?;
assert_eq!(test_server.versions_len(), 1);
// add an operation greater than the batch limit
let data = vec!['a'; 1000001];
let mut ops = Operations::new();
ops.push(Operation::Update {
uuid: uuid1,
property: "description".into(),
value: Some(data.iter().collect()),
old_value: None,
timestamp: Utc::now(),
});
db.commit_operations(ops, |_| false).await?;
sync(&mut server, db.storage.txn().await?.as_mut(), true).await?;
assert_eq!(test_server.versions_len(), 2);
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/apply.rs | src/taskdb/apply.rs | use crate::errors::{Error, Result};
use crate::operation::Operation;
use crate::server::SyncOp;
use crate::storage::{StorageTxn, TaskMap};
use crate::Operations;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use uuid::Uuid;
/// Apply `operations` to the database in the given single transaction.
///
/// This updates the set of tasks in the database, but does not modify the list of operations.
/// If the operation does not make sense in the current state, it is ignored.
///
/// The transaction is not committed.
pub(super) async fn apply_operations(
txn: &mut dyn StorageTxn,
operations: &Operations,
) -> Result<()> {
// A cache of TaskMaps updated in this sequence of operations, but for which `txn.set_task` has
// not yet been called.
let mut tasks: HashMap<Uuid, Option<TaskMap>> = HashMap::new();
async fn get_cache<'t>(
uuid: Uuid,
tasks: &'t mut HashMap<Uuid, Option<TaskMap>>,
txn: &mut dyn StorageTxn,
) -> Result<Option<&'t mut TaskMap>> {
match tasks.entry(uuid) {
Entry::Occupied(occupied_entry) => Ok(occupied_entry.into_mut().as_mut()),
Entry::Vacant(vacant_entry) => {
let task = txn.get_task(uuid).await?;
Ok(vacant_entry.insert(task).as_mut())
}
}
}
// Call `txn.set_task` for this task, if necessary, and remove from the cache.
async fn flush_cache(
uuid: Uuid,
tasks: &mut HashMap<Uuid, Option<TaskMap>>,
txn: &mut dyn StorageTxn,
) -> Result<()> {
if let Entry::Occupied(occupied_entry) = tasks.entry(uuid) {
let v = occupied_entry.remove();
if let Some(taskmap) = v {
txn.set_task(uuid, taskmap).await?;
}
}
Ok(())
}
for operation in operations {
match operation {
Operation::Create { uuid } => {
// The create_task method will do nothing if the task exists. If it was cached
// as not existing, clear that information. If it had cached updates, then there
// is no harm flushing those updates now.
flush_cache(*uuid, &mut tasks, txn).await?;
txn.create_task(*uuid).await?;
}
Operation::Delete { uuid, .. } => {
// The delete_task method will do nothing if the task does not exist.
txn.delete_task(*uuid).await?;
// The task now unconditionally does not exist. If there was a pending
// `txn.set_task`, it can safely be skipped.
tasks.insert(*uuid, None);
}
Operation::Update {
uuid,
property,
value,
..
} => {
let task = get_cache(*uuid, &mut tasks, txn).await?;
// If the task does not exist, do nothing.
if let Some(task) = task {
if let Some(v) = value {
task.insert(property.clone(), v.clone());
} else {
task.remove(property);
}
}
}
Operation::UndoPoint => {}
}
}
// Flush any remaining tasks in the cache.
while let Some((uuid, _)) = tasks.iter().next() {
flush_cache(*uuid, &mut tasks, txn).await?;
}
Ok(())
}
/// Apply a [`SyncOp`] to the TaskDb's set of tasks (without recording it in the list of operations)
pub(super) async fn apply_op(txn: &mut dyn StorageTxn, op: &SyncOp) -> Result<()> {
match op {
SyncOp::Create { uuid } => {
// insert if the task does not already exist
if !txn.create_task(*uuid).await? {
return Err(Error::Database(format!("Task {uuid} already exists")));
}
}
SyncOp::Delete { ref uuid } => {
if !txn.delete_task(*uuid).await? {
return Err(Error::Database(format!("Task {uuid} does not exist")));
}
}
SyncOp::Update {
ref uuid,
ref property,
ref value,
timestamp: _,
} => {
// update if this task exists, otherwise ignore
if let Some(mut task) = txn.get_task(*uuid).await? {
match value {
Some(ref val) => task.insert(property.to_string(), val.clone()),
None => task.remove(property),
};
txn.set_task(*uuid, task).await?;
} else {
return Err(Error::Database(format!("Task {uuid} does not exist")));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
#![allow(clippy::vec_init_then_push)]
use super::*;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::{taskmap_with, Storage, TaskMap};
use crate::taskdb::TaskDb;
use chrono::Utc;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use uuid::Uuid;
#[tokio::test]
async fn apply_operations_create() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![(uuid, vec![])]);
Ok(())
}
#[tokio::test]
async fn apply_operations_create_exists() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
{
let mut txn = db.storage.txn().await?;
txn.create_task(uuid).await?;
txn.set_task(uuid, taskmap_with(vec![("foo".into(), "bar".into())]))
.await?;
txn.commit().await?;
}
{
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("foo".into(), "bar".into())])]
);
Ok(())
}
#[tokio::test]
async fn apply_operations_create_exists_update() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let now = Utc::now();
let uuid = Uuid::new_v4();
{
let mut txn = db.storage.txn().await?;
txn.create_task(uuid).await?;
txn.set_task(uuid, taskmap_with(vec![("foo".into(), "bar".into())]))
.await?;
txn.commit().await?;
}
{
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: None,
});
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![(
uuid,
vec![
("foo".into(), "bar".into()),
("title".into(), "my task".into())
]
)]
);
Ok(())
}
#[tokio::test]
async fn apply_operations_create_update() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: None,
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("title".into(), "my task".into())])]
);
Ok(())
}
#[tokio::test]
async fn apply_operations_create_update_delete_prop() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: None,
});
ops.push(Operation::Update {
uuid,
property: String::from("priority"),
value: Some("H".into()),
timestamp: now,
old_value: None,
});
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: None,
timestamp: now,
old_value: Some("my task".into()),
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("priority".into(), "H".into())])]
);
Ok(())
}
#[tokio::test]
async fn apply_operations_update_does_not_exist() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: None,
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![]);
Ok(())
}
#[tokio::test]
async fn apply_operations_delete_then_update() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("old"),
value: Some("uhoh".into()),
timestamp: now,
old_value: None,
});
ops.push(Operation::Delete {
uuid,
old_task: taskmap_with(vec![]),
});
ops.push(Operation::Update {
uuid,
property: String::from("new"),
value: Some("uhoh".into()),
timestamp: now,
old_value: None,
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![]);
Ok(())
}
#[tokio::test]
async fn apply_operations_several_tasks() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let mut uuids = [Uuid::new_v4(), Uuid::new_v4()];
uuids.sort();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid: uuids[0] });
ops.push(Operation::Create { uuid: uuids[1] });
ops.push(Operation::Update {
uuid: uuids[0],
property: String::from("p"),
value: Some("1".into()),
timestamp: now,
old_value: None,
});
ops.push(Operation::Update {
uuid: uuids[1],
property: String::from("p"),
value: Some("2".into()),
timestamp: now,
old_value: None,
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![
(uuids[0], vec![("p".into(), "1".into())]),
(uuids[1], vec![("p".into(), "2".into())])
]
);
Ok(())
}
#[tokio::test]
async fn apply_operations_create_delete() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: None,
});
ops.push(Operation::Delete {
uuid,
old_task: taskmap_with(vec![]),
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![]);
Ok(())
}
#[tokio::test]
async fn apply_operations_delete_not_present() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
ops.push(Operation::Delete {
uuid,
old_task: taskmap_with(vec![]),
});
{
let mut txn = db.storage.txn().await?;
apply_operations(txn.as_mut(), &ops).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![]);
Ok(())
}
#[tokio::test]
async fn test_apply_create() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let op = SyncOp::Create { uuid };
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![(uuid, vec![])]);
Ok(())
}
#[tokio::test]
async fn test_apply_create_exists() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
{
let mut txn = db.storage.txn().await?;
txn.create_task(uuid).await?;
let mut taskmap = TaskMap::new();
taskmap.insert("foo".into(), "bar".into());
txn.set_task(uuid, taskmap).await?;
txn.commit().await?;
}
let op = SyncOp::Create { uuid };
{
let mut txn = db.storage.txn().await?;
assert!(apply_op(txn.as_mut(), &op).await.is_err());
}
// create did not delete the old task..
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("foo".into(), "bar".into())])]
);
Ok(())
}
#[tokio::test]
async fn test_apply_create_update() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let op1 = SyncOp::Create { uuid };
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op1).await?;
txn.commit().await?;
}
let op2 = SyncOp::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
};
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op2).await?;
txn.commit().await?;
}
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("title".into(), "my task".into())])]
);
Ok(())
}
#[tokio::test]
async fn test_apply_create_update_delete_prop() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let op1 = SyncOp::Create { uuid };
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op1).await?;
txn.commit().await?;
}
let op2 = SyncOp::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
};
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op2).await?;
txn.commit().await?;
}
let op3 = SyncOp::Update {
uuid,
property: String::from("priority"),
value: Some("H".into()),
timestamp: now,
};
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op3).await?;
txn.commit().await?;
}
let op4 = SyncOp::Update {
uuid,
property: String::from("title"),
value: None,
timestamp: now,
};
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op4).await?;
txn.commit().await?;
}
let mut exp = HashMap::new();
let mut task = HashMap::new();
task.insert(String::from("priority"), String::from("H"));
exp.insert(uuid, task);
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("priority".into(), "H".into())])]
);
Ok(())
}
#[tokio::test]
async fn test_apply_update_does_not_exist() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let op = SyncOp::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn().await?;
assert_eq!(
apply_op(txn.as_mut(), &op).await.err().unwrap().to_string(),
format!("Task Database Error: Task {} does not exist", uuid)
);
txn.commit().await?;
}
Ok(())
}
#[tokio::test]
async fn test_apply_create_delete() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let op1 = SyncOp::Create { uuid };
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op1).await?;
txn.commit().await?;
}
let op2 = SyncOp::Update {
uuid,
property: String::from("priority"),
value: Some("H".into()),
timestamp: now,
};
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op2).await?;
txn.commit().await?;
}
let op3 = SyncOp::Delete { uuid };
{
let mut txn = db.storage.txn().await?;
apply_op(txn.as_mut(), &op3).await?;
txn.commit().await?;
}
assert_eq!(db.sorted_tasks().await, vec![]);
let mut old_task = TaskMap::new();
old_task.insert("priority".into(), "H".into());
Ok(())
}
#[tokio::test]
async fn test_apply_delete_not_present() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let op = SyncOp::Delete { uuid };
{
let mut txn = db.storage.txn().await?;
assert!(apply_op(txn.as_mut(), &op).await.is_err());
txn.commit().await?;
}
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/undo.rs | src/taskdb/undo.rs | use super::apply;
use crate::errors::Result;
use crate::operation::{Operation, Operations};
use crate::server::SyncOp;
use crate::storage::StorageTxn;
use chrono::Utc;
use log::{debug, info, trace};
/// Return the operations back to and including the last undo point, or since the last sync if no
/// undo point is found.
///
/// The operations are returned in the order they were applied. Use [`commit_reversed_operations`]
/// to "undo" them.
pub(crate) async fn get_undo_operations(txn: &mut dyn StorageTxn) -> Result<Operations> {
let local_ops = txn.unsynced_operations().await?;
let last_undo_op_idx = local_ops
.iter()
.enumerate()
.rev()
.find(|(_, op)| op.is_undo_point())
.map(|(i, _)| i);
if let Some(last_undo_op_idx) = last_undo_op_idx {
Ok(local_ops[last_undo_op_idx..].to_vec())
} else {
Ok(local_ops)
}
}
/// Generate a sequence of SyncOp's to reverse the effects of this Operation.
fn reverse_ops(op: Operation) -> Vec<SyncOp> {
match op {
Operation::Create { uuid } => vec![SyncOp::Delete { uuid }],
Operation::Delete { uuid, mut old_task } => {
let mut ops = vec![SyncOp::Create { uuid }];
// We don't have the original update timestamp, but it doesn't
// matter because this SyncOp will just be applied and discarded.
let timestamp = Utc::now();
for (property, value) in old_task.drain() {
ops.push(SyncOp::Update {
uuid,
property,
value: Some(value),
timestamp,
});
}
ops
}
Operation::Update {
uuid,
property,
old_value,
timestamp,
..
} => vec![SyncOp::Update {
uuid,
property,
value: old_value,
timestamp,
}],
Operation::UndoPoint => vec![],
}
}
/// Commit the reverse of the given operations, beginning with the last operation in the given
/// operations and proceeding to the first.
///
/// This method only supports reversing operations if they precisely match local operations that
/// have not yet been synchronized, and will return `false` if this is not the case.
pub(crate) async fn commit_reversed_operations(
txn: &mut dyn StorageTxn,
undo_ops: Operations,
) -> Result<bool> {
let mut applied = false;
let local_ops = txn.unsynced_operations().await?;
let mut undo_ops = undo_ops.to_vec();
if undo_ops.is_empty() {
return Ok(false);
}
// TODO Support concurrent undo by adding the reverse of undo_ops rather than popping from operations.
// Verify that undo_ops are the most recent local ops.
let mut ok = false;
let local_undo_ops;
if undo_ops.len() <= local_ops.len() {
let new_len = local_ops.len() - undo_ops.len();
local_undo_ops = &local_ops[new_len..];
if local_undo_ops == undo_ops {
ok = true;
}
}
if !ok {
info!("Undo failed: concurrent changes to the database occurred.");
debug!("local_ops={:#?}\nundo_ops={:#?}", local_ops, undo_ops);
return Ok(applied);
}
undo_ops.reverse();
for op in undo_ops {
debug!("Reversing operation {:?}", op);
let rev_ops = reverse_ops(op.clone());
for op in rev_ops {
trace!("Applying reversed operation {:?}", op);
apply::apply_op(txn, &op).await?;
applied = true;
}
txn.remove_operation(op).await?;
}
txn.commit().await?;
Ok(applied)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::Storage;
use crate::{storage::taskmap_with, taskdb::TaskDb};
use crate::{Operation, Operations};
use chrono::Utc;
use pretty_assertions::assert_eq;
use uuid::Uuid;
#[tokio::test]
#[allow(clippy::vec_init_then_push)]
async fn test_apply_create() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let timestamp = Utc::now();
let mut ops = Operations::new();
// apply a few ops, capture the DB state, make an undo point, and then apply a few more
// ops.
ops.push(Operation::Create { uuid: uuid1 });
ops.push(Operation::Update {
uuid: uuid1,
property: "prop".into(),
value: Some("v1".into()),
old_value: None,
timestamp,
});
ops.push(Operation::Create { uuid: uuid2 });
ops.push(Operation::Update {
uuid: uuid2,
property: "prop".into(),
value: Some("v2".into()),
old_value: None,
timestamp,
});
ops.push(Operation::Update {
uuid: uuid2,
property: "prop2".into(),
value: Some("v3".into()),
old_value: Some("v2".into()),
timestamp,
});
db.commit_operations(ops, |_| false).await?;
let db_state = db.sorted_tasks().await;
let mut ops = Operations::new();
ops.push(Operation::UndoPoint);
ops.push(Operation::Delete {
uuid: uuid1,
old_task: [("prop".to_string(), "v1".to_string())].into(),
});
ops.push(Operation::Update {
uuid: uuid2,
property: "prop".into(),
value: None,
old_value: Some("v2".into()),
timestamp,
});
ops.push(Operation::Update {
uuid: uuid2,
property: "prop2".into(),
value: Some("new-value".into()),
old_value: Some("v3".into()),
timestamp,
});
db.commit_operations(ops, |_| false).await?;
assert_eq!(
db.operations().await.len(),
9,
"{:#?}",
db.operations().await
);
let undo_ops = get_undo_operations(db.storage.txn().await?.as_mut()).await?;
assert_eq!(undo_ops.len(), 4, "{:#?}", undo_ops);
assert_eq!(&undo_ops[..], &db.operations().await[5..]);
// Try committing the wrong set of ops.
assert!(!commit_reversed_operations(
db.storage.txn().await?.as_mut(),
undo_ops[1..=2].to_vec(),
)
.await?);
assert!(commit_reversed_operations(db.storage.txn().await?.as_mut(), undo_ops).await?);
// Note that we've subtracted the length of undo_ops.
assert_eq!(
db.operations().await.len(),
5,
"{:#?}",
db.operations().await
);
assert_eq!(
db.sorted_tasks().await,
db_state,
"{:#?}",
db.sorted_tasks().await
);
// Note that the number of undo operations is equal to the number of operations in the
// database here because there are no UndoPoints.
let undo_ops = get_undo_operations(db.storage.txn().await?.as_mut()).await?;
assert_eq!(undo_ops.len(), 5, "{:#?}", undo_ops);
assert!(commit_reversed_operations(db.storage.txn().await?.as_mut(), undo_ops).await?);
// empty db
assert_eq!(
db.operations().await.len(),
0,
"{:#?}",
db.operations().await
);
assert_eq!(
db.sorted_tasks().await,
vec![],
"{:#?}",
db.sorted_tasks().await
);
let undo_ops = get_undo_operations(db.storage.txn().await?.as_mut()).await?;
assert_eq!(undo_ops.len(), 0, "{:#?}", undo_ops);
// nothing left to undo, so commit_undo_ops() returns false
assert!(!commit_reversed_operations(db.storage.txn().await?.as_mut(), undo_ops).await?);
Ok(())
}
#[test]
fn test_reverse_create() {
let uuid = Uuid::new_v4();
assert_eq!(
reverse_ops(Operation::Create { uuid }),
vec![SyncOp::Delete { uuid }]
);
}
#[test]
fn test_reverse_delete() {
let uuid = Uuid::new_v4();
let reversed = reverse_ops(Operation::Delete {
uuid,
old_task: taskmap_with(vec![("prop1".into(), "v1".into())]),
});
assert_eq!(reversed.len(), 2);
assert_eq!(reversed[0], SyncOp::Create { uuid });
assert!(matches!(
&reversed[1],
SyncOp::Update { uuid: u, property: p, value: Some(v), ..}
if u == &uuid && p == "prop1" && v == "v1"
));
}
#[test]
fn test_reverse_update() {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
assert_eq!(
reverse_ops(Operation::Update {
uuid,
property: "prop".into(),
old_value: Some("foo".into()),
value: Some("v".into()),
timestamp,
}),
vec![SyncOp::Update {
uuid,
property: "prop".into(),
value: Some("foo".into()),
timestamp,
}]
);
}
#[test]
fn test_reverse_undo_point() {
assert_eq!(reverse_ops(Operation::UndoPoint), vec![]);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/mod.rs | src/taskdb/mod.rs | use std::collections::HashSet;
use crate::errors::Result;
use crate::operation::Operation;
use crate::server::Server;
use crate::storage::{Storage, TaskMap};
use crate::Operations;
use uuid::Uuid;
mod apply;
mod snapshot;
mod sync;
pub(crate) mod undo;
mod working_set;
/// A TaskDb is the backend for a replica. It manages the storage, operations, synchronization,
/// and so on, and all the invariants that come with it. It leaves the meaning of particular task
/// properties to the replica and task implementations.
pub(crate) struct TaskDb<S: Storage> {
storage: S,
}
impl<S: Storage> TaskDb<S> {
/// Create a new TaskDb with the given backend storage
pub(crate) fn new(storage: S) -> TaskDb<S> {
TaskDb { storage }
}
/// Apply `operations` to the database in a single transaction.
///
/// The operations will be appended to the list of local operations, and the set of tasks will
/// be updated accordingly.
///
/// Any operations for which `add_to_working_set` returns true will cause the relevant
/// task to be added to the working set.
pub(crate) async fn commit_operations<F>(
&mut self,
operations: Operations,
add_to_working_set: F,
) -> Result<()>
where
F: Fn(&Operation) -> bool,
{
let mut txn = self.storage.txn().await?;
apply::apply_operations(txn.as_mut(), &operations).await?;
// Calculate the task(s) to add to the working set.
let mut to_add = Vec::new();
for operation in &operations {
if add_to_working_set(operation) {
match operation {
Operation::Create { uuid }
| Operation::Update { uuid, .. }
| Operation::Delete { uuid, .. } => to_add.push(*uuid),
_ => {}
}
}
}
let mut working_set: HashSet<Uuid> = txn
.get_working_set()
.await?
.iter()
.filter_map(|u| *u)
.collect();
for uuid in to_add {
// Double-check that we are not adding a task to the working-set twice.
if !working_set.contains(&uuid) {
txn.add_to_working_set(uuid).await?;
working_set.insert(uuid);
}
}
for operation in operations {
txn.add_operation(operation).await?;
}
txn.commit().await
}
/// Get all tasks.
pub(crate) async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let mut txn = self.storage.txn().await?;
txn.all_tasks().await
}
/// Get the UUIDs of all tasks
pub(crate) async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
let mut txn = self.storage.txn().await?;
txn.all_task_uuids().await
}
/// Get the working set
pub(crate) async fn working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
let mut txn = self.storage.txn().await?;
txn.get_working_set().await
}
/// Get a single task, by uuid.
pub(crate) async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
let mut txn = self.storage.txn().await?;
txn.get_task(uuid).await
}
/// Get all pending tasks from the working set
pub(crate) async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let mut txn = self.storage.txn().await?;
txn.get_pending_tasks().await
}
pub(crate) async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Operations> {
let mut txn = self.storage.txn().await?;
txn.get_task_operations(uuid).await
}
/// Rebuild the working set using a function to identify tasks that should be in the set. This
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
/// are not already in the working set but should be. The rebuild occurs in a single
/// trasnsaction against the storage backend.
pub(crate) async fn rebuild_working_set<F>(
&mut self,
in_working_set: F,
renumber: bool,
) -> Result<()>
where
F: Fn(&TaskMap) -> bool,
{
working_set::rebuild(self.storage.txn().await?.as_mut(), in_working_set, renumber).await
}
/// Sync to the given server, pulling remote changes and pushing local changes.
///
/// If `avoid_snapshots` is true, the sync operations produces a snapshot only when the server
/// indicate it is urgent (snapshot urgency "high"). This allows time for other replicas to
/// create a snapshot before this one does.
///
/// Set this to true on systems more constrained in CPU, memory, or bandwidth than a typical desktop
/// system
pub(crate) async fn sync(
&mut self,
server: &mut Box<dyn Server>,
avoid_snapshots: bool,
) -> Result<()> {
let mut txn = self.storage.txn().await?;
sync::sync(server, txn.as_mut(), avoid_snapshots).await
}
/// Return the operations back to and including the last undo point, or since the last sync if
/// no undo point is found.
///
/// The operations are returned in the order they were applied. Use
/// [`commit_reversed_operations`] to "undo" them.
pub(crate) async fn get_undo_operations(&mut self) -> Result<Operations> {
let mut txn = self.storage.txn().await?;
undo::get_undo_operations(txn.as_mut()).await
}
/// Commit the reverse of the given operations, beginning with the last operation in the given
/// operations and proceeding to the first.
///
/// This method only supports reversing operations if they precisely match local operations
/// that have not yet been synchronized, and will return `false` if this is not the case.
pub(crate) async fn commit_reversed_operations(
&mut self,
undo_ops: Operations,
) -> Result<bool> {
let mut txn = self.storage.txn().await?;
undo::commit_reversed_operations(txn.as_mut(), undo_ops).await
}
/// Get the number of un-synchronized operations in storage, excluding undo
/// operations.
pub(crate) async fn num_operations(&mut self) -> Result<usize> {
let mut txn = self.storage.txn().await?;
Ok(txn
.unsynced_operations()
.await?
.iter()
.filter(|o| !o.is_undo_point())
.count())
}
/// Get the number of (un-synchronized) undo points in storage.
pub(crate) async fn num_undo_points(&mut self) -> Result<usize> {
let mut txn = self.storage.txn().await?;
Ok(txn
.unsynced_operations()
.await?
.iter()
.filter(|o| o.is_undo_point())
.count())
}
// functions for supporting tests
#[cfg(test)]
pub(crate) async fn sorted_tasks(&mut self) -> Vec<(Uuid, Vec<(String, String)>)> {
let mut res: Vec<(Uuid, Vec<(String, String)>)> = self
.all_tasks()
.await
.unwrap()
.iter()
.map(|(u, t)| {
let mut t = t
.iter()
.map(|(p, v)| (p.clone(), v.clone()))
.collect::<Vec<(String, String)>>();
t.sort();
(*u, t)
})
.collect();
res.sort();
res
}
#[cfg(test)]
pub(crate) async fn operations(&mut self) -> Vec<Operation> {
let mut txn = self.storage.txn().await.unwrap();
txn.unsynced_operations().await.unwrap().to_vec()
}
}
#[cfg(test)]
mod tests {
use crate::storage::inmemory::InMemoryStorage;
use super::*;
use chrono::Utc;
use pretty_assertions::assert_eq;
use uuid::Uuid;
#[tokio::test]
async fn commit_operations() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let now = Utc::now();
let mut ops = Operations::new();
ops.push(Operation::Create { uuid });
ops.push(Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: Some("old".into()),
});
db.commit_operations(ops, |_| false).await?;
assert_eq!(
db.sorted_tasks().await,
vec![(uuid, vec![("title".into(), "my task".into())])]
);
assert_eq!(
db.operations().await,
vec![
Operation::Create { uuid },
Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: now,
old_value: Some("old".into()),
},
]
);
Ok(())
}
#[tokio::test]
async fn commit_operations_update_working_set() -> Result<()> {
let mut db = TaskDb::new(InMemoryStorage::new());
let mut uuids = [Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()];
uuids.sort();
let [uuid1, uuid2, uuid3] = uuids;
// uuid1 already exists in the working set.
{
let mut txn = db.storage.txn().await?;
txn.add_to_working_set(uuid1).await?;
txn.commit().await?;
}
let mut ops = Operations::new();
ops.push(Operation::Create { uuid: uuid1 });
ops.push(Operation::Create { uuid: uuid2 });
ops.push(Operation::Create { uuid: uuid3 });
ops.push(Operation::Create { uuid: uuid2 });
ops.push(Operation::Create { uuid: uuid3 });
// return true for updates to uuid1 or uuid2.
let add_to_working_set = |op: &Operation| match op {
Operation::Create { uuid } => *uuid == uuid1 || *uuid == uuid2,
_ => false,
};
db.commit_operations(ops, add_to_working_set).await?;
assert_eq!(
db.sorted_tasks().await,
vec![(uuid1, vec![]), (uuid2, vec![]), (uuid3, vec![]),]
);
assert_eq!(
db.operations().await,
vec![
Operation::Create { uuid: uuid1 },
Operation::Create { uuid: uuid2 },
Operation::Create { uuid: uuid3 },
Operation::Create { uuid: uuid2 },
Operation::Create { uuid: uuid3 },
]
);
// uuid2 was added to the working set, once, and uuid3 was not.
assert_eq!(
db.working_set().await?,
vec![None, Some(uuid1), Some(uuid2)],
);
Ok(())
}
#[tokio::test]
async fn test_num_operations() {
let mut db = TaskDb::new(InMemoryStorage::new());
let mut ops = Operations::new();
ops.push(Operation::Create {
uuid: Uuid::new_v4(),
});
ops.push(Operation::UndoPoint);
ops.push(Operation::Create {
uuid: Uuid::new_v4(),
});
db.commit_operations(ops, |_| false).await.unwrap();
assert_eq!(db.num_operations().await.unwrap(), 2);
}
#[tokio::test]
async fn test_num_undo_points() {
let mut db = TaskDb::new(InMemoryStorage::new());
let mut ops = Operations::new();
ops.push(Operation::UndoPoint);
db.commit_operations(ops, |_| false).await.unwrap();
assert_eq!(db.num_undo_points().await.unwrap(), 1);
let mut ops = Operations::new();
ops.push(Operation::UndoPoint);
db.commit_operations(ops, |_| false).await.unwrap();
assert_eq!(db.num_undo_points().await.unwrap(), 2);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/snapshot.rs | src/taskdb/snapshot.rs | use crate::errors::{Error, Result};
use crate::server::VersionId;
use crate::storage::{StorageTxn, TaskMap};
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, Serializer};
use std::fmt;
use uuid::Uuid;
/// A newtype to wrap the result of [`crate::storage::StorageTxn::all_tasks`]
pub(super) struct SnapshotTasks(Vec<(Uuid, TaskMap)>);
impl Serialize for SnapshotTasks {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(self.0.len()))?;
for (k, v) in &self.0 {
map.serialize_entry(k, v)?;
}
map.end()
}
}
struct TaskDbVisitor;
impl<'de> Visitor<'de> for TaskDbVisitor {
type Value = SnapshotTasks;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a map representing a task snapshot")
}
fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut map = SnapshotTasks(Vec::with_capacity(access.size_hint().unwrap_or(0)));
while let Some((key, value)) = access.next_entry()? {
map.0.push((key, value));
}
Ok(map)
}
}
impl<'de> Deserialize<'de> for SnapshotTasks {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(TaskDbVisitor)
}
}
impl SnapshotTasks {
pub(super) fn encode(&self) -> Result<Vec<u8>> {
let encoder = ZlibEncoder::new(Vec::new(), Compression::default());
// Wrap the ZlibEncoder in a buffer so that it sees fewer, larger writes. This
// dramatically encoding performance.
let mut encoder = std::io::BufWriter::new(encoder);
serde_json::to_writer(&mut encoder, &self)?;
let encoder = encoder
.into_inner()
.map_err(|e| anyhow::anyhow!("While flushing snapshot encoder: {e}"))?;
Ok(encoder.finish()?)
}
pub(super) fn decode(snapshot: &[u8]) -> Result<Self> {
let decoder = ZlibDecoder::new(snapshot);
Ok(serde_json::from_reader(decoder)?)
}
pub(super) fn into_inner(self) -> Vec<(Uuid, TaskMap)> {
self.0
}
}
/// Generate a snapshot (compressed, unencrypted) for the current state of the taskdb in the given
/// storage.
pub(super) async fn make_snapshot(txn: &mut dyn StorageTxn) -> Result<Vec<u8>> {
let all_tasks = SnapshotTasks(txn.all_tasks().await?);
all_tasks.encode()
}
/// Apply the given snapshot (compressed, unencrypted) to the taskdb's storage.
pub(super) async fn apply_snapshot(
txn: &mut dyn StorageTxn,
version: VersionId,
snapshot: &[u8],
) -> Result<()> {
let all_tasks = SnapshotTasks::decode(snapshot)?;
// double-check emptiness
if !txn.is_empty().await? {
return Err(Error::Database(String::from(
"Cannot apply snapshot to a non-empty task database",
)));
}
for (uuid, task) in all_tasks.into_inner().drain(..) {
txn.set_task(uuid, task).await?;
}
txn.set_base_version(version).await?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::storage::{inmemory::InMemoryStorage, Storage, TaskMap};
use pretty_assertions::assert_eq;
#[test]
fn test_serialize_empty() -> Result<()> {
let empty = SnapshotTasks(vec![]);
assert_eq!(serde_json::to_vec(&empty)?, b"{}".to_owned());
Ok(())
}
#[test]
fn test_serialize_tasks() -> Result<()> {
let u = Uuid::new_v4();
let m: TaskMap = vec![("description".to_owned(), "my task".to_owned())]
.drain(..)
.collect();
let all_tasks = SnapshotTasks(vec![(u, m)]);
assert_eq!(
serde_json::to_vec(&all_tasks)?,
format!("{{\"{u}\":{{\"description\":\"my task\"}}}}").into_bytes(),
);
Ok(())
}
#[tokio::test]
async fn test_round_trip() -> Result<()> {
let mut storage = InMemoryStorage::new();
let version = Uuid::new_v4();
let task1 = (
Uuid::new_v4(),
vec![("description".to_owned(), "one".to_owned())]
.drain(..)
.collect::<TaskMap>(),
);
let task2 = (
Uuid::new_v4(),
vec![("description".to_owned(), "two".to_owned())]
.drain(..)
.collect::<TaskMap>(),
);
{
let mut txn = storage.txn().await?;
txn.set_task(task1.0, task1.1.clone()).await?;
txn.set_task(task2.0, task2.1.clone()).await?;
txn.commit().await?;
}
let snap = {
let mut txn = storage.txn().await?;
make_snapshot(txn.as_mut()).await?
};
// apply that snapshot to a fresh bit of fake
let mut storage = InMemoryStorage::new();
{
let mut txn = storage.txn().await?;
apply_snapshot(txn.as_mut(), version, &snap).await?;
txn.commit().await?
}
{
let mut txn = storage.txn().await?;
assert_eq!(txn.get_task(task1.0).await?, Some(task1.1));
assert_eq!(txn.get_task(task2.0).await?, Some(task2.1));
assert_eq!(txn.all_tasks().await?.len(), 2);
assert_eq!(txn.base_version().await?, version);
assert_eq!(txn.unsynced_operations().await?.len(), 0);
assert_eq!(txn.get_working_set().await?.len(), 1);
}
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/taskdb/working_set.rs | src/taskdb/working_set.rs | use crate::errors::Result;
use crate::storage::{StorageTxn, TaskMap};
use std::collections::HashSet;
/// Rebuild the working set using a function to identify tasks that should be in the set. This
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
/// are not already in the working set but should be. The rebuild occurs in a single
/// transaction against the storage backend.
///
/// If `renumber` is false, tasks that leave the working set keep a `None` placeholder at
/// their old index, so remaining tasks retain their indexes.
pub(crate) async fn rebuild<F>(
    txn: &mut dyn StorageTxn,
    in_working_set: F,
    renumber: bool,
) -> Result<()>
where
    F: Fn(&TaskMap) -> bool,
{
    let old_ws = txn.get_working_set().await?;
    let mut new_ws = vec![None]; // index 0 is always None
    // UUIDs already carried over from the old working set, so the second pass
    // below does not add them again.
    let mut seen = HashSet::new();
    // The goal here is for existing working-set items to be "compressed' down to index 1, so
    // we begin by scanning the current working set and inserting any tasks that should still
    // be in the set into new_ws, implicitly dropping any tasks that are no longer in the
    // working set.
    for elt in &old_ws[1..] {
        if let Some(uuid) = elt {
            if let Some(task) = txn.get_task(*uuid).await? {
                if in_working_set(&task) {
                    // The existing working-set item is still in the working set -- no change.
                    new_ws.push(Some(*uuid));
                    seen.insert(*uuid);
                } else {
                    // The item should not be present. If we are not renumbering, then insert a
                    // blank working-set item here
                    if !renumber {
                        new_ws.push(None);
                    }
                }
                continue;
            }
        } else {
            // This item was already None.
            new_ws.push(None);
        }
    }
    // Now go hunting for tasks that should be in this list but are not, adding them at the
    // end of the list, whether renumbering or not
    for (uuid, task) in txn.all_tasks().await? {
        if !seen.contains(&uuid) && in_working_set(&task) {
            new_ws.push(Some(uuid));
        }
    }
    // Now use `set_working_set_item` to update any items within the range of the current
    // working set.
    for (i, (old, new)) in old_ws.iter().zip(new_ws.iter()).enumerate() {
        if old != new {
            txn.set_working_set_item(i, *new).await?;
        }
    }
    // If there are more new items, add them.
    match new_ws.len().cmp(&old_ws.len()) {
        std::cmp::Ordering::Less => {
            // Overall working set has shrunk, so set remaining items to None.
            for (i, item) in old_ws.iter().enumerate().skip(new_ws.len()) {
                if item.is_some() {
                    txn.set_working_set_item(i, None).await?;
                }
            }
        }
        std::cmp::Ordering::Equal => {}
        std::cmp::Ordering::Greater => {
            // Overall working set has grown, so add new items to the end.
            // Items past the old length were pushed as `Some(..)` above, so the
            // expect here encodes an invariant, not a runtime condition.
            for uuid in &new_ws[old_ws.len()..] {
                txn.add_to_working_set(uuid.expect("new ws items should not be None"))
                    .await?;
            }
        }
    }
    // Commit the whole rebuild as a single transaction.
    txn.commit().await?;
    Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::Storage;
use crate::taskdb::TaskDb;
use crate::{Operation, Operations};
use chrono::Utc;
use uuid::Uuid;
#[tokio::test]
async fn rebuild_working_set_renumber() -> Result<()> {
    rebuild_working_set(true).await
}
#[tokio::test]
async fn rebuild_working_set_no_renumber() -> Result<()> {
    rebuild_working_set(false).await
}
// Shared body for the renumber / no-renumber cases above: builds a mix of
// pending and non-pending tasks, seeds the working set, rebuilds, and checks
// the resulting layout.
async fn rebuild_working_set(renumber: bool) -> Result<()> {
    let mut db = TaskDb::new(InMemoryStorage::new());
    let mut uuids = vec![];
    uuids.push(Uuid::new_v4());
    println!("uuids[0]: {:?} - pending, not in working set", uuids[0]);
    uuids.push(Uuid::new_v4());
    println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
    uuids.push(Uuid::new_v4());
    println!("uuids[2]: {:?} - not pending, not in working set", uuids[2]);
    uuids.push(Uuid::new_v4());
    println!("uuids[3]: {:?} - not pending, in working set", uuids[3]);
    uuids.push(Uuid::new_v4());
    println!("uuids[4]: {:?} - pending, in working set", uuids[4]);
    // add everything to the TaskDb
    let mut ops = Operations::new();
    for uuid in &uuids {
        ops.push(Operation::Create { uuid: *uuid });
    }
    // Mark tasks 0, 1, and 4 as pending.
    for i in &[0usize, 1, 4] {
        ops.push(Operation::Update {
            uuid: uuids[*i],
            property: String::from("status"),
            value: Some("pending".into()),
            old_value: None,
            timestamp: Utc::now(),
        });
    }
    db.commit_operations(ops, |_| false).await?;
    // set the existing working_set as we want it
    {
        let mut txn = db.storage.txn().await?;
        txn.clear_working_set().await?;
        for i in &[1usize, 3, 4] {
            txn.add_to_working_set(uuids[*i]).await?;
        }
        txn.commit().await?;
    }
    assert_eq!(
        db.working_set().await?,
        vec![None, Some(uuids[1]), Some(uuids[3]), Some(uuids[4])]
    );
    rebuild(
        db.storage.txn().await?.as_mut(),
        |t| {
            if let Some(status) = t.get("status") {
                status == "pending"
            } else {
                false
            }
        },
        renumber,
    )
    .await?;
    let exp = if renumber {
        // uuids[1] and uuids[4] are already in the working set, so are compressed
        // to the top, and then uuids[0] is added.
        vec![None, Some(uuids[1]), Some(uuids[4]), Some(uuids[0])]
    } else {
        // uuids[1] and uuids[4] are already in the working set, at indexes 1 and 3,
        // and then uuids[0] is added.
        vec![None, Some(uuids[1]), None, Some(uuids[4]), Some(uuids[0])]
    };
    assert_eq!(db.working_set().await?, exp);
    Ok(())
}
#[tokio::test]
// A rebuild where every existing working-set item stays put, and one new
// pending task is appended.
async fn rebuild_working_set_no_change() -> Result<()> {
    let mut db = TaskDb::new(InMemoryStorage::new());
    let mut uuids = vec![];
    uuids.push(Uuid::new_v4());
    println!("uuids[0]: {:?} - pending, in working set", uuids[0]);
    uuids.push(Uuid::new_v4());
    println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
    uuids.push(Uuid::new_v4());
    println!("uuids[2]: {:?} - pending, not in working set", uuids[2]);
    // add everything to the TaskDb
    let mut ops = Operations::new();
    for uuid in &uuids {
        ops.push(Operation::Create { uuid: *uuid });
        ops.push(Operation::Update {
            uuid: *uuid,
            property: String::from("status"),
            value: Some("pending".into()),
            old_value: None,
            timestamp: Utc::now(),
        });
    }
    db.commit_operations(ops, |_| false).await?;
    // set the existing working_set as we want it, containing UUIDs 0 and 1.
    {
        let mut txn = db.storage.txn().await?;
        txn.clear_working_set().await?;
        for i in &[0, 1] {
            txn.add_to_working_set(uuids[*i]).await?;
        }
        txn.commit().await?;
    }
    rebuild(
        db.storage.txn().await?.as_mut(),
        |t| {
            if let Some(status) = t.get("status") {
                status == "pending"
            } else {
                false
            }
        },
        true,
    )
    .await?;
    assert_eq!(
        db.working_set().await?,
        vec![None, Some(uuids[0]), Some(uuids[1]), Some(uuids[2])]
    );
    Ok(())
}
#[tokio::test]
// A rebuild where two of three tasks leave the working set, exercising the
// `Ordering::Less` (shrink) branch of `rebuild`.
async fn rebuild_working_set_shrinks() -> Result<()> {
    let mut db = TaskDb::new(InMemoryStorage::new());
    let mut uuids = vec![];
    uuids.push(Uuid::new_v4());
    println!("uuids[0]: {:?} - pending, in working set", uuids[0]);
    uuids.push(Uuid::new_v4());
    println!("uuids[1]: {:?} - not pending, in working set", uuids[1]);
    uuids.push(Uuid::new_v4());
    println!("uuids[2]: {:?} - not pending, in working set", uuids[2]);
    // add everything to the TaskDb
    let mut ops = Operations::new();
    for uuid in &uuids {
        ops.push(Operation::Create { uuid: *uuid });
    }
    ops.push(Operation::Update {
        uuid: uuids[0],
        property: String::from("status"),
        value: Some("pending".into()),
        old_value: None,
        timestamp: Utc::now(),
    });
    db.commit_operations(ops, |_| false).await?;
    // set the existing working_set as we want it, containing all three UUIDs.
    {
        let mut txn = db.storage.txn().await?;
        txn.clear_working_set().await?;
        for uuid in &uuids {
            txn.add_to_working_set(*uuid).await?;
        }
        txn.commit().await?;
    }
    rebuild(
        db.storage.txn().await?.as_mut(),
        |t| {
            if let Some(status) = t.get("status") {
                status == "pending"
            } else {
                false
            }
        },
        true,
    )
    .await?;
    assert_eq!(db.working_set().await?, vec![None, Some(uuids[0])]);
    Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/update-and-delete-sync.rs | tests/update-and-delete-sync.rs | #![cfg(feature = "server-local")]
use taskchampion::chrono::{TimeZone, Utc};
use taskchampion::storage::inmemory::InMemoryStorage;
use taskchampion::{Operations, Replica, ServerConfig, Status, Uuid};
use tempfile::TempDir;
#[tokio::test]
async fn update_and_delete_sync_delete_first() -> anyhow::Result<()> {
    update_and_delete_sync(true).await
}
#[tokio::test]
async fn update_and_delete_sync_update_first() -> anyhow::Result<()> {
    update_and_delete_sync(false).await
}
/// Test what happens when an update is sync'd into a repo after a task is deleted.
/// If delete_first, then the deletion is sync'd to the server first; otherwise
/// the update is sync'd first. Either way, the task is gone.
async fn update_and_delete_sync(delete_first: bool) -> anyhow::Result<()> {
    // set up two replicas, and demonstrate replication between them
    let mut rep1 = Replica::new(InMemoryStorage::new());
    let mut rep2 = Replica::new(InMemoryStorage::new());
    let tmp_dir = TempDir::new().expect("TempDir failed");
    let mut server = ServerConfig::Local {
        server_dir: tmp_dir.path().to_path_buf(),
    }
    .into_server()
    .await?;
    // add a task on rep1, and sync it to rep2
    let mut ops = Operations::new();
    let u = Uuid::new_v4();
    let mut t = rep1.create_task(u, &mut ops).await?;
    t.set_description("test task".into(), &mut ops)?;
    t.set_status(Status::Pending, &mut ops)?;
    t.set_entry(Some(Utc::now()), &mut ops)?;
    rep1.commit_operations(ops).await?;
    rep1.sync(&mut server, false).await?;
    rep2.sync(&mut server, false).await?;
    // mark the task as deleted, long in the past, on rep2
    // (the old `modified` date makes the task eligible for expiration)
    {
        let mut ops = Operations::new();
        let mut t = rep2.get_task(u).await?.unwrap();
        t.set_status(Status::Deleted, &mut ops)?;
        t.set_modified(Utc.with_ymd_and_hms(1980, 1, 1, 0, 0, 0).unwrap(), &mut ops)?;
        rep2.commit_operations(ops).await?;
    }
    // sync it back to rep1
    rep2.sync(&mut server, false).await?;
    rep1.sync(&mut server, false).await?;
    // expire the task on rep1 and check that it is gone locally
    rep1.expire_tasks().await?;
    assert!(rep1.get_task(u).await?.is_none());
    // modify the task on rep2
    {
        let mut ops = Operations::new();
        let mut t = rep2.get_task(u).await?.unwrap();
        t.set_description("modified".to_string(), &mut ops)?;
        rep2.commit_operations(ops).await?;
    }
    // sync back and forth; the extra sync on whichever side goes "first"
    // controls the order in which delete and update reach the server
    if delete_first {
        rep1.sync(&mut server, false).await?;
    }
    rep2.sync(&mut server, false).await?;
    rep1.sync(&mut server, false).await?;
    if !delete_first {
        rep2.sync(&mut server, false).await?;
    }
    // check that the task is gone on both replicas
    assert!(rep1.get_task(u).await?.is_none());
    assert!(rep2.get_task(u).await?.is_none());
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/gcp-tls.rs | tests/gcp-tls.rs | #![cfg(all(feature = "server-gcp", target_os = "linux"))]
use std::{
fs::{metadata, set_permissions, File},
io::Write,
os::unix::fs::PermissionsExt,
};
use taskchampion::ServerConfig;
use tempfile::TempDir;
mod tls_utils;
#[tokio::test]
/// Check that the GCP server implementation correctly uses, or does not use,
/// tls-native-roots.
async fn gcp_tls() -> anyhow::Result<()> {
    // In order to attempt a connection, GCP requires a file containing what appears to be valid
    // credentials.
    let tmp_dir = TempDir::new()?;
    let creds_path = tmp_dir.path().join("creds.json");
    File::create_new(&creds_path)
        .expect("Could not open temp creds file")
        .write_all(
            br#"{
  "client_id": "765432109876-abcdefghijklmnopqrstuvwxyz.apps.googleusercontent.com",
  "client_secret": "aBcDeFgHiJkLmNoPqRsTuVwXyZ",
  "refresh_token": "1/abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
  "type": "authorized_user"
}"#,
        )
        .expect("Could not write temp creds file");
    // File::create_new creates files with permissions 0o000, which the GCP client
    // will not be able to read.
    // NOTE(review): 0o000 here describes the observed behavior in this test
    // environment; std documents create_new as using default permissions.
    let metadata = metadata(&creds_path)?;
    let mut permissions = metadata.permissions();
    permissions.set_mode(0o777);
    set_permissions(&creds_path, permissions)?;
    tls_utils::reset_seen_ssl_file();
    // This will fail getting the salt due to bad credentials, but after making a TLS connection,
    // which is what this test requires.
    let _ = ServerConfig::Gcp {
        bucket: "no-bucket".into(),
        credential_path: Some(creds_path.to_str().unwrap().into()),
        encryption_secret: b"abc".into(),
    }
    .into_server()
    .await;
    tls_utils::assert_expected_seen_ssl_file();
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/aws-tls.rs | tests/aws-tls.rs | #![cfg(all(feature = "server-aws", target_os = "linux"))]
use taskchampion::{server::AwsCredentials, ServerConfig};
mod tls_utils;
#[tokio::test]
/// Check that the AWS server implementation correctly uses, or does not use,
/// tls-native-roots.
async fn aws_tls() -> anyhow::Result<()> {
    tls_utils::reset_seen_ssl_file();
    // This will fail getting the salt due to bad credentials, but after making a TLS connection,
    // which is what this test requires.
    let _ = ServerConfig::Aws {
        region: Some("us-east-2".into()),
        bucket: "gotheneburgbitfactory".into(),
        credentials: AwsCredentials::AccessKey {
            access_key_id: "not".into(),
            secret_access_key: "valid".into(),
        },
        endpoint_url: None,
        force_path_style: false,
        encryption_secret: b"abc".into(),
    }
    .into_server()
    .await;
    // Whether an /ssl/ path was opened depends on the tls-native-roots feature.
    tls_utils::assert_expected_seen_ssl_file();
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/syncing-proptest.rs | tests/syncing-proptest.rs | #![cfg(all(feature = "server-local", not(target_arch = "wasm32")))]
use pretty_assertions::assert_eq;
use proptest::prelude::*;
use taskchampion::storage::inmemory::InMemoryStorage;
use taskchampion::{Operations, Replica, ServerConfig, TaskData, Uuid};
use tempfile::TempDir;
/// A single randomized step applied to one replica.
#[derive(Debug, Clone)]
enum Action {
    Create,
    Update(String, String),
    Delete,
    Sync,
}
/// Proptest strategy producing one [`Action`], drawing update properties and
/// values from small fixed alphabets.
fn action() -> impl Strategy<Value = Action> {
    prop_oneof![
        Just(Action::Create),
        ("(description|project|due)", "(a|b|c)").prop_map(|(p, v)| Action::Update(p, v)),
        Just(Action::Delete),
        Just(Action::Sync),
    ]
}
/// Proptest strategy producing a sequence of (action, replica-index 0..3) pairs.
fn actions() -> impl Strategy<Value = Vec<(Action, u8)>> {
    proptest::collection::vec((action(), (0..3u8)), 0..100)
}
proptest! {
    #[test]
    /// Check that various sequences of operations on mulitple db's do not get the db's into an
    /// incompatible state. The main concern here is that there might be a sequence of operations
    /// that results in a task being in different states in different replicas. Different tasks
    /// cannot interfere with one another, so this focuses on a single task.
    fn multi_replica_sync(action_sequence in actions()) {
        let tmp_dir = TempDir::new().expect("TempDir failed");
        let uuid = Uuid::parse_str("83a2f9ef-f455-4195-b92e-a54c161eebfc").unwrap();
        let server_config = ServerConfig::Local {
            server_dir: tmp_dir.path().to_path_buf(),
        };
        // proptest bodies are synchronous, so drive the async work with a
        // single-threaded runtime built per case.
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap()
            .block_on(async {
                let mut server = server_config.into_server().await.unwrap();
                let mut replicas = [
                    Replica::new(InMemoryStorage::new()),
                    Replica::new(InMemoryStorage::new()),
                    Replica::new(InMemoryStorage::new()),
                ];
                for (action, rep) in action_sequence {
                    println!("{action:?} on rep {rep}");
                    let rep = &mut replicas[rep as usize];
                    match action {
                        Action::Create => {
                            // Only create when the task does not already exist.
                            if rep.get_task_data(uuid).await.unwrap().is_none() {
                                let mut ops = Operations::new();
                                TaskData::create(uuid, &mut ops);
                                rep.commit_operations(ops).await.unwrap();
                            }
                        }
                        Action::Update(p, v) => {
                            if let Some(mut t) = rep.get_task_data(uuid).await.unwrap() {
                                let mut ops = Operations::new();
                                t.update(p, Some(v), &mut ops);
                                rep.commit_operations(ops).await.unwrap();
                            }
                        }
                        Action::Delete => {
                            if let Some(mut t) = rep.get_task_data(uuid).await.unwrap() {
                                let mut ops = Operations::new();
                                t.delete(&mut ops);
                                rep.commit_operations(ops).await.unwrap();
                            }
                        }
                        Action::Sync => rep.sync(&mut server, false).await.unwrap(),
                    }
                }
                // Sync all of the replicas, twice, to flush out any un-synced changes.
                for rep in &mut replicas {
                    rep.sync(&mut server, false).await.unwrap()
                }
                for rep in &mut replicas {
                    rep.sync(&mut server, false).await.unwrap()
                }
                // All three replicas must now agree on the task's state.
                let t0 = replicas[0].get_task_data(uuid).await.unwrap();
                let t1 = replicas[1].get_task_data(uuid).await.unwrap();
                let t2 = replicas[2].get_task_data(uuid).await.unwrap();
                assert_eq!(t0, t1);
                assert_eq!(t1, t2);
            });
    }
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/cross-sync.rs | tests/cross-sync.rs | #![cfg(feature = "server-local")]
use chrono::Utc;
use pretty_assertions::assert_eq;
use taskchampion::storage::inmemory::InMemoryStorage;
use taskchampion::{Operations, Replica, ServerConfig, Status, Uuid};
use tempfile::TempDir;
#[tokio::test]
async fn cross_sync() -> anyhow::Result<()> {
    // set up two replicas, and demonstrate replication between them
    let mut rep1 = Replica::new(InMemoryStorage::new());
    let mut rep2 = Replica::new(InMemoryStorage::new());
    let tmp_dir = TempDir::new().expect("TempDir failed");
    let server_config = ServerConfig::Local {
        server_dir: tmp_dir.path().to_path_buf(),
    };
    let mut server = server_config.into_server().await?;
    let (uuid1, uuid2) = (Uuid::new_v4(), Uuid::new_v4());
    let mut ops = Operations::new();
    // add some tasks on rep1
    let mut t1 = rep1.create_task(uuid1, &mut ops).await?;
    t1.set_description("test 1".into(), &mut ops)?;
    t1.set_status(Status::Pending, &mut ops)?;
    t1.set_entry(Some(Utc::now()), &mut ops)?;
    let mut t2 = rep1.create_task(uuid2, &mut ops).await?;
    t2.set_description("test 2".into(), &mut ops)?;
    t2.set_status(Status::Pending, &mut ops)?;
    t2.set_entry(Some(Utc::now()), &mut ops)?;
    // modify t1 (start makes it active)
    t1.start(&mut ops)?;
    rep1.commit_operations(ops).await?;
    rep1.sync(&mut server, false).await?;
    rep2.sync(&mut server, false).await?;
    // those tasks should exist on rep2 now
    let mut t12 = rep2
        .get_task(uuid1)
        .await?
        .expect("expected task 1 on rep2");
    let t22 = rep2
        .get_task(uuid2)
        .await?
        .expect("expected task 2 on rep2");
    assert_eq!(t12.get_description(), "test 1");
    assert_eq!(t12.is_active(), true);
    assert_eq!(t22.get_description(), "test 2");
    assert_eq!(t22.is_active(), false);
    // make non-conflicting changes on the two replicas
    // (rep1 completes task 2; rep2 completes task 1)
    let mut ops = Operations::new();
    t2.set_status(Status::Completed, &mut ops)?;
    rep1.commit_operations(ops).await?;
    let mut ops = Operations::new();
    t12.set_status(Status::Completed, &mut ops)?;
    rep2.commit_operations(ops).await?;
    // sync those changes back and forth
    rep1.sync(&mut server, false).await?; // rep1 -> server
    rep2.sync(&mut server, false).await?; // server -> rep2, rep2 -> server
    rep1.sync(&mut server, false).await?; // server -> rep1
    let t1 = rep1
        .get_task(uuid1)
        .await?
        .expect("expected task 1 on rep1");
    assert_eq!(t1.get_status(), Status::Completed);
    let mut t22 = rep2
        .get_task(uuid2)
        .await?
        .expect("expected task 2 on rep2");
    assert_eq!(t22.get_status(), Status::Completed);
    rep1.rebuild_working_set(true).await?;
    rep2.rebuild_working_set(true).await?;
    // Make task 2 pending again, and observe that it is in the working set in both replicas after
    // sync.
    let mut ops = Operations::new();
    t22.set_status(Status::Pending, &mut ops)?;
    rep2.commit_operations(ops).await?;
    let ws = rep2.working_set().await?;
    assert_eq!(ws.by_index(1), Some(uuid2));
    // Pending status is not sync'd to rep1 yet.
    let ws = rep1.working_set().await?;
    assert_eq!(ws.by_index(1), None);
    rep2.sync(&mut server, false).await?;
    rep1.sync(&mut server, false).await?;
    let ws = rep1.working_set().await?;
    assert_eq!(ws.by_index(1), Some(uuid2));
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/sync-server-tls.rs | tests/sync-server-tls.rs | #![cfg(all(feature = "server-sync", target_os = "linux"))]
use taskchampion::ServerConfig;
use uuid::Uuid;
mod tls_utils;
#[tokio::test]
/// Check that the sync server implementation correctly uses, or does not use,
/// tls-native-roots.
async fn sync_server_tls() -> anyhow::Result<()> {
    tls_utils::reset_seen_ssl_file();
    // Any HTTPS request suffices to trigger certificate loading; the response
    // content does not matter.
    let mut server = ServerConfig::Remote {
        url: "https://gothenburgbitfactory.org/".to_string(),
        client_id: Uuid::new_v4(),
        encryption_secret: b"abc".into(),
    }
    .into_server()
    .await?;
    // This will return a 404, which is not a `Result::Err`.
    server.get_child_version(Uuid::new_v4()).await?;
    tls_utils::assert_expected_seen_ssl_file();
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/tests/tls_utils.rs | tests/tls_utils.rs | #![cfg(target_os = "linux")]
//! Utilities for testing TLS behaviors.
//!
//! This package intercepts calls to libc's `open64` method, and checks for paths containing
//! `/ssl/`. This is useful to determine whether some deeply nested library is using native TLS
//! certificates (which are typically at pathnames containing `/ssl`) or built-in certificate lists
//! (which are embedded in the binary and not read from disk).
//!
//! This is likely to only work on GNU libc on Linux, but that's sufficient to verify that TC's
//! use of libraries is correctly communicating a desire to use, or not use, native certificates.
use libc::{c_char, c_int, dlsym, RTLD_NEXT};
use std::ffi::CStr;
use std::sync::atomic::{AtomicBool, Ordering};
static mut INTERCEPTING: bool = false;
static mut SEEN_SSL: bool = false;
/// Intercept the libc `open64` call, and set a flag when a path containing `/ssl/` is seen.
///
/// # SAFETY
///
/// This works on Linux, enough to perform some tests.
#[no_mangle]
pub unsafe extern "C" fn open64(path: *const c_char, flags: c_int) -> c_int {
if unsafe { INTERCEPTING } {
let path_rust = CStr::from_ptr(path)
.to_str()
.expect("path is not valid utf-8");
if path_rust.contains("/ssl/") {
unsafe { SEEN_SSL = true };
}
println!("open64({path_rust:?}, ..)");
}
type Open64Fn = unsafe extern "C" fn(*const c_char, c_int) -> c_int;
let original_open64: Open64Fn = {
let ptr = dlsym(RTLD_NEXT, c"open64".as_ptr());
assert!(!ptr.is_null());
std::mem::transmute(ptr)
};
original_open64(path, flags)
}
/// Return true if a file containing `/ssl/` has been opened since the
/// last call to `reset_seen_ssl_file`.
pub fn have_seen_ssl_file() -> bool {
unsafe { SEEN_SSL }
}
/// Reset the flag returned by `have_seen_ssl_file` to false.
pub fn reset_seen_ssl_file() {
unsafe {
SEEN_SSL = false;
INTERCEPTING = true;
}
}
/// Assert that an SSL file was seen if the `tls-native-roots` feature
/// is enabled, and otherwise assert that no such file was seen.
pub fn assert_expected_seen_ssl_file() {
#[cfg(feature = "tls-native-roots")]
assert!(have_seen_ssl_file(), "Expected something to open a filename containing /ssl/ to load native certs, since `tls-native-roots` is enabled.");
#[cfg(not(feature = "tls-native-roots"))]
assert!(!have_seen_ssl_file(), "Did not expect anything to open a filename containing /ssl/ to load native certs, since `tls-native-roots` is not enabled.");
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/xtask/src/main.rs | xtask/src/main.rs | //! This executable defines the `cargo xtask` subcommands.
//!
//! At the moment it is very simple, but if this grows more subcommands then
//! it will be sensible to use `clap` or another similar library.
use regex::Regex;
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, Seek, Write};
use std::path::{Path, PathBuf};
/// Tuples of the form (PATH, REGEX) where PATH and REGEX are literals where PATH is a file that
/// contains the Minimum Supported Rust Version and REGEX is the pattern to find the appropriate
/// line in the file. PATH is relative to the root directory in the repo.
///
/// NOTE(review): the first pattern contains `[0-9.]+*` -- a quantifier applied
/// to a quantifier. Confirm the `regex` crate accepts this; if it rejects
/// nested repetition, `Regex::new` in `msrv` would error for that entry.
const MSRV_PATH_REGEX: &[(&str, &str)] = &[
    (
        ".github/workflows/checks.yml",
        r#"toolchain: "[0-9.]+*" # MSRV"#,
    ),
    (".github/workflows/rust-tests.yml", r#""[0-9.]+" # MSRV"#),
    ("src/crate-doc.md", r#"Rust version [0-9.]+ and higher"#),
    ("Cargo.toml", r#"^rust-version = "[0-9.]+""#),
];
/// Entry point for `cargo xtask` subcommands.
///
/// Dispatches on the first CLI argument; `msrv` is currently the only subcommand.
pub fn main() -> anyhow::Result<()> {
    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?);
    // The workspace root is the parent of the xtask crate's directory.
    let workspace_dir = manifest_dir.parent().unwrap();
    let arguments: Vec<String> = env::args().collect();
    match arguments.get(1).map(String::as_str) {
        Some("msrv") => msrv(arguments, workspace_dir),
        Some(_) => anyhow::bail!("xtask: unknown xtask"),
        None => anyhow::bail!("xtask: Valid arguments are: `msrv <version x.y>`"),
    }
}
/// `cargo xtask msrv (X.Y)`
///
/// This checks and updates the Minimum Supported Rust Version for all files specified in MSRV_PATH_REGEX`.
/// Each line where the regex matches will have all values of the form `#.##` replaced with the given MSRV.
fn msrv(args: Vec<String>, workspace_dir: &Path) -> anyhow::Result<()> {
    // check that (X.Y) argument is (mostly) valid:
    if args.len() < 3 || !args[2].chars().all(|c| c.is_numeric() || c == '.') {
        anyhow::bail!("xtask: Invalid argument format. Xtask msrv argument takes the form \"X.Y(y)\", where XYy are numbers. eg: `cargo run xtask msrv 1.68`");
    }
    let version_replacement_string = &args[2];
    // set regex for replacing version number only within the pattern found within a line
    let re_msrv_version = Regex::new(r"([0-9]+(\.|[0-9]+|))+")?;
    // for each file in const paths tuple
    for msrv_file in MSRV_PATH_REGEX {
        let mut is_pattern_in_file = false;
        let mut updated_file = false;
        let path = workspace_dir.join(msrv_file.0);
        let path = Path::new(&path);
        if !path.exists() {
            anyhow::bail!("xtask: path does not exist {}", &path.display());
        };
        // Open read+write so the same handle can be rewritten in place below.
        let mut file = File::options().read(true).write(true).open(path)?;
        let reader = BufReader::new(&file);
        // set search string and the replacement string for version number content
        let re_msrv_pattern = Regex::new(msrv_file.1)?;
        // for each line in file
        let mut file_string = String::new();
        for line in reader.lines() {
            let line = &line?;
            // if rust version pattern is found and is different, update it
            if let Some(pattern_offset) = re_msrv_pattern.find(line) {
                is_pattern_in_file = true;
                if !pattern_offset.as_str().contains(version_replacement_string) {
                    // NOTE(review): the replacement applies to the whole line,
                    // not just the matched span -- any other version-like token
                    // on the same line would also be rewritten; confirm that is
                    // intended for the patterns in MSRV_PATH_REGEX.
                    file_string += &re_msrv_version.replace(line, version_replacement_string);
                    file_string += "\n";
                    updated_file = true;
                    continue;
                }
            }
            file_string += line;
            file_string += "\n";
        }
        // if pattern was found and updated, write to disk
        if updated_file {
            // Set the file length to the file_string length
            // (truncates first so a shorter rewrite leaves no trailing bytes)
            file.set_len(file_string.len() as u64)?;
            // set the cursor to the beginning of the file and write
            file.seek(std::io::SeekFrom::Start(0))?;
            file.write_all(file_string.as_bytes())?;
            // notify user this file was updated
            println!("xtask: Updated MSRV in {}", msrv_file.0);
        } else if !is_pattern_in_file {
            println!(
                "xtask: Pattern {:?} not found in {}",
                msrv_file.1, msrv_file.0
            );
        }
    }
    Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/lib.rs | src/lib.rs | #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![warn(clippy::all, missing_docs, nonstandard_style, future_incompatible)]
#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use cookie::CookieJar;
use http::HeaderValue;
use parking_lot::Mutex;
use std::sync::Arc;
#[doc(inline)]
pub use self::service::{CookieManager, CookieManagerLayer};
#[cfg(feature = "signed")]
pub use self::signed::SignedCookies;
#[cfg(feature = "private")]
pub use self::private::PrivateCookies;
#[cfg(any(feature = "signed", feature = "private"))]
pub use cookie::Key;
pub use cookie::Cookie;
#[doc(inline)]
pub use cookie;
#[cfg(feature = "axum-core")]
#[cfg_attr(docsrs, doc(cfg(feature = "axum-core")))]
mod extract;
#[cfg(feature = "signed")]
mod signed;
#[cfg(feature = "private")]
mod private;
pub mod service;
/// A parsed on-demand cookie jar.
///
/// Cloning is cheap: all clones share the same underlying jar via `Arc`.
#[derive(Clone, Debug, Default)]
pub struct Cookies {
    // Shared, lazily-parsed jar state; the mutex serializes concurrent access.
    inner: Arc<Mutex<Inner>>,
}
impl Cookies {
    /// Builds a jar backed by the raw `Cookie` request headers; parsing is
    /// deferred until the jar is first accessed.
    fn new(headers: Vec<HeaderValue>) -> Self {
        Self {
            inner: Arc::new(Mutex::new(Inner {
                headers,
                ..Default::default()
            })),
        }
    }

    /// Adds [`Cookie`] to this jar. If a [`Cookie`] with the same name already exists, it is
    /// replaced with provided cookie.
    pub fn add(&self, cookie: Cookie<'static>) {
        let mut guard = self.inner.lock();
        guard.changed = true;
        guard.jar().add(cookie);
    }

    /// Returns the [`Cookie`] with the given name. Returns [`None`] if it doesn't exist.
    pub fn get(&self, name: &str) -> Option<Cookie<'_>> {
        let mut guard = self.inner.lock();
        guard.jar().get(name).cloned()
    }

    /// Removes [`Cookie`] from this jar.
    ///
    /// **To properly generate the removal cookie, `cookie` must contain the same `path` and
    /// `domain` as the cookie that was initially set.** In particular, this means that passing a
    /// cookie from a browser to this method won't work because browsers don't set the cookie's
    /// `path` attribute.
    pub fn remove(&self, cookie: Cookie<'static>) {
        let mut guard = self.inner.lock();
        guard.changed = true;
        guard.jar().remove(cookie);
    }

    /// Returns all the [`Cookie`]s present in this jar.
    ///
    /// This method collects [`Cookie`]s into a vector instead of iterating through them to
    /// minimize the mutex locking time.
    pub fn list(&self) -> Vec<Cookie<'_>> {
        let mut guard = self.inner.lock();
        guard.jar().iter().cloned().collect()
    }

    /// Returns a child [`SignedCookies`] jar for interactions with signed by the `key` cookies.
    ///
    /// # Example:
    /// ```
    /// use cookie::{Cookie, Key};
    /// use tower_cookies::Cookies;
    ///
    /// let cookies = Cookies::default();
    /// let key = Key::generate();
    /// let signed = cookies.signed(&key);
    ///
    /// let foo = Cookie::new("foo", "bar");
    /// signed.add(foo.clone());
    ///
    /// assert_eq!(signed.get("foo"), Some(foo.clone()));
    /// assert_ne!(cookies.get("foo"), Some(foo));
    /// ```
    #[cfg(feature = "signed")]
    pub fn signed<'a>(&self, key: &'a cookie::Key) -> SignedCookies<'a> {
        SignedCookies::new(self, key)
    }

    /// Returns a child [`PrivateCookies`] jar for encrypting and decrypting cookies.
    ///
    /// # Example:
    /// ```
    /// use cookie::{Cookie, Key};
    /// use tower_cookies::Cookies;
    ///
    /// let cookies = Cookies::default();
    /// let key = Key::generate();
    /// let private = cookies.private(&key);
    ///
    /// let foo = Cookie::new("foo", "bar");
    /// private.add(foo.clone());
    ///
    /// assert_eq!(private.get("foo"), Some(foo.clone()));
    /// assert_ne!(cookies.get("foo"), Some(foo));
    /// ```
    #[cfg(feature = "private")]
    pub fn private<'a>(&self, key: &'a cookie::Key) -> PrivateCookies<'a> {
        PrivateCookies::new(self, key)
    }
}
// Lazily-parsed cookie state shared by all clones of a `Cookies` value.
#[derive(Debug, Default)]
struct Inner {
    // Raw `Cookie` request header values; parsed on first call to `jar()`.
    headers: Vec<HeaderValue>,
    // The parsed jar; `None` until first accessed.
    jar: Option<CookieJar>,
    // Set to true by `Cookies::add`/`remove` once the jar has been mutated
    // (presumably read by the service layer to decide whether to emit
    // `Set-Cookie` headers -- confirm in `service.rs`).
    changed: bool,
}
impl Inner {
    /// Returns the cookie jar, parsing it from the raw request headers on
    /// first use and caching the result for subsequent calls.
    fn jar(&mut self) -> &mut CookieJar {
        // Borrow the headers separately so the closure below can read them
        // while `self.jar` is mutably borrowed (disjoint field borrows).
        let headers = &self.headers;
        self.jar.get_or_insert_with(|| {
            let mut jar = CookieJar::new();
            // Non-UTF-8 headers and unparseable cookie fragments are skipped.
            let parsed = headers
                .iter()
                .filter_map(|header| std::str::from_utf8(header.as_bytes()).ok())
                .flat_map(|header_str| header_str.split(';'))
                .filter_map(|cookie_str| {
                    cookie::Cookie::parse_encoded(cookie_str.to_owned()).ok()
                });
            for cookie in parsed {
                jar.add_original(cookie);
            }
            jar
        })
    }
}
#[cfg(all(test, feature = "axum-core"))]
mod tests {
    use crate::{CookieManagerLayer, Cookies};
    use axum::{body::Body, routing::get, Router};
    use cookie::Cookie;
    use http::{header, Request};
    use http_body_util::BodyExt;
    use tower::ServiceExt;
    // A small axum app with one route per jar operation, wrapped in the
    // cookie-manager layer under test.
    fn app() -> Router {
        Router::new()
            .route(
                "/list",
                get(|cookies: Cookies| async move {
                    // Sort for a deterministic response body.
                    let mut items = cookies
                        .list()
                        .iter()
                        .map(|c| format!("{}={}", c.name(), c.value()))
                        .collect::<Vec<_>>();
                    items.sort();
                    items.join(", ")
                }),
            )
            .route(
                "/add",
                get(|cookies: Cookies| async move {
                    cookies.add(Cookie::new("baz", "3"));
                    cookies.add(Cookie::new("spam", "4"));
                }),
            )
            .route(
                "/remove",
                get(|cookies: Cookies| async move {
                    cookies.remove(Cookie::new("foo", ""));
                }),
            )
            .layer(CookieManagerLayer::new())
    }
    // Collects a response body into a String.
    async fn body_string(body: Body) -> String {
        let bytes = body.collect().await.unwrap().to_bytes();
        String::from_utf8_lossy(&bytes).into()
    }
    #[tokio::test]
    async fn read_cookies() {
        let req = Request::builder()
            .uri("/list")
            .header(header::COOKIE, "foo=1; bar=2")
            .body(Body::empty())
            .unwrap();
        let res = app().oneshot(req).await.unwrap();
        assert_eq!(body_string(res.into_body()).await, "bar=2, foo=1");
    }
    #[tokio::test]
    // Cookies split across multiple `Cookie` headers are merged into one jar.
    async fn read_multi_header_cookies() {
        let req = Request::builder()
            .uri("/list")
            .header(header::COOKIE, "foo=1")
            .header(header::COOKIE, "bar=2")
            .body(Body::empty())
            .unwrap();
        let res = app().oneshot(req).await.unwrap();
        assert_eq!(body_string(res.into_body()).await, "bar=2, foo=1");
    }
    #[tokio::test]
    async fn add_cookies() {
        let req = Request::builder()
            .uri("/add")
            .header(header::COOKIE, "foo=1; bar=2")
            .body(Body::empty())
            .unwrap();
        let res = app().oneshot(req).await.unwrap();
        let mut hdrs: Vec<_> = res.headers().get_all(header::SET_COOKIE).iter().collect();
        hdrs.sort();
        assert_eq!(hdrs, ["baz=3", "spam=4"]);
    }
    #[tokio::test]
    // Removal is surfaced to the client as an expired Set-Cookie header.
    async fn remove_cookies() {
        let req = Request::builder()
            .uri("/remove")
            .header(header::COOKIE, "foo=1; bar=2")
            .body(Body::empty())
            .unwrap();
        let res = app().oneshot(req).await.unwrap();
        let mut hdrs = res.headers().get_all(header::SET_COOKIE).iter();
        let hdr = hdrs.next().unwrap().to_str().unwrap();
        assert!(hdr.starts_with("foo=; Max-Age=0"));
        assert_eq!(hdrs.next(), None);
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/extract.rs | src/extract.rs | use crate::Cookies;
use axum_core::extract::FromRequestParts;
use http::{request::Parts, StatusCode};
impl<S> FromRequestParts<S> for Cookies
where
    S: Sync + Send,
{
    type Rejection = (http::StatusCode, &'static str);

    /// Extracts the `Cookies` handle that `CookieManagerLayer` stored in the
    /// request extensions; rejects with a 500 when the layer is missing.
    async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
        parts.extensions.get::<Cookies>().cloned().ok_or((
            StatusCode::INTERNAL_SERVER_ERROR,
            "Can't extract cookies. Is `CookieManagerLayer` enabled?",
        ))
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/signed.rs | src/signed.rs | use crate::Cookies;
use cookie::{Cookie, Key};
/// A child cookie jar that authenticates its cookies.
///
/// It signs all the cookies added to it and verifies cookies retrieved from it.
/// Any cookies stored in `SignedCookies` are provided integrity and authenticity. In other
/// words, clients cannot tamper with the contents of a cookie nor can they fabricate cookie
/// values, but the data is visible in plaintext.
pub struct SignedCookies<'a> {
    // Handle to the parent jar; all reads and writes go through it.
    cookies: Cookies,
    // Key used to sign on write and verify on read.
    key: &'a Key,
}
impl<'a> SignedCookies<'a> {
    /// Creates an instance of `SignedCookies` with parent `cookies` and key `key`. This method is
    /// typically called indirectly via the `signed` method of [`Cookies`].
    pub(crate) fn new(cookies: &Cookies, key: &'a Key) -> Self {
        Self {
            cookies: cookies.clone(),
            key,
        }
    }

    /// Adds cookie to the parent jar. The cookie’s value is signed assuring integrity and
    /// authenticity.
    pub fn add(&self, cookie: Cookie<'static>) {
        let mut inner = self.cookies.inner.lock();
        // Mark the parent jar dirty so the middleware emits `Set-Cookie`.
        inner.changed = true;
        inner.jar().signed_mut(self.key).add(cookie);
    }

    /// Returns `Cookie` with the `name` and verifies the authenticity and integrity of the
    /// cookie’s value, returning a `Cookie` with the authenticated value. If the cookie cannot be
    /// found, or the cookie fails to verify, None is returned.
    pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
        let mut inner = self.cookies.inner.lock();
        inner.jar().signed(self.key).get(name)
    }

    /// Removes the `cookie` from the parent jar.
    ///
    /// **To properly generate the removal cookie, `cookie` must contain the same `path` and
    /// `domain` as the cookie that was initially set.** In particular, this means that passing a
    /// cookie from a browser to this method won't work because browsers don't set the cookie's
    /// `path` attribute.
    pub fn remove(&self, cookie: Cookie<'static>) {
        self.cookies.remove(cookie);
    }
}
#[cfg(all(test, feature = "signed"))]
mod tests {
    use crate::Cookies;
    use cookie::{Cookie, Key};

    #[test]
    fn get_absent() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        assert_eq!(cookies.signed(&key).get("foo"), None);
    }

    #[test]
    fn add_get_signed() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        let signed = cookies.signed(&key);
        signed.add(cookie.clone());
        assert_eq!(signed.get("foo").unwrap(), cookie);
    }

    #[test]
    fn add_signed_get_raw() {
        // The raw jar sees the signed encoding, not the plaintext value.
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.signed(&key).add(cookie.clone());
        assert_ne!(cookies.get("foo").unwrap(), cookie);
    }

    #[test]
    fn add_raw_get_signed() {
        // An unsigned value must fail verification via the signed jar.
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.add(cookie);
        assert_eq!(cookies.signed(&key).get("foo"), None);
    }

    #[test]
    fn messed_keys() {
        // A cookie signed with one key must not verify under another key.
        let key1 = Key::generate();
        let key2 = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.signed(&key1).add(cookie);
        assert_eq!(cookies.signed(&key2).get("foo"), None);
    }

    #[test]
    fn remove() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let signed = cookies.signed(&key);
        signed.add(Cookie::new("foo", "bar"));
        let cookie = signed.get("foo").unwrap();
        signed.remove(cookie);
        assert!(signed.get("foo").is_none());
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/private.rs | src/private.rs | use crate::Cookies;
use cookie::{Cookie, Key};
/// A cookie jar that provides authenticated encryption for its cookies.
///
/// A _private_ child jar signs and encrypts all the cookies added to it and
/// verifies and decrypts cookies retrieved from it. Any cookies stored in
/// `PrivateCookies` are simultaneously assured confidentiality, integrity, and
/// authenticity. In other words, clients cannot discover nor tamper with the
/// contents of a cookie, nor can they fabricate cookie data.
pub struct PrivateCookies<'a> {
    // Handle to the parent jar; all reads and writes go through it.
    cookies: Cookies,
    // Key used to encrypt on write and decrypt/verify on read.
    key: &'a Key,
}
impl<'a> PrivateCookies<'a> {
    /// Creates an instance of `PrivateCookies` with parent `cookies` and key `key`.
    /// This method is typically called indirectly via the `private`
    /// method of [`Cookies`].
    pub(crate) fn new(cookies: &Cookies, key: &'a Key) -> Self {
        Self {
            cookies: cookies.clone(),
            key,
        }
    }

    /// Adds `cookie` to the parent jar. The cookie's value is encrypted with
    /// authenticated encryption assuring confidentiality, integrity, and
    /// authenticity.
    pub fn add(&self, cookie: Cookie<'static>) {
        let mut inner = self.cookies.inner.lock();
        // Mark the parent jar dirty so the middleware emits `Set-Cookie`.
        inner.changed = true;
        inner.jar().private_mut(self.key).add(cookie);
    }

    /// Returns a reference to the `Cookie` inside this jar with the name `name`
    /// and authenticates and decrypts the cookie's value, returning a `Cookie`
    /// with the decrypted value. If the cookie cannot be found, or the cookie
    /// fails to authenticate or decrypt, `None` is returned.
    pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
        let mut inner = self.cookies.inner.lock();
        inner.jar().private(self.key).get(name)
    }

    /// Removes the `cookie` from the parent jar.
    ///
    /// **To properly generate the removal cookie, `cookie` must contain the same `path` and
    /// `domain` as the cookie that was initially set.** In particular, this means that passing a
    /// cookie from a browser to this method won't work because browsers don't set the cookie's
    /// `path` attribute.
    pub fn remove(&self, cookie: Cookie<'static>) {
        self.cookies.remove(cookie);
    }
}
#[cfg(all(test, feature = "private"))]
mod tests {
    use crate::Cookies;
    use cookie::{Cookie, Key};

    #[test]
    fn get_absent() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        assert_eq!(cookies.private(&key).get("foo"), None);
    }

    #[test]
    fn add_get_private() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        let private = cookies.private(&key);
        private.add(cookie.clone());
        assert_eq!(private.get("foo").unwrap(), cookie);
    }

    #[test]
    fn add_private_get_raw() {
        // The raw jar sees the encrypted value, not the plaintext.
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.private(&key).add(cookie.clone());
        assert_ne!(cookies.get("foo").unwrap(), cookie);
    }

    #[test]
    fn add_raw_get_private() {
        // An unencrypted value must fail decryption via the private jar.
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.add(cookie);
        assert_eq!(cookies.private(&key).get("foo"), None);
    }

    #[test]
    fn messed_keys() {
        // A cookie encrypted with one key must not decrypt under another key.
        let key1 = Key::generate();
        let key2 = Key::generate();
        let cookies = Cookies::new(vec![]);
        let cookie = Cookie::new("foo", "bar");
        cookies.private(&key1).add(cookie);
        assert_eq!(cookies.private(&key2).get("foo"), None);
    }

    #[test]
    fn remove() {
        let key = Key::generate();
        let cookies = Cookies::new(vec![]);
        let private = cookies.private(&key);
        private.add(Cookie::new("foo", "bar"));
        let cookie = private.get("foo").unwrap();
        private.remove(cookie);
        assert!(private.get("foo").is_none());
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/service/future.rs | src/service/future.rs | //! [`Future`] types.
use crate::Cookies;
use futures_util::ready;
use http::{header, HeaderValue, Response};
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
pin_project! {
    /// Response future for [`CookieManager`].
    ///
    /// Drives the wrapped service future and, once it resolves, appends a
    /// `Set-Cookie` header for every cookie changed during the request.
    #[derive(Debug)]
    pub struct ResponseFuture<F> {
        #[pin]
        pub(crate) future: F,
        // Shared cookie state created by `CookieManager::call`.
        pub(crate) cookies: Cookies,
    }
}
impl<F, ResBody, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<Response<ResBody>, E>>,
{
    type Output = F::Output;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Propagate Pending and service errors from the inner future first.
        let mut res = ready!(this.future.poll(cx)?);

        let mut cookies = this.cookies.inner.lock();
        if cookies.changed {
            // `delta()` yields only the cookies added/removed during this
            // request; values that cannot be encoded as a header are dropped.
            let values: Vec<_> = cookies
                .jar()
                .delta()
                .filter_map(|c| HeaderValue::from_str(&c.to_string()).ok())
                .collect();
            let headers = res.headers_mut();
            for value in values {
                headers.append(header::SET_COOKIE, value);
            }
        }

        Poll::Ready(Ok(res))
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/src/service/mod.rs | src/service/mod.rs | //! Middleware to use [`Cookies`].
use self::future::ResponseFuture;
use crate::Cookies;
use http::{header, Request, Response};
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
pub mod future;
/// Middleware to use [`Cookies`].
#[derive(Clone, Debug)]
pub struct CookieManager<S> {
    // The wrapped inner service.
    inner: S,
}
impl<S> CookieManager<S> {
/// Create a new cookie manager.
pub fn new(inner: S) -> Self {
Self { inner }
}
}
impl<ReqBody, ResBody, S> Service<Request<ReqBody>> for CookieManager<S>
where
    S: Service<Request<ReqBody>, Response = Response<ResBody>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = ResponseFuture<S::Future>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated entirely to the wrapped service.
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, mut req: Request<ReqBody>) -> Self::Future {
        // Collect every `Cookie` request header (a request may carry several).
        let value = req
            .headers()
            .get_all(header::COOKIE)
            .iter()
            .cloned()
            .collect();
        let cookies = Cookies::new(value);
        // Expose the shared handle to extractors/handlers via extensions.
        req.extensions_mut().insert(cookies.clone());

        ResponseFuture {
            future: self.inner.call(req),
            cookies,
        }
    }
}
/// Layer to apply [`CookieManager`] middleware.
#[derive(Clone, Debug, Default)]
pub struct CookieManagerLayer {
    // Prevents construction via a struct literal outside this crate.
    _priv: (),
}

impl CookieManagerLayer {
    /// Create a new cookie manager layer.
    pub fn new() -> Self {
        Self { _priv: () }
    }
}

impl<S> Layer<S> for CookieManagerLayer {
    type Service = CookieManager<S>;

    fn layer(&self, inner: S) -> Self::Service {
        CookieManager { inner }
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/examples/hello_world.rs | examples/hello_world.rs | use axum::{routing::get, Router};
use std::net::SocketAddr;
use tower_cookies::{Cookie, CookieManagerLayer, Cookies};
#[tokio::main]
async fn main() {
    // Single route, with the cookie middleware applied to the whole router.
    let app = Router::new()
        .route("/", get(handler))
        .layer(CookieManagerLayer::new());

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
    axum::serve(listener, app.into_make_service())
        .await
        .unwrap();
}
/// Sets a demo cookie and tells the user where to look.
async fn handler(cookies: Cookies) -> &'static str {
    cookies.add(Cookie::new("hello_world", "hello_world"));
    "Check your cookies."
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/examples/signed_private.rs | examples/signed_private.rs | //! The counter-example using private / signed cookies instead of raw ones
//! Can be run by: `cargo run --all-features --example signed_private`
use axum::{routing::get, Router};
use std::net::SocketAddr;
use std::sync::OnceLock;
use tower_cookies::{Cookie, CookieManagerLayer, Cookies, Key};
// Name of the cookie holding the (encrypted) visit counter.
const COOKIE_NAME: &str = "visited_private";

// Process-wide key, initialized once in `main`.
static KEY: OnceLock<Key> = OnceLock::new();

#[tokio::main]
async fn main() {
    let my_key: &[u8] = &[0; 64]; // Your real key must be cryptographically random
    KEY.set(Key::from(my_key)).ok();

    let app = Router::new()
        .route("/", get(handler))
        .layer(CookieManagerLayer::new());

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
    axum::serve(listener, app.into_make_service())
        .await
        .unwrap();
}
/// Tracks visits in an encrypted cookie, resetting the counter after 10 visits.
async fn handler(cookies: Cookies) -> String {
    let key = KEY.get().unwrap();
    let private_cookies = cookies.private(key); // You can use `cookies.signed` as well

    // Unparsable or missing counter values restart from zero.
    let visited = private_cookies
        .get(COOKIE_NAME)
        .and_then(|c| c.value().parse().ok())
        .unwrap_or(0);
    if visited > 10 {
        // Reset by removing the cookie via the parent jar.
        cookies.remove(Cookie::new(COOKIE_NAME, ""));
        "Counter has been reset".into()
    } else {
        private_cookies.add(Cookie::new(COOKIE_NAME, (visited + 1).to_string()));
        format!("You've been here {} times before", visited)
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/examples/counter.rs | examples/counter.rs | use axum::{routing::get, Router};
use std::net::SocketAddr;
use tower_cookies::{Cookie, CookieManagerLayer, Cookies};
// Name of the cookie holding the visit counter.
const COOKIE_NAME: &str = "visited";

#[tokio::main]
async fn main() {
    let app = Router::new()
        .route("/", get(handler))
        .layer(CookieManagerLayer::new());

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
    axum::serve(listener, app.into_make_service())
        .await
        .unwrap();
}
/// Counts visits in a plaintext cookie, resetting the counter after 10 visits.
async fn handler(cookies: Cookies) -> String {
    // Missing or unparsable counter values restart from zero.
    let visits_so_far = cookies
        .get(COOKIE_NAME)
        .and_then(|cookie| cookie.value().parse().ok())
        .unwrap_or(0);

    if visits_so_far <= 10 {
        cookies.add(Cookie::new(COOKIE_NAME, (visits_so_far + 1).to_string()));
        format!("You've been here {} times before", visits_so_far)
    } else {
        cookies.remove(Cookie::new(COOKIE_NAME, ""));
        "Counter has been reset".into()
    }
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
imbolc/tower-cookies | https://github.com/imbolc/tower-cookies/blob/2910227be9735902bfc82715c3d6e30db78dd001/examples/counter-extractor.rs | examples/counter-extractor.rs | //! The example illustrates accessing cookies from an
//! [`axum_core::extract::FromRequest::from_request`] implementation.
//! The behavior is the same as `examples/counter.rs` but cookies leveraging is moved into an
//! extractor.
use axum::{routing::get, Router};
use axum_core::extract::FromRequestParts;
use http::request::Parts;
use std::net::SocketAddr;
use tower_cookies::{Cookie, CookieManagerLayer, Cookies};
// Name of the cookie holding the visit counter.
const COOKIE_NAME: &str = "visited";

/// Number of visits recorded for the current request; extraction reads and
/// increments the `visited` cookie.
struct Counter(usize);

impl<S> FromRequestParts<S> for Counter
where
    S: Send + Sync,
{
    type Rejection = (http::StatusCode, &'static str);

    async fn from_request_parts(req: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        // Delegate to the `Cookies` extractor; fails if the middleware is absent.
        let cookies = Cookies::from_request_parts(req, state).await?;

        let visited = cookies
            .get(COOKIE_NAME)
            .and_then(|c| c.value().parse().ok())
            .unwrap_or(0)
            + 1;
        cookies.add(Cookie::new(COOKIE_NAME, visited.to_string()));
        Ok(Counter(visited))
    }
}
#[tokio::main]
async fn main() {
    let app = Router::new()
        .route("/", get(handler))
        .layer(CookieManagerLayer::new());

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
    axum::serve(listener, app.into_make_service())
        .await
        .unwrap();
}
/// The `Counter` extractor has already bumped the cookie by the time this runs.
async fn handler(counter: Counter) -> String {
    format!("You have visited this page {} times", counter.0)
}
| rust | MIT | 2910227be9735902bfc82715c3d6e30db78dd001 | 2026-01-04T20:20:12.890776Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/lib.rs | src/lib.rs | //! Openapi provides structures and support for serializing and deserializing [openapi](https://github.com/OAI/OpenAPI-Specification) specifications
//!
//! # Examples
//!
//! Typical use is deserializing an existing, persisted spec into Rust form, or
//! vice versa.
//!
//! The hyper client should be configured with tls.
//!
//! ```no_run
//! extern crate openapi;
//!
//! fn main() {
//! match openapi::from_path("path/to/openapi.yaml") {
//! Ok(spec) => println!("spec: {:?}", spec),
//! Err(err) => println!("error: {}", err)
//! }
//! }
//! ```
//!
//! # Errors
//!
//! Operations typically result in a [`Result`] type, an alias for
//! [`std::result::Result`] with the `Err` type fixed to [`Error`],
//! which implements [`std::error::Error`].
//!
use serde::{Deserialize, Serialize};
use std::{fs::File, io::Read, path::Path, result::Result as StdResult};
pub mod error;
pub mod v2;
pub mod v3_0;
pub use error::Error;
// Version requirement used by `Spec::validate_version`; note it is a lower
// bound only, with no upper bound.
const MINIMUM_OPENAPI30_VERSION: &str = ">= 3.0";

/// Crate-wide result alias with the error fixed to [`Error`].
pub type Result<T> = StdResult<T, Error>;
/// Supported versions of the OpenApi.
///
/// Deserialization is `untagged`: serde picks the first variant whose shape
/// matches the document.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum OpenApi {
    /// Version 2.0 of the OpenApi specification.
    ///
    /// Refer to the official
    /// [specification](https://github.com/OAI/OpenAPI-Specification/blob/0dd79f6/versions/2.0.md)
    /// for more information.
    V2(v2::Spec),
    /// Version 3.0.1 of the OpenApi specification.
    ///
    /// Refer to the official
    /// [specification](https://github.com/OAI/OpenAPI-Specification/blob/0dd79f6/versions/3.0.1.md)
    /// for more information.
    #[allow(non_camel_case_types)]
    V3_0(v3_0::Spec),
}
/// deserialize an open api spec from a path
///
/// # Errors
///
/// Returns an I/O error if the file cannot be opened, or a deserialization
/// error from [`from_reader`] if its contents cannot be parsed.
pub fn from_path<P>(path: P) -> Result<OpenApi>
where
    P: AsRef<Path>,
{
    // Buffer the reads: a reader-based YAML parser may otherwise issue many
    // small `read` calls directly against the file, which is needlessly slow.
    from_reader(std::io::BufReader::new(File::open(path)?))
}
/// deserialize an open api spec from type which implements Read
pub fn from_reader<R>(read: R) -> Result<OpenApi>
where
    R: Read,
{
    let spec = serde_yaml::from_reader::<R, OpenApi>(read)?;
    Ok(spec)
}
/// serialize to a yaml string
pub fn to_yaml(spec: &OpenApi) -> Result<String> {
    let yaml = serde_yaml::to_string(spec)?;
    Ok(yaml)
}

/// serialize to a json string
pub fn to_json(spec: &OpenApi) -> Result<String> {
    let json = serde_json::to_string_pretty(spec)?;
    Ok(json)
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use std::{
        fs::{self, read_to_string, File},
        io::Write,
    };

    /// Helper function to write string to file.
    fn write_to_file<P>(path: P, filename: &str, data: &str)
    where
        P: AsRef<Path> + std::fmt::Debug,
    {
        println!(" Saving string to {:?}...", path);
        std::fs::create_dir_all(&path).unwrap();
        let full_filename = path.as_ref().to_path_buf().join(filename);
        let mut f = File::create(&full_filename).unwrap();
        f.write_all(data.as_bytes()).unwrap();
    }

    /// Convert a YAML `&str` to a JSON `String`.
    fn convert_yaml_str_to_json(yaml_str: &str) -> String {
        let yaml: serde_yaml::Value = serde_yaml::from_str(yaml_str).unwrap();
        let json: serde_json::Value = serde_yaml::from_value(yaml).unwrap();
        serde_json::to_string_pretty(&json).unwrap()
    }

    /// Deserialize and re-serialize the input file to a JSON string through two different
    /// paths, comparing the result.
    ///
    /// 1. File -> `String` -> `serde_yaml::Value` -> `serde_json::Value` -> `String`
    /// 2. File -> `Spec` -> `serde_json::Value` -> `String`
    ///
    /// Both conversion of `serde_json::Value` -> `String` are done
    /// using `serde_json::to_string_pretty`.
    /// Since the first conversion is independent of the current crate (and only
    /// uses serde's json and yaml support), no information should be lost in the final
    /// JSON string. The second conversion goes through our `OpenApi`, so the final JSON
    /// string is a representation of _our_ implementation.
    /// By comparing those two JSON conversions, we can validate our implementation.
    fn compare_spec_through_json(
        input_file: &Path,
        save_path_base: &Path,
    ) -> (String, String, String) {
        // First conversion:
        // File -> `String` -> `serde_yaml::Value` -> `serde_json::Value` -> `String`
        // Read the original file to string
        let spec_yaml_str = read_to_string(&input_file)
            .unwrap_or_else(|e| panic!("failed to read contents of {:?}: {}", input_file, e));
        // Convert YAML string to JSON string
        let spec_json_str = convert_yaml_str_to_json(&spec_yaml_str);

        // Second conversion:
        // File -> `Spec` -> `serde_json::Value` -> `String`
        // Parse the input file
        let parsed_spec = from_path(&input_file).unwrap();
        // Convert to serde_json::Value
        let parsed_spec_json = serde_json::to_value(parsed_spec).unwrap();
        // Convert to a JSON string
        let parsed_spec_json_str: String = serde_json::to_string_pretty(&parsed_spec_json).unwrap();

        // Save JSON strings to file, for easier debugging of failures.
        let api_filename = input_file
            .file_name()
            .unwrap()
            .to_str()
            .unwrap()
            .replace(".yaml", ".json");

        let mut save_path = save_path_base.to_path_buf();
        save_path.push("yaml_to_json");
        write_to_file(&save_path, &api_filename, &spec_json_str);

        let mut save_path = save_path_base.to_path_buf();
        save_path.push("yaml_to_spec_to_json");
        write_to_file(&save_path, &api_filename, &parsed_spec_json_str);

        // Return the JSON filename and the two JSON strings
        (api_filename, parsed_spec_json_str, spec_json_str)
    }

    // Just tests if the deserialization does not blow up. But does not test correctness
    #[test]
    fn can_deserialize() {
        for entry in fs::read_dir("data/v2").unwrap() {
            let path = entry.unwrap().path();
            // cargo test -- --nocapture to see this message
            println!("Testing if {:?} is deserializable", path);
            from_path(path).unwrap();
        }
    }

    #[test]
    fn can_deserialize_and_reserialize_v2() {
        let save_path_base: std::path::PathBuf =
            ["target", "tests", "can_deserialize_and_reserialize_v2"]
                .iter()
                .collect();
        for entry in fs::read_dir("data/v2").unwrap() {
            let path = entry.unwrap().path();
            println!("Testing if {:?} is deserializable", path);

            let (api_filename, parsed_spec_json_str, spec_json_str) =
                compare_spec_through_json(&path, &save_path_base);

            // Compare line-by-line so failures show a readable diff.
            assert_eq!(
                parsed_spec_json_str.lines().collect::<Vec<_>>(),
                spec_json_str.lines().collect::<Vec<_>>(),
                "contents did not match for api {}",
                api_filename
            );
        }
    }

    #[test]
    fn can_deserialize_and_reserialize_v3() {
        let save_path_base: std::path::PathBuf =
            ["target", "tests", "can_deserialize_and_reserialize_v3"]
                .iter()
                .collect();
        for entry in fs::read_dir("data/v3.0").unwrap() {
            let entry = entry.unwrap();
            let path = entry.path();
            println!("Testing if {:?} is deserializable", path);

            let (api_filename, parsed_spec_json_str, spec_json_str) =
                compare_spec_through_json(&path, &save_path_base);

            // Compare line-by-line so failures show a readable diff.
            assert_eq!(
                parsed_spec_json_str.lines().collect::<Vec<_>>(),
                spec_json_str.lines().collect::<Vec<_>>(),
                "contents did not match for api {}",
                api_filename
            );
        }
    }

    #[test]
    fn can_deserialize_one_of_v3() {
        let openapi = from_path("data/v3.0/petstore-expanded.yaml").unwrap();
        if let OpenApi::V3_0(spec) = openapi {
            let components = spec.components.unwrap();
            let schemas = components.schemas.unwrap();
            let obj_or_ref = schemas.get("PetSpecies");
            if let Some(v3_0::ObjectOrReference::Object(schema)) = obj_or_ref {
                // there should be 2 schemas in there
                assert_eq!(schema.one_of.as_ref().unwrap().len(), 2);
            } else {
                panic!("object should have been schema");
            }
        }
    }
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/error.rs | src/error.rs | //! Error types
use semver::{SemVerError, Version};
use serde_json::Error as JsonError;
use serde_yaml::Error as YamlError;
use std::io::Error as IoError;
use thiserror::Error;
/// errors that openapi functions may return
#[derive(Error, Debug)]
pub enum Error {
    /// Failure reading the spec file.
    #[error("I/O error")]
    Io(#[from] IoError),
    /// Failure serializing or deserializing YAML.
    #[error("YAML serialization or deserialization error")]
    Yaml(#[from] YamlError),
    /// Failure serializing to JSON.
    #[error("JSON serialization error")]
    Serialize(#[from] JsonError),
    /// The spec's `openapi` field is not valid semver.
    #[error("Semantic Versioning parsing error")]
    SemVerError(#[from] SemVerError),
    /// The spec declares a version this crate does not support.
    #[error("Unsupported spec file version ({0})")]
    UnsupportedSpecFileVersion(Version),
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v3_0/extension.rs | src/v3_0/extension.rs | use std::collections::HashMap;
use std::fmt;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Contains openapi specification extensions
/// see https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#specificationExtensions
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Extensions(HashMap<String, serde_json::Value>);
impl Extensions {
    /// Records extension `ext_id` (typically an `x-` prefixed key — not
    /// enforced here) with `value`, replacing any previous value for that key.
    fn add(&mut self, ext_id: String, value: serde_json::Value) {
        self.0.insert(ext_id, value);
    }

    /// Fetch extension by name
    pub fn get(&self, ext_id: &str) -> Option<&serde_json::Value> {
        self.0.get(ext_id)
    }

    /// A reference to all the captured extensions
    pub fn all(&self) -> &HashMap<String, serde_json::Value> {
        &self.0
    }
}
impl Default for Extensions {
fn default() -> Self {
Self(HashMap::new())
}
}
impl<'de> Deserialize<'de> for Extensions {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ExtensionsVisitor;
impl<'de> Visitor<'de> for ExtensionsVisitor {
type Value = Extensions;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Extensions")
}
fn visit_map<V>(self, mut map: V) -> Result<Extensions, V::Error>
where
V: MapAccess<'de>,
{
let mut extensions = Extensions::default();
while let Some(key) = map.next_key::<String>()? {
if key.starts_with("x-") {
extensions.add(key, map.next_value()?);
}
}
Ok(extensions)
}
}
deserializer.deserialize_map(ExtensionsVisitor)
}
}
impl Serialize for Extensions {
    /// Serializes the captured extensions as a plain map.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = serializer.serialize_map(Some(self.0.len()))?;
        // Serialize entries by reference; cloning the entire map on every
        // serialization was an unnecessary allocation.
        for (k, v) in &self.0 {
            map.serialize_entry(k, v)?;
        }
        map.end()
    }
}
#[cfg(test)]
mod tests {
    use serde_json::Value;
    use serde_test::{assert_tokens, Token};

    use crate::v3_0::extension::Extensions;

    #[test]
    fn test_serde_extensions() {
        let mut extensions = Extensions::default();
        extensions.add(String::from("x-test"), Value::from("val"));

        // Round-trips through serde as a plain single-entry map.
        assert_tokens(
            &extensions,
            &[
                Token::Map { len: Some(1) },
                Token::String("x-test"),
                Token::String("val"),
                Token::MapEnd,
            ],
        )
    }

    #[test]
    fn test_get_extension() {
        let value = Value::from("val");
        let mut extensions = Extensions::default();
        extensions.add(String::from("x-test"), value.clone());

        assert_eq!(extensions.get("x-test"), Some(&value));
    }

    #[test]
    fn test_all_extensions() {
        let value = Value::from("val");
        let mut extensions = Extensions::default();
        extensions.add(String::from("x-test"), value.clone());

        assert_eq!(
            extensions.all().get_key_value("x-test"),
            Some((&"x-test".to_string(), &value))
        );
    }
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v3_0/schema.rs | src/v3_0/schema.rs | //! Schema specification for [OpenAPI 3.0.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md)
use crate::v3_0::extension::Extensions;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use url::Url;
use crate::{
v3_0::components::{BooleanObjectOrReference, Components, ObjectOrReference},
Error, Result, MINIMUM_OPENAPI30_VERSION,
};
impl Spec {
    /// Checks that the spec's `openapi` field parses as semver and satisfies
    /// [`MINIMUM_OPENAPI30_VERSION`], returning the parsed version.
    ///
    /// NOTE(review): the requirement is `>= 3.0` with no upper bound, so a
    /// hypothetical `4.x` document would also pass — confirm this is intended.
    pub fn validate_version(&self) -> Result<semver::Version> {
        let spec_version = &self.openapi;
        let sem_ver = semver::Version::parse(spec_version)?;
        // The requirement string is a compile-time constant, so parsing it
        // cannot fail at runtime.
        let required_version = semver::VersionReq::parse(MINIMUM_OPENAPI30_VERSION).unwrap();
        if required_version.matches(&sem_ver) {
            Ok(sem_ver)
        } else {
            Err(Error::UnsupportedSpecFileVersion(sem_ver))
        }
    }
}
/// top level document
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Spec {
/// This string MUST be the [semantic version number](https://semver.org/spec/v2.0.0.html)
/// of the
/// [OpenAPI Specification version](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#versions)
/// that the OpenAPI document uses. The `openapi` field SHOULD be used by tooling
/// specifications and clients to interpret the OpenAPI document. This is not related to
/// the API
/// [`info.version`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#infoVersion)
/// string.
pub openapi: String,
/// Provides metadata about the API. The metadata MAY be used by tooling as required.
pub info: Info,
/// An array of Server Objects, which provide connectivity information to a target server.
/// If the `servers` property is not provided, or is an empty array, the default value would
/// be a
/// [Server Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject)
/// with a
/// [url](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverUrl)
/// value of `/`.
// FIXME: Provide a default value as specified in documentation instead of `None`.
#[serde(skip_serializing_if = "Option::is_none")]
pub servers: Option<Vec<Server>>,
/// Holds the relative paths to the individual endpoints and their operations. The path is
/// appended to the URL from the
/// [`Server Object`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject)
/// in order to construct the full URL. The Paths MAY be empty, due to
/// [ACL constraints](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#securityFiltering).
pub paths: BTreeMap<String, PathItem>,
/// An element to hold various schemas for the specification.
#[serde(skip_serializing_if = "Option::is_none")]
pub components: Option<Components>,
// FIXME: Implement
// /// A declaration of which security mechanisms can be used across the API.
// /// The list of values includes alternative security requirement objects that can be used.
// /// Only one of the security requirement objects need to be satisfied to authorize a request.
// /// Individual operations can override this definition.
// #[serde(skip_serializing_if = "Option::is_none")]
// pub security: Option<SecurityRequirement>,
/// A list of tags used by the specification with additional metadata.
    /// The order of the tags can be used to reflect on their order by the parsing tools.
/// Not all tags that are used by the
/// [Operation Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject)
/// must be declared. The tags that are not declared MAY be organized randomly or
/// based on the tools' logic. Each tag name in the list MUST be unique.
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<Tag>>,
/// Additional external documentation.
#[serde(skip_serializing_if = "Option::is_none", rename = "externalDocs")]
pub external_docs: Option<ExternalDoc>,
#[serde(flatten)]
pub extensions: Extensions,
}
/// General information about the API.
///
/// Fields without an `Option` wrapper (`title`, `version`) must be present when
/// deserializing a document; optional fields are omitted from output when `None`.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#infoObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
// #[serde(rename_all = "lowercase")]
pub struct Info {
    /// The title of the application.
    pub title: String,
    /// A short description of the application. CommonMark syntax MAY be used for rich text representation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// A URL to the Terms of Service for the API. MUST be in the format of a URL.
    #[serde(rename = "termsOfService", skip_serializing_if = "Option::is_none")]
    pub terms_of_service: Option<Url>,
    /// The version of the OpenAPI document (which is distinct from the [OpenAPI Specification
    /// version](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oasVersion)
    /// or the API implementation version).
    pub version: String,
    /// The contact information for the exposed API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub contact: Option<Contact>,
    /// The license information for the exposed API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub license: Option<License>,
}
/// Contact information for the exposed API.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#contactObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Contact {
    /// The identifying name of the contact person/organization.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// A URL pointing to the contact information.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub url: Option<Url>,
    /// The email address of the contact person/organization.
    // TODO: Make sure the email is a valid email
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
/// License information for the exposed API.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#licenseObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct License {
    /// The license name used for the API.
    pub name: String,
    /// A URL to the license used for the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub url: Option<Url>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
/// An object representing a Server.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Server {
    /// A URL to the target host. This URL supports Server Variables and MAY be relative, to
    /// indicate that the host location is relative to the location where the OpenAPI document
    /// is being served. Variable substitutions will be made when a variable is named
    /// in {brackets}. Must be present when deserializing a document.
    pub url: String,
    /// An optional string describing the host designated by the URL. CommonMark syntax MAY be used for rich text representation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// A map between a variable name and its value. The value is used for substitution in
    /// the server's URL template.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variables: Option<BTreeMap<String, ServerVariable>>,
}
/// An object representing a Server Variable for server URL template substitution.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#serverVariableObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct ServerVariable {
    /// The default value to use for substitution, and to send, if an alternate value is not
    /// supplied. Unlike the Schema Object's default, this value MUST be provided by the consumer.
    /// Must be present when deserializing a document.
    pub default: String,
    /// An enumeration of string values to be used if the substitution options are from a limited
    /// set. Serialized as `enum`.
    #[serde(rename = "enum", skip_serializing_if = "Option::is_none")]
    pub substitutions_enum: Option<Vec<String>>,
    /// An optional description for the server variable. [CommonMark] syntax MAY be used for rich
    /// text representation.
    ///
    /// [CommonMark]: https://spec.commonmark.org/
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Describes the operations available on a single path.
///
/// A Path Item MAY be empty, due to [ACL
/// constraints](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#securityFiltering).
/// The path itself is still exposed to the documentation viewer but they will not know which
/// operations and parameters are available.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct PathItem {
    /// Allows for an external definition of this path item. The referenced structure MUST be
    /// in the format of a
    /// [Path Item Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemObject).
    /// If there are conflicts between the referenced definition and this Path Item's definition,
    /// the behavior is undefined.
    // FIXME: Should this ref be moved to an enum?
    #[serde(skip_serializing_if = "Option::is_none", rename = "$ref")]
    pub reference: Option<String>,
    /// An optional, string summary, intended to apply to all operations in this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// An optional, string description, intended to apply to all operations in this path.
    /// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// A definition of a GET operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub get: Option<Operation>,
    /// A definition of a PUT operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub put: Option<Operation>,
    /// A definition of a POST operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub post: Option<Operation>,
    /// A definition of a DELETE operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delete: Option<Operation>,
    /// A definition of a OPTIONS operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<Operation>,
    /// A definition of a HEAD operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub head: Option<Operation>,
    /// A definition of a PATCH operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub patch: Option<Operation>,
    /// A definition of a TRACE operation on this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub trace: Option<Operation>,
    /// An alternative `server` array to service all operations in this path.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub servers: Option<Vec<Server>>,
    /// A list of parameters that are applicable for all the operations described under this
    /// path. These parameters can be overridden at the operation level, but cannot be removed
    /// there. The list MUST NOT include duplicated parameters. A unique parameter is defined by
    /// a combination of a
    /// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
    /// and
    /// [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
    /// The list can use the
    /// [Reference Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#referenceObject)
    /// to link to parameters that are defined at the
    /// [OpenAPI Object's components/parameters](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsParameters).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<Vec<ObjectOrReference<Parameter>>>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
/// Describes a single API operation on a path.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#operationObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
// #[serde(rename_all = "lowercase")]
pub struct Operation {
    /// A list of tags for API documentation control. Tags can be used for logical grouping of
    /// operations by resources or any other qualifier.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<String>>,
    /// A short summary of what the operation does.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    /// A verbose explanation of the operation behavior.
    /// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Additional external documentation for this operation.
    #[serde(skip_serializing_if = "Option::is_none", rename = "externalDocs")]
    pub external_docs: Option<ExternalDoc>,
    /// Unique string used to identify the operation. The id MUST be unique among all operations
    /// described in the API. Tools and libraries MAY use the operationId to uniquely identify an
    /// operation, therefore, it is RECOMMENDED to follow common programming naming conventions.
    #[serde(skip_serializing_if = "Option::is_none", rename = "operationId")]
    pub operation_id: Option<String>,
    /// A list of parameters that are applicable for this operation. If a parameter is already
    /// defined at the
    /// [Path Item](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#pathItemParameters),
    /// the new definition will override it but can never remove it. The list MUST NOT
    /// include duplicated parameters. A unique parameter is defined by a combination of a
    /// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
    /// and
    /// [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
    /// The list can use the
    /// [Reference Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#referenceObject)
    /// to link to parameters that are defined at the
    /// [OpenAPI Object's components/parameters](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsParameters).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<Vec<ObjectOrReference<Parameter>>>,
    /// The request body applicable for this operation. The requestBody is only supported in HTTP methods where the HTTP 1.1 specification RFC7231 has explicitly defined semantics for request bodies. In other cases where the HTTP spec is vague, requestBody SHALL be ignored by consumers.
    #[serde(skip_serializing_if = "Option::is_none", rename = "requestBody")]
    pub request_body: Option<ObjectOrReference<RequestBody>>,
    /// The list of possible responses as they are returned from executing this operation.
    ///
    /// A container for the expected responses of an operation. The container maps a HTTP
    /// response code to the expected response.
    ///
    /// The documentation is not necessarily expected to cover all possible HTTP response codes
    /// because they may not be known in advance. However, documentation is expected to cover
    /// a successful operation response and any known errors.
    ///
    /// The `default` MAY be used as a default response object for all HTTP codes that are not
    /// covered individually by the specification.
    ///
    /// The `Responses Object` MUST contain at least one response code, and it SHOULD be the
    /// response for a successful operation call.
    ///
    /// This field must be present when deserializing a document (it is not `Option`al).
    ///
    /// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#responsesObject>.
    pub responses: BTreeMap<String, Response>,
    /// A map of possible out-of band callbacks related to the parent operation. The key is
    /// a unique identifier for the Callback Object. Each value in the map is a
    /// [Callback Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#callbackObject)
    /// that describes a request that may be initiated by the API provider and the
    /// expected responses. The key value used to identify the callback object is
    /// an expression, evaluated at runtime, that identifies a URL to use for the
    /// callback operation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub callbacks: Option<BTreeMap<String, Callback>>,
    /// Declares this operation to be deprecated. Consumers SHOULD refrain from usage
    /// of the declared operation. Default value is `false`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deprecated: Option<bool>,
    // FIXME: Implement
    // /// A declaration of which security mechanisms can be used for this operation. The list of
    // /// values includes alternative security requirement objects that can be used. Only one
    // /// of the security requirement objects need to be satisfied to authorize a request.
    // /// This definition overrides any declared top-level
    // /// [`security`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#oasSecurity).
    // /// To remove a top-level security declaration, an empty array can be used.
    // pub security: Option<SecurityRequirement>,
    /// An alternative `server` array to service this operation. If an alternative `server`
    /// object is specified at the Path Item Object or Root level, it will be overridden by
    /// this value.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub servers: Option<Vec<Server>>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
// FIXME: Verify against OpenAPI 3.0
/// Describes a single operation parameter.
/// A unique parameter is defined by a combination of a
/// [name](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterName)
/// and [location](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterIn).
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Parameter {
    /// The name of the parameter.
    pub name: String,
    /// The location of the parameter, serialized as `in`. Values depend on parameter type;
    /// may be `header`, `query`, `path`, `formData`.
    #[serde(rename = "in")]
    pub location: String,
    /// Determines whether this parameter is mandatory.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub required: Option<bool>,
    /// The schema defining the type used for the parameter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schema: Option<Schema>,
    /// Whether array values must be unique (serialized as `uniqueItems`).
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "uniqueItems")]
    pub unique_items: Option<bool>,
    /// string, number, boolean, integer, array, file ( only for formData )
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "type")]
    pub param_type: Option<String>,
    /// The format qualifier for `param_type` (e.g. `int32`, `date-time`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<String>,
    /// A brief description of the parameter. This could contain examples
    /// of use. GitHub Flavored Markdown is allowed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    // collectionFormat: ???
    // default: ???
    // maximum ?
    // exclusiveMaximum ??
    // minimum ??
    // exclusiveMinimum ??
    // maxLength ??
    // minLength ??
    // pattern ??
    // maxItems ??
    // minItems ??
    // enum ??
    // multipleOf ??
    // allowEmptyValue ( for query / body params )
    /// Describes how the parameter value will be serialized depending on the type of the parameter
    /// value. Default values (based on value of in): for `query` - `form`; for `path` - `simple`; for
    /// `header` - `simple`; for cookie - `form`.
    // NOTE(review): this field is private (unlike the `pub` fields above), so it
    // cannot be set by downstream users — confirm whether that is intentional.
    #[serde(skip_serializing_if = "Option::is_none")]
    style: Option<ParameterStyle>,
}
/// The serialization style of a parameter value.
///
/// Serialized in camelCase (e.g. `spaceDelimited`, `deepObject`) via
/// `#[serde(rename_all = "camelCase")]`.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#style-values>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
enum ParameterStyle {
    Matrix,
    Label,
    Form,
    Simple,
    SpaceDelimited,
    PipeDelimited,
    DeepObject,
}
// FIXME: Verify against OpenAPI 3.0
/// The Schema Object allows the definition of input and output data types.
/// These types can be objects, but also primitives and arrays.
/// This object is an extended subset of the
/// [JSON Schema Specification Wright Draft 00](http://json-schema.org/).
/// For more information about the properties, see
/// [JSON Schema Core](https://tools.ietf.org/html/draft-wright-json-schema-00) and
/// [JSON Schema Validation](https://tools.ietf.org/html/draft-wright-json-schema-validation-00).
/// Unless stated otherwise, the property definitions follow the JSON Schema.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#schemaObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Schema {
    /// [JSON reference](https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03)
    /// path to another definition, serialized as `$ref`.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "$ref")]
    pub ref_path: Option<String>,
    /// A description of the schema. [CommonMark syntax](http://spec.commonmark.org/)
    /// MAY be used for rich text representation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// The type of the schema (serialized as `type`). Value MUST be a string;
    /// multiple types via an array are not supported.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "type")]
    pub schema_type: Option<String>,
    /// The format qualifier for the type (e.g. `int32`, `date-time`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<String>,
    /// The set of allowed values, serialized as `enum`.
    // NOTE(review): modeled as strings only; JSON Schema allows enum values of
    // any JSON type — confirm this restriction is intentional.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "enum")]
    pub enum_values: Option<Vec<String>>,
    /// The names of properties that are required on this schema.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub required: Option<Vec<String>>,
    /// The schema for array items; per the spec, MUST be present if `type` is `array`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub items: Option<Box<Schema>>,
    /// Property definitions; each value MUST be a Schema Object.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub properties: Option<BTreeMap<String, Schema>>,
    /// Declares the property as "read only" (serialized as `readOnly`).
    #[serde(skip_serializing_if = "Option::is_none", rename = "readOnly")]
    pub read_only: Option<bool>,
    /// Allows sending a `null` value for the defined schema.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub nullable: Option<bool>,
    /// Value can be boolean or object. Inline or referenced schema MUST be of a
    /// [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#schemaObject)
    /// and not a standard JSON Schema.
    ///
    /// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#properties>.
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename = "additionalProperties"
    )]
    pub additional_properties: Option<BooleanObjectOrReference<Box<Schema>>>,
    /// A free-form property to include an example of an instance for this schema.
    /// To represent examples that cannot be naturally represented in JSON or YAML,
    /// a string value can be used to contain the example with escaping where necessary.
    /// NOTE: According to the [spec], _Primitive data types in the OAS are based on the
    /// types supported by the JSON Schema Specification Wright Draft 00._
    /// This suggests using
    /// [`serde_json::Value`](https://docs.serde.rs/serde_json/value/enum.Value.html).
    ///
    /// [spec]: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#data-types
    #[serde(skip_serializing_if = "Option::is_none")]
    pub example: Option<serde_json::value::Value>,
    /// A short title for the schema.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    // The following properties are taken directly from the JSON Schema definition and
    // follow the same specifications:
    // NOTE(review): the numeric bounds below are modeled as `u32`/`i32`; JSON
    // Schema allows arbitrary numbers — confirm these widths are sufficient.
    #[serde(skip_serializing_if = "Option::is_none", rename = "multipleOf")]
    pub multiple_of: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maximum: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "exclusiveMaximum")]
    pub exclusive_maximum: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub minimum: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "exclusiveMinimum")]
    pub exclusive_minimum: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "maxLength")]
    pub max_length: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "minLength")]
    pub min_length: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "maxItems")]
    pub max_items: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "minItems")]
    pub min_items: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "uniqueItems")]
    pub unique_items: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "maxProperties")]
    pub max_properties: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none", rename = "minProperties")]
    pub min_properties: Option<u32>,
    // The following properties are taken from the JSON Schema definition but their
    // definitions were adjusted to the OpenAPI Specification.
    // - type - Value MUST be a string. Multiple types via an array are not supported.
    // - allOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
    // - oneOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
    // - anyOf - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
    // - not - Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
    // - items - Value MUST be an object and not an array. Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema. `items` MUST be present if the `type` is `array`.
    // - properties - Property definitions MUST be a [Schema Object](#schemaObject) and not a standard JSON Schema (inline or referenced).
    // - additionalProperties - Value can be boolean or object. Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.
    // - description - [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
    // - format - See [Data Type Formats](#dataTypeFormat) for further details. While relying on JSON Schema's defined formats, the OAS offers a few additional predefined formats.
    // - default - The default value represents what would be assumed by the consumer of the input as the value of the schema if one is not provided. Unlike JSON Schema, the value MUST conform to the defined type for the Schema Object defined at the same level. For example, if `type` is `string`, then `default` can be `"foo"` but cannot be `1`.
    /// The default value represents what would be assumed by the consumer of the input as the value
    /// of the schema if one is not provided. Unlike JSON Schema, the value MUST conform to the
    /// defined type for the Schema Object defined at the same level. For example, if type is
    /// `string`, then `default` can be `"foo"` but cannot be `1`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub default: Option<serde_json::Value>,
    /// Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard
    /// JSON Schema.
    /// [allOf](https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#allof)
    #[serde(rename = "allOf", skip_serializing_if = "Option::is_none")]
    pub all_of: Option<Vec<ObjectOrReference<Schema>>>,
    /// Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard
    /// JSON Schema.
    /// [oneOf](https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#oneof)
    #[serde(rename = "oneOf", skip_serializing_if = "Option::is_none")]
    pub one_of: Option<Vec<ObjectOrReference<Schema>>>,
    /// Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard
    /// JSON Schema.
    /// [anyOf](https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#anyof)
    #[serde(rename = "anyOf", skip_serializing_if = "Option::is_none")]
    pub any_of: Option<Vec<ObjectOrReference<Schema>>>,
    /// Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard
    /// JSON Schema.
    /// [not](https://swagger.io/docs/specification/data-models/oneof-anyof-allof-not/#not)
    // NOTE(review): the spec defines `not` as a single schema, not an array —
    // confirm whether `Vec` is intentional here.
    #[serde(rename = "not", skip_serializing_if = "Option::is_none")]
    pub not: Option<Vec<ObjectOrReference<Schema>>>,
    /// [Specification extensions](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#specificationExtensions)
    // NOTE(review): modeled as `HashMap<String, String>` here, unlike the
    // `Extensions` type used by the other structs in this module; non-string
    // extension values will not round-trip — confirm intended.
    #[serde(flatten)]
    pub extensions: HashMap<String, String>,
}
/// Describes a single response from an API Operation, including design-time, static `links`
/// to operations based on the response.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#responseObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Response {
    /// A short description of the response.
    /// [CommonMark syntax](http://spec.commonmark.org/) MAY be used for rich text representation.
    // NOTE(review): the spec marks `description` as REQUIRED, but it is modeled
    // as `Option` here — confirm whether the looseness is intentional.
    pub description: Option<String>,
    /// Maps a header name to its definition.
    /// [RFC7230](https://tools.ietf.org/html/rfc7230#page-22) states header names are case
    /// insensitive. If a response header is defined with the name `"Content-Type"`, it SHALL
    /// be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub headers: Option<BTreeMap<String, ObjectOrReference<Header>>>,
    /// A map containing descriptions of potential response payloads. The key is a media type
    /// or [media type range](https://tools.ietf.org/html/rfc7231#appendix-D) and the value
    /// describes it. For responses that match multiple keys, only the most specific key is
    /// applicable. e.g. text/plain overrides text/*
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<BTreeMap<String, MediaType>>,
    /// A map of operations links that can be followed from the response. The key of the map
    /// is a short name for the link, following the naming constraints of the names for
    /// [Component Objects](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsObject).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub links: Option<BTreeMap<String, ObjectOrReference<Link>>>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
/// The Header Object follows the structure of the
/// [Parameter Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterObject)
/// with the following changes:
/// 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map.
/// 1. `in` MUST NOT be specified, it is implicitly in `header`.
/// 1. All traits that are affected by the location MUST be applicable to a location of
/// `header` (for example, [`style`](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#parameterStyle)).
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#headerObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Header {
// FIXME: Is the third change properly implemented?
// FIXME: Merge `ObjectOrReference<Header>::Reference` and `ParameterOrRef::Reference`
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "uniqueItems")]
pub unique_items: Option<bool>,
/// string, number, boolean, integer, array, file ( only for formData )
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub param_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | true |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v3_0/mod.rs | src/v3_0/mod.rs | //! Support for OpenApi version 3.0.1 specification.
//!
//! See the
//! [specification](https://github.com/OAI/OpenAPI-Specification/blob/0dd79f6/versions/3.0.1.md)
//! for more information.
mod components;
mod extension;
mod schema;
pub use crate::v3_0::{components::*, extension::*, schema::*};
// OpenAPI does not yet have a concrete representation implemented here, so
// `serde_json::Value` is used in place of a custom enum.
// We re-export `serde_json::Value` so that users do not have to include the dependency themselves.
pub use serde_json::Value;
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v3_0/components.rs | src/v3_0/components.rs | use crate::v3_0::{
Callback, Example, Extensions, Header, Link, Parameter, RequestBody, Response, Schema,
SecurityScheme,
};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// Either an inline object of type `T` or a `$ref` reference to one defined elsewhere.
///
/// Deserialized with `#[serde(untagged)]`: the variants are tried in declaration
/// order (`Object` first, then `Ref`), so variant order here is significant.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum ObjectOrReference<T> {
    /// An inline object.
    Object(T),
    /// A reference to an object defined elsewhere in the document.
    Ref {
        /// The reference path, serialized as `$ref` (e.g. `#/components/schemas/Foo`).
        #[serde(rename = "$ref")]
        ref_path: String,
    },
}
/// A boolean, an inline object of type `T`, or a `$ref` reference to one.
///
/// Deserialized with `#[serde(untagged)]`: the variants are tried in declaration
/// order (`Boolean`, then `Object`, then `Ref`), so variant order is significant.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum BooleanObjectOrReference<T> {
    /// A plain boolean value (e.g. `additionalProperties: true`).
    Boolean(bool),
    /// An inline object.
    Object(T),
    /// A reference to an object defined elsewhere in the document.
    Ref {
        /// The reference path, serialized as `$ref`.
        #[serde(rename = "$ref")]
        ref_path: String,
    },
}
/// Holds a set of reusable objects for different aspects of the OAS.
///
/// All objects defined within the components object will have no effect on the API unless
/// they are explicitly referenced from properties outside the components object.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsObject>.
/// Holds a set of reusable objects for different aspects of the OAS.
///
/// All objects defined within the components object will have no effect on the API unless
/// they are explicitly referenced from properties outside the components object.
///
/// See <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#componentsObject>.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Components {
    /// An object to hold reusable Schema Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schemas: Option<BTreeMap<String, ObjectOrReference<Schema>>>,
    /// An object to hold reusable Response Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub responses: Option<BTreeMap<String, ObjectOrReference<Response>>>,
    /// An object to hold reusable Parameter Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<BTreeMap<String, ObjectOrReference<Parameter>>>,
    /// An object to hold reusable Example Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub examples: Option<BTreeMap<String, ObjectOrReference<Example>>>,
    /// An object to hold reusable Request Body Objects.
    #[serde(skip_serializing_if = "Option::is_none", rename = "requestBodies")]
    pub request_bodies: Option<BTreeMap<String, ObjectOrReference<RequestBody>>>,
    /// An object to hold reusable Header Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub headers: Option<BTreeMap<String, ObjectOrReference<Header>>>,
    /// An object to hold reusable Security Scheme Objects.
    #[serde(skip_serializing_if = "Option::is_none", rename = "securitySchemes")]
    pub security_schemes: Option<BTreeMap<String, ObjectOrReference<SecurityScheme>>>,
    /// An object to hold reusable Link Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub links: Option<BTreeMap<String, ObjectOrReference<Link>>>,
    /// An object to hold reusable Callback Objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub callbacks: Option<BTreeMap<String, ObjectOrReference<Callback>>>,
    /// Any additional fields are captured here via `#[serde(flatten)]`.
    #[serde(flatten)]
    pub extensions: Extensions,
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v2/schema.rs | src/v2/schema.rs | use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
// http://json.schemastore.org/swagger-2.0
/// The transfer protocol of the API (Swagger 2.0 `schemes` entry).
///
/// Serialized in lowercase (`http`, `https`, `ws`, `wss`) via
/// `#[serde(rename_all = "lowercase")]`.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Scheme {
    Http,
    Https,
    Ws,
    Wss,
}
impl Default for Scheme {
fn default() -> Self {
Scheme::Http
}
}
/// Top-level Swagger 2.0 document.
///
/// Serialized with camelCase field names (e.g. `securityDefinitions`,
/// `externalDocs`) via the container-level `rename_all`.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Spec {
    /// The Swagger version of this document.
    pub swagger: String,
    /// General metadata about the API.
    pub info: Info,
    /// The host (name or ip) of the API. Example: 'swagger.io'
    /// ^[^{}/ :\\\\]+(?::\\d+)?$
    #[serde(skip_serializing_if = "Option::is_none")]
    pub host: Option<String>,
    /// The base path to the API. Example: '/api'.
    // NOTE: the explicit rename is redundant with the container-level
    // `rename_all = "camelCase"`, but harmless.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "basePath")]
    pub base_path: Option<String>,
    /// The transfer protocols supported by the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schemes: Option<Vec<Scheme>>,
    /// A list of MIME types accepted by the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub consumes: Option<Vec<String>>,
    /// A list of MIME types the API can produce.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub produces: Option<Vec<String>>,
    /// A list of tags with additional metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<Vec<Tag>>,
    /// Relative paths to the individual endpoints. They must be relative
    /// to the 'basePath'.
    pub paths: BTreeMap<String, PathItem>,
    /// Data type definitions that can be referenced from schemas.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub definitions: Option<BTreeMap<String, Schema>>,
    /// Parameter definitions that can be referenced from operations.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<BTreeMap<String, Parameter>>,
    /// mappings to http response codes or "default"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub responses: Option<BTreeMap<String, Response>>,
    /// Security scheme definitions (serialized as `securityDefinitions`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub security_definitions: Option<BTreeMap<String, Security>>,
    /// Security requirements applied across the API.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub security: Option<Vec<BTreeMap<String, Vec<String>>>>,
    /// Additional external documentation (serialized as `externalDocs`).
    // NOTE(review): the Swagger 2.0 spec defines `externalDocs` as a single
    // object, not an array — confirm whether `Vec` is intentional here.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_docs: Option<Vec<ExternalDoc>>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub struct Tag {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub external_docs: Option<Vec<ExternalDoc>>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct ExternalDoc {
pub url: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
/// General information about the API.
///
/// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#info-object
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub struct Info {
/// A unique and precise title of the API.
#[serde(skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
/// A semantic version number of the API.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "termsOfService", skip_serializing_if = "Option::is_none")]
pub terms_of_service: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub contact: Option<Contact>,
#[serde(skip_serializing_if = "Option::is_none")]
pub license: Option<License>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Contact {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
// TODO: Make sure the url is a valid URL
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
// TODO: Make sure the email is a valid email
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
}
/// todo x-* properties
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct License {
/// The name of the license type. It's encouraged to use an OSI
/// compatible license.
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// The URL pointing to the license.
// TODO: Make sure the url is a valid URL
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
}
/// todo support x-* properties
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct PathItem {
#[serde(skip_serializing_if = "Option::is_none")]
pub get: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub post: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub put: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub patch: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delete: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub head: Option<Operation>,
#[serde(skip_serializing_if = "Option::is_none")]
pub parameters: Option<Vec<ParameterOrRef>>,
}
/// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#operation-object
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub struct Operation {
#[serde(skip_serializing_if = "Option::is_none")]
pub summary: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub consumes: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub produces: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schemes: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<Vec<String>>,
#[serde(rename = "operationId", skip_serializing_if = "Option::is_none")]
pub operation_id: Option<String>,
pub responses: BTreeMap<String, Response>,
#[serde(skip_serializing_if = "Option::is_none")]
pub parameters: Option<Vec<ParameterOrRef>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub security: Option<Vec<SecurityRequirement>>,
}
/// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#securityRequirementObject
pub type SecurityRequirement = BTreeMap<String, Vec<String>>;
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct Parameter {
pub name: String,
#[serde(rename = "in")]
pub location: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
pub unique_items: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub param_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub items: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
default: Option<serde_json::Value>,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Response {
pub description: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<Schema>,
}
// todo: support x-* fields
/// https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameter-object
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum ParameterOrRef {
/// both bodyParameter and nonBodyParameter in one for now
Parameter {
/// The name of the parameter.
name: String,
/// values depend on parameter type
/// may be `header`, `query`, 'path`, `formData`
#[serde(rename = "in")]
location: String,
#[serde(skip_serializing_if = "Option::is_none")]
required: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
schema: Option<Schema>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "uniqueItems")]
unique_items: Option<bool>,
/// string, number, boolean, integer, array, file ( only for formData )
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
param_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
format: Option<String>,
/// A brief description of the parameter. This could contain examples
/// of use. GitHub Flavored Markdown is allowed.
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
#[serde(rename = "collectionFormat", skip_serializing_if = "Option::is_none")]
collection_format: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
default: Option<serde_json::Value>,
// maximum ?
// exclusiveMaximum ??
// minimum ??
// exclusiveMinimum ??
// maxLength ??
// minLength ??
// pattern ??
// maxItems ??
// minItems ??
// enum ??
// multipleOf ??
// allowEmptyValue ( for query / body params )
#[serde(skip_serializing_if = "Option::is_none")]
items: Option<Schema>,
#[serde(
rename = "additionalProperties",
skip_serializing_if = "Option::is_none"
)]
additional_properties: Option<Schema>,
},
Ref {
#[serde(rename = "$ref")]
ref_path: String,
},
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
pub enum Security {
#[serde(rename = "apiKey")]
ApiKey {
name: String,
#[serde(rename = "in")]
location: String,
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
#[serde(rename = "oauth2")]
Oauth2 {
flow: Flow,
#[serde(rename = "authorizationUrl")]
authorization_url: String,
#[serde(rename = "tokenUrl")]
#[serde(skip_serializing_if = "Option::is_none")]
token_url: Option<String>,
scopes: BTreeMap<String, String>,
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
#[serde(rename = "basic")]
Basic {
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum Flow {
Implicit,
Password,
Application,
AccessCode,
}
/// A [JSON schema](http://json-schema.org/) definition describing
/// the shape and properties of an object.
///
/// This may also contain a `$ref` to another definition
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Default)]
pub struct Schema {
#[serde(skip_serializing_if = "Option::is_none")]
/// [JSON reference](https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03)
/// path to another defintion
#[serde(rename = "$ref")]
pub ref_path: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "type")]
pub schema_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "enum")]
pub enum_values: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub required: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub items: Option<Box<Schema>>,
// implies object
#[serde(skip_serializing_if = "Option::is_none")]
pub properties: Option<BTreeMap<String, Schema>>,
// composition
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "allOf")]
pub all_of: Option<Vec<Box<Schema>>>,
// TODO: we need a validation step that we only collect x-* properties here.
#[serde(flatten)]
pub other: BTreeMap<String, serde_json::Value>,
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use serde_yaml;
use std::collections::BTreeMap;
#[test]
fn security_api_deserializes() {
let json = r#"{"type":"apiKey", "name":"foo", "in": "query"}"#;
assert_eq!(
serde_yaml::from_str::<Security>(&json).unwrap(),
Security::ApiKey {
name: "foo".into(),
location: "query".into(),
description: None,
}
);
}
#[test]
fn security_api_serializes() {
let json = r#"{"type":"apiKey","name":"foo","in":"query"}"#;
assert_eq!(
serde_json::to_string(&Security::ApiKey {
name: "foo".into(),
location: "query".into(),
description: None,
})
.unwrap(),
json
);
}
#[test]
fn security_basic_deserializes() {
let json = r#"{"type":"basic"}"#;
assert_eq!(
serde_yaml::from_str::<Security>(&json).unwrap(),
Security::Basic { description: None }
);
}
#[test]
fn security_basic_serializes() {
let json = r#"{"type":"basic"}"#;
assert_eq!(
json,
serde_json::to_string(&Security::Basic { description: None }).unwrap()
);
}
#[test]
fn security_oauth_deserializes() {
let json = r#"{"type":"oauth2","flow":"implicit","authorizationUrl":"foo/bar","scopes":{"foo":"bar"}}"#;
let mut scopes = BTreeMap::new();
scopes.insert("foo".into(), "bar".into());
assert_eq!(
serde_yaml::from_str::<Security>(&json).unwrap(),
Security::Oauth2 {
flow: Flow::Implicit,
authorization_url: "foo/bar".into(),
token_url: None,
scopes: scopes,
description: None,
}
);
}
#[test]
fn security_oauth_serializes() {
let json = r#"{"type":"oauth2","flow":"implicit","authorizationUrl":"foo/bar","scopes":{"foo":"bar"}}"#;
let mut scopes = BTreeMap::new();
scopes.insert("foo".into(), "bar".into());
assert_eq!(
json,
serde_json::to_string(&Security::Oauth2 {
flow: Flow::Implicit,
authorization_url: "foo/bar".into(),
token_url: None,
scopes: scopes,
description: None,
})
.unwrap()
);
}
#[test]
fn parameter_or_ref_deserializes_ref() {
let json = r#"{"$ref":"foo/bar"}"#;
assert_eq!(
serde_yaml::from_str::<ParameterOrRef>(&json).unwrap(),
ParameterOrRef::Ref {
ref_path: "foo/bar".into()
}
);
}
#[test]
fn parameter_or_ref_serializes_pref() {
let json = r#"{"$ref":"foo/bar"}"#;
assert_eq!(
json,
serde_json::to_string(&ParameterOrRef::Ref {
ref_path: "foo/bar".into()
},)
.unwrap()
);
}
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/src/v2/mod.rs | src/v2/mod.rs | //! Support for OpenApi version 2.0 specification.
//!
//! See the
//! [specification](https://github.com/OAI/OpenAPI-Specification/blob/0dd79f6/versions/2.0.md)
//! for more information.
mod schema;
pub use crate::v2::schema::*;
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
softprops/openapi | https://github.com/softprops/openapi/blob/0a7c95e34e90541e572d762878d4fe762c8d4048/examples/printer.rs | examples/printer.rs | use anyhow::Result;
fn main() -> Result<()> {
if let Some(path) = std::env::args().nth(1) {
let spec = openapi::from_path(path)?;
/*for (path, op) in spec.paths {
println!("{}", path);
println!("{:#?}", op);
}
for (name, definition) in spec.definitions {
println!("{}", name);
println!("{:#?}", definition);
}*/
println!("{}", openapi::to_json(&spec)?);
}
Ok(())
}
| rust | MIT | 0a7c95e34e90541e572d762878d4fe762c8d4048 | 2026-01-04T20:20:15.952889Z | false |
a137x/plutus-rustus | https://github.com/a137x/plutus-rustus/blob/8684db39b1e1207013938edb7f692caef8351912/src/main.rs | src/main.rs | extern crate bitcoin;
extern crate num_cpus;
extern crate secp256k1;
use std::fs::{self, OpenOptions};
use std::sync::{Arc, RwLock};
use std::{
collections::HashSet,
fs::File,
io::{Read, Write},
time::Instant,
};
use bitcoin::Address;
use bitcoin::{network::constants::Network, PrivateKey, PublicKey};
use secp256k1::{rand, Secp256k1, SecretKey};
use tokio::task;
const DB_VER: &str = "MAR_15_2021";
#[tokio::main]
async fn main() {
// creating empty database
let mut database = HashSet::new();
let timer = Instant::now();
let files = fs::read_dir(get_db_dir().as_str()).unwrap();
for file in files {
let file = file.unwrap();
let file_name = file.file_name().into_string().unwrap();
if file_name.ends_with(".pickle") {
println!("Loading pickle slice from file {:?}", file);
let data = load_pickle_slice(file.path().to_str().unwrap());
// adding addresses to database
for ad in data.iter() {
database.insert(ad.to_string());
}
//database size
println!("Database size {:?} addresses.", database.len());
}
}
println!(
"Load of pickle files completed in {:.2?}, database size: {:?}",
timer.elapsed(),
database.len()
);
// single thread version of processing
// process(&database);
// Multithread version of processing using tokio
// atomic reference counting of database
let database_ = Arc::new(RwLock::new(database));
//get number of logical cores
let num_cores = num_cpus::get();
println!("Running on {} logical cores", num_cores);
//run process on all available cores
for _ in 0..num_cores {
let clone_database_ = Arc::clone(&database_);
task::spawn_blocking(move || {
let current_core = std::thread::current().id();
println!("Core {:?} started", current_core);
let db = clone_database_.read().unwrap();
process(&db);
});
}
}
// write data to file
fn write_to_file(data: &str, file_name: &str) {
let mut file = OpenOptions::new()
.append(true)
.open(file_name)
.expect("Unable to open file");
file.write_all(data.as_bytes()).unwrap();
}
// function that checks address in database and if finds it, writes data to file
fn check_address(
private_key: &PrivateKey,
secret_key: SecretKey,
address: &Address,
database: &HashSet<String>,
public_key: PublicKey,
) {
let address_string = address.to_string();
let _control_address = "15x5ugXCVkzTbs24mG2bu1RkpshW3FTYW8".to_string();
if database.contains(&address_string) {
let data = format!(
"{}{}{}{}{}{}{}{}{}",
secret_key.display_secret(),
"\n",
private_key.to_wif(),
"\n",
public_key.to_string(),
"\n",
address_string.as_str(),
"\n",
"\n",
);
write_to_file(data.as_str(), found_file_path().as_str());
}
}
// load single pickle file from database directory
fn load_pickle_slice(path: &str) -> Vec<String> {
let mut bytes = Vec::new();
File::open(path).unwrap().read_to_end(&mut bytes).unwrap();
let data: Vec<String> =
serde_pickle::from_slice(&bytes, Default::default()).expect("couldn't load pickle");
data
}
// get project dir
fn get_db_dir() -> String {
let mut path = std::env::current_dir().unwrap();
path.push("database");
path.push(DB_VER);
path.to_str().unwrap().to_string()
}
// get found.txt file path
fn found_file_path() -> String {
let mut path = std::env::current_dir().unwrap();
path.push("plutus.txt");
path.to_str().unwrap().to_string()
}
// infinite loop processing function
fn process(database: &HashSet<String>) {
let mut count: f64 = 0.0;
let start = Instant::now();
loop {
// Generating secret key
let secp = Secp256k1::new();
let secret_key = SecretKey::new(&mut rand::thread_rng());
let private_key = PrivateKey::new(secret_key, Network::Bitcoin);
let public_key = PublicKey::from_private_key(&secp, &private_key);
// Generate pay-to-pubkey-hash (P2PKH) wallet address
let address = Address::p2pkh(&public_key, Network::Bitcoin);
// check address against database
check_address(&private_key, secret_key, &address, database, public_key);
// FOR BENCHMARKING ONLY! (has to be commented out for performance gain)
count += 1.0;
if count % 100000.0 == 0.0 {
let current_core = std::thread::current().id();
let elapsed = start.elapsed().as_secs_f64();
println!(
"Core {:?} checked {} addresses in {:.2?}, iter/sec: {}",
current_core,
count,
elapsed,
count / elapsed
);
}
}
}
| rust | MIT | 8684db39b1e1207013938edb7f692caef8351912 | 2026-01-04T20:07:04.785180Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/build.rs | build.rs | use vergen_gitcl::{Emitter, GitclBuilder};
/// This build script will query your `git` executable to fetch the current commit hash, and make
/// it available to the application using an environment variable. This is used to show the commit
/// hash that diff.rs was built with in the footer.
fn main() {
let gitcl = GitclBuilder::all_git().unwrap();
Emitter::default()
.add_instructions(&gitcl)
.unwrap()
.emit()
.unwrap();
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/lib.rs | src/lib.rs | //! # diff.rs
//!
//! Web application to visualize code differences between different versions of Rust crates. Fully
//! backend-less, works by downloading crate sources in the browser, validating the hash, unpacking
//! it, running a diff algorithm over the files and rendering the diff. Support syntax highlighting
//! provided by the `syntect` crate.
mod cache;
pub mod components;
mod data;
mod syntax;
#[cfg(test)]
mod tests;
mod version;
pub mod views;
use crate::{
version::{VersionId, VersionNamed},
views::*,
};
use camino::Utf8PathBuf;
use yew::prelude::*;
use yew_router::prelude::*;
/// Link component which uses this crate's [Route].
pub type Link<R = Route> = yew_router::components::Link<R>;
/// Application routes.
///
/// This struct declares all valid routes in the app, and has a [render()](Self::render) method to render the
/// appropriate view.
///
/// The default route for `/:name` is to render a crate. Therefore, when adding new routes, one
/// must be careful to not alias an existing crate name. For example, adding a route with the path
/// `/serde` would mask the crate view for the `serde` crate.
#[derive(Clone, Routable, PartialEq)]
pub enum Route {
/// Home view, shows search bar and summary.
#[at("/")]
Home,
/// About view, shows information about the application.
#[at("/about")]
About,
/// Search view, shows search results.
#[at("/search/:query")]
Search { query: String },
/// Browse view, will load crate source and redirect to default file.
#[at("/browse/:krate/:version")]
Browse { krate: String, version: VersionId },
/// File browse view.
#[at("/browse/:krate/:version/*path")]
BrowseFile {
krate: String,
version: VersionId,
path: Utf8PathBuf,
},
/// Crate view, will make request to get most recent version and redirect.
#[at("/:krate/")]
Crate { krate: String },
/// Crates view, allows for diffing two crates.
#[at("/:old_krate/:new_krate")]
Crates {
old_krate: String,
new_krate: String,
},
/// File diff view between `old` and `new` versions.
#[at("/:krate/:old_version/:new_version")]
SingleSourceDiff {
krate: String,
old_version: VersionId,
new_version: VersionId,
},
/// File diff view between `old` and `new` versions.
#[at("/:krate/:old_version/:new_version/*path")]
SingleSourceFile {
krate: String,
old_version: VersionId,
new_version: VersionId,
path: Utf8PathBuf,
},
/// File diff view, render differences in the file path between the crate versions.
#[at("/:old_krate/:old_version/:new_krate/:new_version/*path")]
File {
old_krate: String,
old_version: VersionId,
new_krate: String,
new_version: VersionId,
path: Utf8PathBuf,
},
#[at("/repo/:krate/:version/files/*path")]
RepoFile {
krate: String,
version: VersionId,
path: Utf8PathBuf,
},
/// Route that is matched if no other route matches, shows error message.
#[not_found]
#[at("/404")]
NotFound,
}
impl Route {
/// Render this route to a view.
pub fn render(route: Route) -> Html {
match route {
Route::Home => html! { <Home /> },
Route::About => html! { <About /> },
Route::Browse { krate, version } => html! {
<Diff
src_name={krate.clone()}
dst_name={krate}
old={version.clone()}
new={version}
/>
},
Route::BrowseFile {
krate,
version,
path,
} => html! {
<Diff
src_name={krate.clone()}
dst_name={krate}
old={version.clone()}
new={version}
{path}
/>
},
Route::Crate { krate } => html! {
<Diff
src_name={krate.clone()}
dst_name={krate}
old={VersionId::Named(VersionNamed::Previous)}
new={VersionId::Named(VersionNamed::Latest)}
/>
},
Route::Crates {
old_krate,
new_krate,
} => html! {
<Diff
src_name={old_krate}
dst_name={new_krate}
old={VersionId::Named(VersionNamed::Latest)}
new={VersionId::Named(VersionNamed::Latest)}
/>
},
Route::SingleSourceDiff {
krate,
old_version,
new_version,
} => html! {
<Diff src_name={krate.clone()} dst_name={krate} old={old_version} new={new_version} />
},
Route::SingleSourceFile {
krate,
old_version,
new_version,
path,
} => html! {
<Diff src_name={krate.clone()} dst_name={krate} old={old_version} new={new_version} {path} />
},
Route::File {
old_krate,
old_version,
new_krate,
new_version,
path,
} => html! {
<Diff src_name={old_krate} dst_name={new_krate} old={old_version} new={new_version} {path} />
},
Route::NotFound => html! { <NotFound /> },
Route::Search { query } => html! { <Search search={query} /> },
Route::RepoFile {
krate,
version,
path,
} => html! {
<RepoFileView {krate} {version} {path} />
},
}
}
/// Try to simplify a route.
///
/// If the route is a multi-crate route, and the crates are identical, then simplify it to
/// using a single-crate route.
pub fn simplify(self) -> Self {
match self {
Route::File {
old_krate,
old_version,
new_krate,
new_version,
path,
} if old_krate == new_krate => Route::SingleSourceFile {
krate: old_krate,
old_version,
new_version,
path,
},
Route::Crates {
old_krate,
new_krate,
} if old_krate == new_krate => Route::Crate { krate: old_krate },
other => other,
}
}
}
/// Render application.
#[function_component]
pub fn App() -> Html {
html! {
<BrowserRouter>
<Switch<Route> render={Route::render} />
</BrowserRouter>
}
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/version.rs | src/version.rs | use semver::{Error, Version, VersionReq};
use std::{
fmt::{Display, Formatter, Result as FmtResult},
str::FromStr,
};
use strum::EnumString;
#[derive(Debug, PartialEq, Eq, EnumString, Clone, strum::Display)]
#[strum(serialize_all = "kebab-case")]
pub enum VersionNamed {
Latest,
Previous,
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum VersionId {
Named(VersionNamed),
Exact(Version),
Requirement(VersionReq),
}
impl FromStr for VersionId {
type Err = Error;
fn from_str(input: &str) -> Result<Self, Self::Err> {
if let Ok(named) = VersionNamed::from_str(input) {
return Ok(named.into());
}
if let Ok(exact) = Version::from_str(input) {
return Ok(exact.into());
}
Ok(VersionReq::from_str(input)?.into())
}
}
impl Display for VersionId {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
match self {
Self::Named(named) => Display::fmt(named, f),
Self::Exact(version) => Display::fmt(version, f),
Self::Requirement(req) => Display::fmt(req, f),
}
}
}
macro_rules! from {
($ty:ty, $fn:expr) => {
impl From<$ty> for VersionId {
fn from(version: $ty) -> Self {
$fn(version)
}
}
};
}
from!(Version, Self::Exact);
from!(VersionReq, Self::Requirement);
from!(VersionNamed, Self::Named);
#[test]
fn can_parse_version_id() {
assert_eq!(
"latest".parse::<VersionId>().unwrap(),
VersionId::Named(VersionNamed::Latest)
);
assert_eq!(
"previous".parse::<VersionId>().unwrap(),
VersionId::Named(VersionNamed::Previous)
);
assert_eq!(
"0.1.0".parse::<VersionId>().unwrap(),
VersionId::Exact("0.1.0".parse().unwrap()),
);
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/tests.rs | src/tests.rs | use crate::data::*;
use anyhow::Result;
use serde_json::from_reader;
use std::fs::File;
fn parse_canned_response(name: &str) -> Result<CrateResponse> {
let response = File::open(format!("data/{name}.json"))?;
let response: CrateResponse = from_reader(response)?;
Ok(response)
}
fn parse_canned_source(version: &VersionInfo) -> Result<CrateSource> {
let data = std::fs::read(format!("data/{}-{}.crate", version.krate, version.version))?;
let source = CrateSource::new(version.clone(), &data[..])?;
Ok(source)
}
#[test]
fn test_crate_response_decode_serde() {
let response = parse_canned_response("serde").unwrap();
assert_eq!(response.krate.id, "serde");
}
#[test]
fn test_crate_response_decode_axum() {
let response = parse_canned_response("axum").unwrap();
assert_eq!(response.krate.id, "axum");
}
#[test]
fn test_crate_response_decode_reqwest() {
let response = parse_canned_response("reqwest").unwrap();
assert_eq!(response.krate.id, "reqwest");
}
#[test]
fn test_crate_response_decode_log() {
let response = parse_canned_response("log").unwrap();
assert_eq!(response.krate.id, "log");
}
#[test]
fn can_parse_crate_source_log_0_4_15() {
let log = parse_canned_response("log").unwrap();
let version = log.version("0.4.15".parse().unwrap()).unwrap();
let _ = parse_canned_source(version).unwrap();
}
#[test]
fn can_parse_crate_source_log_0_4_16() {
let log = parse_canned_response("log").unwrap();
let version = log.version("0.4.16".parse().unwrap()).unwrap();
let _ = parse_canned_source(version).unwrap();
}
#[test]
fn can_parse_crate_source_log_0_4_17() {
let log = parse_canned_response("log").unwrap();
let version = log.version("0.4.17".parse().unwrap()).unwrap();
let _ = parse_canned_source(version).unwrap();
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views.rs | src/views.rs | //! # Application views
//!
//! This module contains all of the views of the application. Views are pages that
//! can be rendered (selected via the active route). Any components which are used
//! by more than one view (or are sufficiently complex) should go into the `components`
//! module, which contains components shared between views.
mod about;
mod diff;
mod home;
mod not_found;
mod repo;
mod search;
pub use self::{about::*, diff::*, home::Home, not_found::NotFound, repo::*, search::Search};
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/syntax.rs | src/syntax.rs | use similar::ChangeTag;
use subslice_offset::SubsliceOffset;
use syntect::{
easy::HighlightLines,
highlighting::{Color, FontStyle, Style, Theme, ThemeSet},
parsing::{SyntaxReference, SyntaxSet},
};
lazy_static::lazy_static! {
/// The default syntect syntax set, used for parsing language definitions.
static ref SYNTAX_SET: SyntaxSet = SyntaxSet::load_defaults_newlines();
/// The default syntect theme set, currently only one theme is ever used.
static ref THEME_SET: ThemeSet = ThemeSet::load_defaults();
/// Theme definition from the default syntect theme set.
static ref THEME: &'static Theme = &THEME_SET.themes["InspiredGitHub"];
}
/// Get the `SyntaxReference` from `SYNTAX_SET` to use for syntax highlighting
/// the given file.
///
/// It will be based first on the file's name, then the file's extension, and
/// finally based on the first line of the file.
pub fn infer_syntax_for_file(path: &str, first_line: Option<&str>) -> &'static SyntaxReference {
// Determine which syntax should be used for this file. It will be based
// first on the file's name, then the file's extension, then the first line.
let (_, file_name) = path.rsplit_once('/').unwrap_or(("", path));
let (_, extension) = file_name.rsplit_once('.').unwrap_or(("", file_name));
SYNTAX_SET
.find_syntax_by_extension(file_name)
.or_else(|| SYNTAX_SET.find_syntax_by_extension(extension))
.or_else(|| first_line.and_then(|line| SYNTAX_SET.find_syntax_by_first_line(line)))
.unwrap_or_else(|| SYNTAX_SET.find_syntax_plain_text())
}
/// Highlight a single line as a bytes slice, avoiding extra copies.
fn highlight_bytes_line(
highlight_lines: &mut HighlightLines<'_>,
tag: ChangeTag,
bytes: &bytes::Bytes,
) -> Option<Vec<(Style, bytes::Bytes)>> {
// Don't highlight removal lines, as it could confuse the parser.
if tag == ChangeTag::Delete {
return None;
}
let line = std::str::from_utf8(&bytes[..]).ok()?;
let styles = highlight_lines.highlight_line(line, &SYNTAX_SET).ok()?;
// Map each chunk back to the bytes slice to avoid unnecessary copies.
Some(
styles
.into_iter()
.map(|(style, chunk)| {
let start = line[..].subslice_offset(chunk).unwrap();
(style, bytes.slice(start..start + chunk.len()))
})
.collect::<Vec<_>>(),
)
}
/// Apply syntax highlighting to a list of changes using the listed syntax.
pub fn highlight_changes(
syntax: &'static SyntaxReference,
changes: &[(ChangeTag, bytes::Bytes)],
) -> Vec<(ChangeTag, Vec<(Style, bytes::Bytes)>)> {
let default_style = Style {
foreground: THEME.settings.foreground.unwrap_or(Color::BLACK),
background: THEME.settings.background.unwrap_or(Color::WHITE),
font_style: FontStyle::empty(),
};
let mut highlight_lines = HighlightLines::new(syntax, &THEME);
changes
.iter()
.map(|(tag, bytes)| {
let styled = highlight_bytes_line(&mut highlight_lines, *tag, bytes)
.unwrap_or_else(|| vec![(default_style, bytes.clone())]);
(*tag, styled)
})
.collect()
}
/// Convert the given syntect style to inline `style` attribute formatting.
///
/// Does not apply background colors.
pub fn syntect_style_to_css(style: &Style) -> String {
    let mut css = format!(
        "color:#{:02x}{:02x}{:02x};",
        style.foreground.r, style.foreground.g, style.foreground.b
    );
    if style.font_style.contains(FontStyle::UNDERLINE) {
        css.push_str("text-decoration:underline;");
    }
    if style.font_style.contains(FontStyle::BOLD) {
        css.push_str("font-weight:bold;");
    }
    // BUG FIX: this previously tested `FontStyle::BOLD` a second time, so
    // italic spans were never italicized and bold spans were also italicized.
    if style.font_style.contains(FontStyle::ITALIC) {
        css.push_str("font-style:italic;");
    }
    css
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/main.rs | src/main.rs | /// Initialize logging.
fn init_logging() {
    use log::Level;
    use wasm_logger::Config;
    // use debug level for debug builds, warn level for production builds.
    #[cfg(debug_assertions)]
    let level = Level::Debug;
    #[cfg(not(debug_assertions))]
    let level = Level::Warn;
    wasm_logger::init(Config::new(level));
}
/// Entry point: set up logging and mount the Yew application into the page.
fn main() {
    init_logging();
    yew::Renderer::<diff_rs::App>::new().render();
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components.rs | src/components.rs | //! # Shared components
//!
//! This module contains shared components. These are components which are shared between multiple
//! views. Components which are only used by a single view can be kept inside the view's definition
//! itself, unless they are generic or too complex.
mod diff_view;
mod file_tree;
mod footer;
mod layout;
mod navigation;
mod non_ideal;
mod search;
pub use self::{
diff_view::*, file_tree::*, footer::*, layout::*, navigation::*, non_ideal::*, search::*,
};
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/data.rs | src/data.rs | use crate::version::{VersionId, VersionNamed};
use anyhow::{anyhow, Result};
use bytes::Bytes;
use camino::{Utf8Component, Utf8Path, Utf8PathBuf};
use flate2::bufread::GzDecoder;
use gloo_net::http::Request;
use log::*;
use semver::Version;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use similar::{ChangeTag, TextDiff};
use std::{
collections::{BTreeMap, BTreeSet},
io::Read,
ops::Range,
rc::Rc,
sync::Arc,
};
use subslice_offset::SubsliceOffset;
use tar::Archive;
use url::Url;
/// Crates.io response type for crate search
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct SearchResponse {
    /// Crates matching the search query.
    pub crates: Vec<CrateDetail>,
}
/// Crates.io response for summary fetch
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct SummaryResponse {
    pub just_updated: Vec<CrateDetail>,
    pub most_downloaded: Vec<CrateDetail>,
    pub most_recently_downloaded: Vec<CrateDetail>,
    pub new_crates: Vec<CrateDetail>,
}
/// Category of crate list available in the summary response.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum SummaryCategory {
    MostDownloaded,
    MostRecent,
    JustUpdated,
    RecentDownloads,
}
impl SummaryCategory {
    /// Human-readable heading for this category.
    pub fn title(&self) -> &str {
        match self {
            SummaryCategory::MostDownloaded => "Most Downloaded",
            SummaryCategory::MostRecent => "New Crates",
            SummaryCategory::JustUpdated => "Just Updated",
            SummaryCategory::RecentDownloads => "Most Recent Downloads",
        }
    }
}
impl SummaryResponse {
    /// Look up the crate list for the given category.
    pub fn get(&self, cat: SummaryCategory) -> &Vec<CrateDetail> {
        match cat {
            SummaryCategory::JustUpdated => &self.just_updated,
            SummaryCategory::MostDownloaded => &self.most_downloaded,
            SummaryCategory::RecentDownloads => &self.most_recently_downloaded,
            SummaryCategory::MostRecent => &self.new_crates,
        }
    }
}
/// Crate info struct, returned as part of the crates.io response.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct CrateDetail {
    pub id: String,
    pub max_version: Version,
    pub max_stable_version: Option<Version>,
    pub newest_version: Version,
    pub description: String,
    pub downloads: u64,
    pub recent_downloads: Option<u64>,
    pub exact_match: bool,
    pub homepage: Option<Url>,
    pub repository: Option<Url>,
    pub documentation: Option<Url>,
}
/// Crates.io response type for crate lookup
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct CrateResponse {
    //pub categories: BTreeSet<String>,
    #[serde(rename = "crate")]
    pub krate: CrateDetail,
    /// All published versions, as returned by crates.io.
    pub versions: Vec<VersionInfo>,
}
/// Version info struct, returned as part of the crates.io response.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct VersionInfo {
    /// Checksum of the crate tarball, hex-encoded in the API response.
    #[serde(with = "hex")]
    pub checksum: Vec<u8>,
    #[serde(rename = "crate")]
    pub krate: String,
    pub dl_path: String,
    pub yanked: bool,
    #[serde(rename = "num")]
    pub version: Version,
    //pub id: u64,
    //pub crate_size: Option<u64>,
    //pub downloads: u64,
    //pub license: Option<String>,
}
impl CrateResponse {
    /// Fetch a CrateResponse for the given crate.
    pub async fn fetch(name: &str) -> Result<Self> {
        info!("Fetching crate metadata for {name} from network");
        let base: Url = "https://crates.io/api/v1/crates/".parse()?;
        let url = base.join(name)?;
        let response = Request::get(url.as_str()).send().await?;
        if response.ok() {
            Ok(response.json().await?)
        } else {
            Err(anyhow!("Error response: {}", response.status()))
        }
    }
    /// Resolve a [`VersionId`] against the versions in this response.
    ///
    /// NOTE(review): `Latest`/`Previous` assume `versions` is ordered
    /// newest-first as returned by the crates.io API — confirm if the API
    /// ordering guarantee ever changes.
    pub fn version(&self, version: VersionId) -> Option<&VersionInfo> {
        match version {
            VersionId::Exact(version) => self.versions.iter().find(|v| v.version == version),
            VersionId::Named(VersionNamed::Latest) => self.versions.first(),
            VersionId::Named(VersionNamed::Previous) => {
                // fall back to the newest version when only one exists
                self.versions.get(1).or(self.versions.first())
            }
            VersionId::Requirement(req) => self
                .versions
                .iter()
                .filter(|v| req.matches(&v.version))
                .max_by_key(|v| &v.version),
        }
    }
}
impl VersionInfo {
    /// Get download URL for this crate
    ///
    /// We purposefully construct a URL here and don't use the one returned in the response,
    /// because we want to download it from the CDN instead of from the API (so it does not count
    /// towards crate downloads).
    pub fn download_url(&self) -> Result<Url> {
        let Self { krate, version, .. } = &self;
        let url = format!("https://static.crates.io/crates/{krate}/{krate}-{version}.crate");
        let url = url.parse()?;
        Ok(url)
    }
    /// Fetch a crate source for the given version.
    ///
    /// The downloaded tarball is checksum-verified and unpacked by
    /// [`CrateSource::new`].
    pub async fn fetch(&self) -> Result<CrateSource> {
        info!(
            "Fetching crate source for {} v{} from network",
            self.krate, self.version
        );
        let url = self.download_url()?;
        let response = Request::get(url.as_str()).send().await?;
        if !response.ok() {
            return Err(anyhow!("Error response: {}", response.status()));
        }
        let bytes: Bytes = response.binary().await?.into();
        let source = CrateSource::new(self.clone(), &bytes[..])?;
        Ok(source)
    }
}
/// Information needed to download a crate's source from its VCS repository.
#[derive(Clone, Debug, PartialEq)]
pub struct RepositoryInfo {
    pub repository: Url,
    pub vcs_info: CargoVcsInfo,
}
impl RepositoryInfo {
    /// Build a download URL for a tarball of the exact commit this crate was
    /// published from.
    ///
    /// Only GitHub and GitLab are supported; returns `None` for other hosts.
    /// Requests are routed through corsproxy.io, presumably because the
    /// archive hosts do not send browser-usable CORS headers.
    pub fn url(&self) -> Option<Url> {
        if self.repository.as_str().starts_with("https://github.com/") {
            let mut url = Url::parse("https://codeload.github.com/").unwrap();
            url.path_segments_mut()
                .unwrap()
                .extend(self.repository.path_segments().unwrap())
                .extend(&["tar.gz", &self.vcs_info.git.sha1]);
            let url = format!("https://corsproxy.io/?{url}").parse().unwrap();
            return Some(url);
        }
        if self.repository.as_str().starts_with("https://gitlab.com/") {
            let mut url = self.repository.clone();
            url.path_segments_mut().unwrap().extend(&[
                "-",
                "archive",
                &format!("{}.tar.gz", self.vcs_info.git.sha1),
            ]);
            let url = format!("https://corsproxy.io/?{url}").parse().unwrap();
            return Some(url);
        }
        None
    }
    /// Path prefix of the crate's files inside the repository archive:
    /// `<repo>-<sha1>/`, plus `path_in_vcs/` for workspace members.
    fn prefix(&self) -> String {
        let repo = self.repository.path().split('/').next_back().unwrap_or("");
        let mut prefix = format!("{repo}-{}/", self.vcs_info.git.sha1);
        if !self.vcs_info.path_in_vcs.is_empty() {
            prefix.push_str(&self.vcs_info.path_in_vcs);
            prefix.push('/');
        }
        prefix
    }
    /// Download and unpack the repository archive for this crate.
    ///
    /// Uses a placeholder [`VersionInfo`] (repository archives carry no
    /// crates.io metadata) and skips checksum verification accordingly.
    pub async fn fetch(&self) -> Result<CrateSource> {
        let version = VersionInfo {
            checksum: vec![],
            dl_path: Default::default(),
            krate: "".into(),
            yanked: false,
            version: "0.0.0".parse().unwrap(),
        };
        let url = self
            .url()
            .ok_or(anyhow::anyhow!("cannot get repository URL"))?;
        let response = Request::get(url.as_str()).send().await?;
        if !response.ok() {
            return Err(anyhow!("Error response: {}", response.status()));
        }
        let bytes = response.binary().await?;
        let prefix = self.prefix();
        Ok(CrateSource {
            version,
            // do not error on files outside the prefix: repository archives
            // legitimately contain sibling workspace members
            files: CrateSource::parse_archive(&prefix, &bytes[..], false)?,
        })
    }
}
/// Mapping from path inside the archive to raw file contents.
type FileContents = BTreeMap<Utf8PathBuf, Bytes>;
/// Crate source
///
/// This is parsed from the gzipped tarball that crates.io serves for every crate.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CrateSource {
    pub version: VersionInfo,
    pub files: FileContents,
}
#[derive(thiserror::Error, Debug)]
pub enum CrateSourceError {
    /// We get an expected hashsum in the crate info response from crates.io. When
    /// we download a crate, we verify that the data we got matches this. If not,
    /// return an error here.
    #[error("hashsum mismatch in crate response: expected {expected:02x?} but got {got:02x?}")]
    HashsumMismatch { expected: Vec<u8>, got: Vec<u8> },
    /// These errors can be caused by the decompression (flate2 crate) or the untarring (tar
    /// crate).
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// In tar archives, paths are represented as raw bytes. We expect that these are valid UTF-8
    /// encoded strings. If this is not the case, we return an error. This is safer than using
    /// something like [`String::from_utf8_lossy`], because an attacker could place two files with
    /// invalid characters which would result in the same (lossy) path, thereby hiding the presence
    /// of a file from the user interface.
    #[error("error decoding path as utf8")]
    PathEncoding(#[from] std::str::Utf8Error),
    /// Crate tar archives contain files prefixed under the path `<crate>-<version>`. There should
    /// not be any other files in this archive. If we encounter a file with a different path
    /// prefix, we return an error here. Those files would otherwise be invisible to the user
    /// interface.
    #[error("encountered invalid prefix in path {path} (expected {prefix})")]
    InvalidPrefix { path: String, prefix: String },
}
impl CrateSource {
    /// Parse and verify a crate source archive for the given version.
    ///
    /// # Errors
    /// Fails when the SHA-256 checksum of `data` does not match the one from
    /// the crates.io metadata, or when the archive cannot be decoded.
    pub fn new(version: VersionInfo, data: &[u8]) -> Result<Self, CrateSourceError> {
        // compute hash
        let mut hasher = Sha256::new();
        hasher.update(data);
        let hash = hasher.finalize();
        // make sure hash matches
        if hash[..] != version.checksum[..] {
            return Err(CrateSourceError::HashsumMismatch {
                expected: version.checksum.clone(),
                got: hash[..].to_vec(),
            });
        }
        let prefix = format!("{}-{}/", version.krate, version.version);
        let source = CrateSource {
            version,
            files: Self::parse_archive(&prefix, data, true)?,
        };
        Ok(source)
    }
    /// Parse gzipped archive.
    ///
    /// Strips `prefix` from every entry path. Entries outside the prefix are
    /// either an error (`error_outside_prefix`, used for crates.io tarballs)
    /// or silently skipped (used for repository archives).
    fn parse_archive(
        prefix: &str,
        data: &[u8],
        error_outside_prefix: bool,
    ) -> Result<FileContents, CrateSourceError> {
        let mut data = GzDecoder::new(data);
        let mut archive = Archive::new(&mut data);
        let mut files = FileContents::default();
        // this is the path prefix we expect in the archive.
        for entry in archive.entries()? {
            let mut entry = entry?;
            //debug!("{} {:?}", entry.path().unwrap().display(), entry.header().entry_type());
            // skip directories, symlinks and other non-file entries
            if !entry.header().entry_type().is_file() {
                continue;
            }
            // make path encoding error explicit
            let bytes = entry.path_bytes();
            let path = std::str::from_utf8(&bytes)?;
            let path = match path.strip_prefix(prefix) {
                Some(path) => path,
                None if error_outside_prefix => {
                    return Err(CrateSourceError::InvalidPrefix {
                        path: path.to_string(),
                        prefix: prefix.into(),
                    })
                }
                None => continue,
            };
            let path: Utf8PathBuf = path.into();
            // read data
            let mut data = vec![];
            entry.read_to_end(&mut data)?;
            debug!("Storing path {path} ({} bytes)", data.len());
            // store data
            files.insert(path, data.into());
        }
        Ok(files)
    }
    /// Get [`CargoVcsInfo`] from the crate sources.
    pub fn cargo_vcs_info(&self) -> Result<CargoVcsInfo, CargoVcsInfoError> {
        let raw = self
            .files
            .get(Utf8Path::new(".cargo_vcs_info.json"))
            .ok_or(CargoVcsInfoError::Missing)?;
        let decoded = serde_json::from_slice(raw)?;
        Ok(decoded)
    }
}
#[derive(thiserror::Error, Debug)]
pub enum CargoVcsInfoError {
    #[error("missing .cargo_vcs_info.json")]
    Missing,
    #[error("cannot decode .cargo_vcs_info.json")]
    Decode(#[from] serde_json::Error),
}
/// Contents of the `.cargo_vcs_info.json` file cargo embeds in published crates.
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub struct CargoVcsInfo {
    git: CargoGitInfo,
    // path of the crate within the repository (empty for the repository root)
    path_in_vcs: String,
}
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub struct CargoGitInfo {
    // commit hash the crate was published from
    sha1: String,
}
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub struct FileDiff {
    /// Diff in this file
    pub changes: Vec<(ChangeTag, Bytes)>,
    /// Ranges of lines to show for each file
    pub context_ranges: Vec<ChunkInfo>,
    // Redundant - alternatively take from files
    pub summary: Changes,
}
/// Precomputed diff data
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct VersionDiff {
    /// Left crate source that is diffed
    pub left: Arc<CrateSource>,
    /// Right crate source that is diffed
    pub right: Arc<CrateSource>,
    /// Files in this version diff
    pub files: BTreeMap<Utf8PathBuf, FileDiff>,
    /// Summaries of files and folders
    pub summary: BTreeMap<Utf8PathBuf, (usize, usize)>,
    /// Tree of entries with per-node change counts (for the file tree view).
    pub tree: Entry,
}
/// How many lines of context to show in a diff
const CONTEXT_LINES: usize = 3;
impl VersionDiff {
    /// Generate diff data
    ///
    /// Computes a line diff for every file in either source, per-file hunk
    /// ranges (with context), per-path insertion/deletion summaries, and the
    /// file tree.
    pub fn new(left: Arc<CrateSource>, right: Arc<CrateSource>) -> Self {
        info!(
            "Computing diff for {} version {} and {} version {}",
            left.version.krate, left.version.version, right.version.krate, right.version.version
        );
        let mut entry = Entry::default();
        let mut files = BTreeMap::new();
        let mut summary: BTreeMap<Utf8PathBuf, (usize, usize)> = BTreeMap::new();
        // union of file paths present in either crate source (the BTreeSet
        // deduplicates paths present in both)
        let file_paths: BTreeSet<&Utf8Path> = left
            .files
            .keys()
            .chain(right.files.keys())
            .map(|s| s.as_path())
            .collect();
        // compute diffs
        for path in file_paths.into_iter() {
            info!("Computing diff for {path}");
            // lookup files, default to empty bytes (file added or removed)
            let left = left.files.get(path).cloned().unwrap_or_default();
            let right = right.files.get(path).cloned().unwrap_or_default();
            // generate text diff
            let diff = TextDiff::from_lines(&left[..], &right[..]);
            // collect changes
            let changes: Vec<_> = diff
                .iter_all_changes()
                .map(|change| {
                    // soo... we do an awkward little dance here. our data is a Bytes struct, which we
                    // can cheaply get subslices from. the diff algorithm gets a &[u8] and every
                    // change gives us a &[u8]. now, we want to figure out what the offset of this
                    // &[u8] was from the original bytes, so that we can call .slice() on it to get a
                    // cheap reference-counted bytes rather than having to clone it. so we use the
                    // subslice_offset crate which lets us do exactly that.
                    let value = change.value();
                    let value = [&left, &right]
                        .iter()
                        .find_map(|b| {
                            b[..]
                                .subslice_offset(value)
                                .map(|index| b.slice(index..index + value.len()))
                        })
                        .unwrap();
                    (change.tag(), value)
                })
                .collect();
            // for every changed line: (index in `changes`, insertions seen so
            // far, deletions seen so far) — used below to compute hunk starts
            let mut offsets = vec![];
            let mut insertions = 0;
            let mut deletions = 0;
            for (index, (tag, _)) in changes.iter().enumerate() {
                match tag {
                    ChangeTag::Equal => {}
                    ChangeTag::Delete => {
                        // cnt for determining start idx of hunk, wanna start before this line, so do not count current line
                        offsets.push((index, insertions, deletions));
                        deletions += 1;
                    }
                    ChangeTag::Insert => {
                        offsets.push((index, insertions, deletions));
                        insertions += 1;
                    }
                }
            }
            // compute ranges to show, merging hunks that overlap (or nearly
            // overlap, within CONTEXT_LINES) with the previous one
            let mut ranges = vec![];
            let mut last_hunk = (0..0, 0, 0);
            for (offset, ins, del) in offsets.iter() {
                let hunk_start = offset.saturating_sub(CONTEXT_LINES);
                let left_start = hunk_start.saturating_sub(*ins);
                let right_start = hunk_start.saturating_sub(*del);
                let hunk = (
                    hunk_start..*offset + CONTEXT_LINES + 1,
                    left_start,
                    right_start,
                );
                let overlaps_with_last_hunk = hunk.0.start.max(last_hunk.0.start)
                    <= hunk.0.end.min(last_hunk.0.end) + CONTEXT_LINES;
                if overlaps_with_last_hunk {
                    // extend the previous hunk instead of starting a new one
                    last_hunk = (last_hunk.0.start..hunk.0.end, last_hunk.1, last_hunk.2);
                } else {
                    if last_hunk.0.end != 0 {
                        ranges.push(last_hunk.clone().into());
                    }
                    last_hunk = hunk;
                }
            }
            // Push the last hunk we've computed if any
            if last_hunk.0.end != 0 {
                ranges.push(last_hunk.into())
            }
            // accumulate insertions/deletions for this file and all ancestors
            for path in path.ancestors() {
                let summary = summary.entry(path.into()).or_default();
                summary.0 += insertions;
                summary.1 += deletions;
            }
            entry.insert(
                path,
                Changes {
                    added: insertions as u64,
                    removed: deletions as u64,
                },
            );
            files.insert(
                path.into(),
                FileDiff {
                    changes,
                    context_ranges: ranges,
                    summary: Changes {
                        added: insertions as u64,
                        removed: deletions as u64,
                    },
                },
            );
        }
        VersionDiff {
            left,
            right,
            files,
            summary,
            tree: entry,
        }
    }
}
/// A hunk of diff output: a range of change indices plus the line numbers at
/// which the hunk starts on the left (old) and right (new) side.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub struct ChunkInfo {
    /// Indices into the flat change list covered by this chunk.
    pub range: Range<usize>,
    /// First line of the chunk in the left file.
    pub left_start: usize,
    /// First line of the chunk in the right file.
    pub right_start: usize,
}
impl From<(Range<usize>, usize, usize)> for ChunkInfo {
    fn from(parts: (Range<usize>, usize, usize)) -> Self {
        let (range, left_start, right_start) = parts;
        Self {
            range,
            left_start,
            right_start,
        }
    }
}
impl ChunkInfo {
    /// Index of the first change covered by this chunk.
    pub fn start(&self) -> usize {
        self.range.start
    }
    /// Index one past the last change covered by this chunk.
    pub fn end(&self) -> usize {
        self.range.end
    }
}
/// Tally of added and removed lines for a file or directory.
#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]
pub struct Changes {
    pub added: u64,
    pub removed: u64,
}
impl std::ops::Add for Changes {
    type Output = Self;
    /// Component-wise sum of two tallies.
    fn add(self, rhs: Self) -> Self {
        Self {
            added: self.added + rhs.added,
            removed: self.removed + rhs.removed,
        }
    }
}
impl std::ops::AddAssign for Changes {
    /// Accumulate another tally into this one.
    fn add_assign(&mut self, rhs: Self) {
        self.added += rhs.added;
        self.removed += rhs.removed;
    }
}
/// Change state of a file-tree entry.
#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]
pub enum State {
    #[default]
    Unchanged,
    Added,
    Deleted,
}
/// Node payload in the file tree: a file, or a directory with named children.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Item {
    File,
    Dir(BTreeMap<String, Rc<Entry>>),
}
impl Item {
    /// Returns true if this item is a directory.
    pub fn is_dir(&self) -> bool {
        matches!(self, Item::Dir(_))
    }
}
impl Default for Item {
    fn default() -> Self {
        // a fresh node starts as an empty directory; `Entry::insert` turns
        // leaf nodes into files
        Self::Dir(Default::default())
    }
}
/// A node in the file tree, carrying its name and aggregated change counts.
#[derive(Clone, PartialEq, Default, Debug, Eq)]
pub struct Entry {
    pub name: String,
    pub item: Item,
    pub changes: Changes,
    pub state: State,
}
impl Entry {
    /// Create a new (empty directory) entry with the given name.
    pub fn new(name: String) -> Self {
        Self {
            name,
            ..Default::default()
        }
    }
    /// Insert a file at `path`, adding `changes` to every ancestor directory
    /// along the way and marking the final node as a file.
    pub fn insert(&mut self, path: &Utf8Path, changes: Changes) {
        debug!("Inserting {path} with changes {changes:?}");
        let mut entry = self;
        for component in path.components() {
            // accumulate this file's changes into the current ancestor node
            entry.changes += changes;
            let component = match component {
                Utf8Component::RootDir => continue,
                Utf8Component::Normal(path) => path,
                // assumes archive paths are relative and normalized (no `.`,
                // `..`, or Windows prefixes) — TODO confirm for all callers
                Utf8Component::CurDir => unreachable!(),
                Utf8Component::ParentDir => unreachable!(),
                Utf8Component::Prefix(_) => unreachable!(),
            };
            entry = Rc::make_mut(
                match &mut entry.item {
                    // only the final component can be a file
                    Item::File => unreachable!(),
                    Item::Dir(entries) => entries,
                }
                .entry(component.to_string())
                .or_insert_with(|| Rc::new(Entry::new(component.to_string()))),
            );
        }
        // the node reached last is the file itself: set (not add) its changes
        entry.changes = changes;
        entry.item = Item::File;
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/cache.rs | src/cache.rs | use crate::data::*;
use anyhow::Result;
use log::*;
use semver::Version;
use std::{
collections::BTreeMap,
sync::{Arc, Mutex},
};
/// Crate response cache
///
/// In-memory cache of crates.io metadata responses, keyed by crate name.
/// Entries are never evicted for the lifetime of the page.
pub struct CrateResponseCache(Mutex<BTreeMap<String, Arc<CrateResponse>>>);
/// Global crate response cache instance
pub static CRATE_RESPONSE_CACHE: CrateResponseCache = CrateResponseCache::new();
impl CrateResponseCache {
    /// Create new, empty cache
    pub const fn new() -> Self {
        CrateResponseCache(Mutex::new(BTreeMap::new()))
    }
    /// Lookup in cache or fetch
    pub async fn fetch_cached(&self, name: &str) -> Result<Arc<CrateResponse>> {
        if let Some(info) = self.cached(name) {
            info!("Fetching crate metadata for {name} from cache");
            return Ok(info);
        }
        // fetch it
        let info = CrateResponse::fetch(name).await?;
        let info = Arc::new(info);
        // save back into cache
        self.cache(info.clone());
        Ok(info)
    }
    /// Store in cache
    fn cache<T: Into<Arc<CrateResponse>>>(&self, response: T) {
        let mut lock = self.0.lock().unwrap();
        let response: Arc<CrateResponse> = response.into();
        info!("Storing crate metadata for {} in cache", response.krate.id);
        lock.insert(response.krate.id.clone(), response);
    }
    /// Lookup in cache
    pub fn cached(&self, name: &str) -> Option<Arc<CrateResponse>> {
        // check if we have it cached
        let lock = self.0.lock().unwrap();
        lock.get(name).cloned()
    }
}
// a fresh cache reports a miss for any name
#[test]
fn test_crate_response_cache_missing() {
    let cache = CrateResponseCache::new();
    assert!(cache.cached("serde").is_none());
}
// a stored response is returned by a subsequent lookup under its crate id
#[test]
fn test_crate_response_cache_store() {
    let cache = CrateResponseCache::new();
    assert!(cache.cached("serde").is_none());
    let crate_response = Arc::new(CrateResponse {
        krate: CrateDetail {
            id: "serde".into(),
            max_version: "0.1.0".parse().unwrap(),
            max_stable_version: Some("0.1.0".parse().unwrap()),
            description: "".into(),
            documentation: None,
            downloads: 0,
            exact_match: true,
            homepage: None,
            newest_version: "0.1.0".parse().unwrap(),
            recent_downloads: None,
            repository: None,
        },
        versions: Default::default(),
    });
    cache.cache(crate_response.clone());
    assert_eq!(crate_response, cache.cached("serde").unwrap());
}
/// Crate source cache
///
/// In-memory cache of downloaded crate sources, keyed by (crate name, version).
/// Entries are never evicted for the lifetime of the page.
pub struct CrateSourceCache(Mutex<BTreeMap<(String, Version), Arc<CrateSource>>>);
/// Global crate source cache instance
pub static CRATE_SOURCE_CACHE: CrateSourceCache = CrateSourceCache::new();
impl CrateSourceCache {
    /// Create new, empty cache
    pub const fn new() -> Self {
        CrateSourceCache(Mutex::new(BTreeMap::new()))
    }
    /// Lookup in cache or fetch
    pub async fn fetch_cached(&self, version: &VersionInfo) -> Result<Arc<CrateSource>> {
        if let Some(source) = self.cached(version) {
            info!(
                "Fetching crate source for {} v{} from cache",
                version.krate, version.version
            );
            return Ok(source);
        }
        // fetch it
        let source = version.fetch().await?;
        let source = Arc::new(source);
        // save back into cache
        self.cache(source.clone());
        Ok(source)
    }
    /// Store in cache
    fn cache<T: Into<Arc<CrateSource>>>(&self, source: T) {
        let mut lock = self.0.lock().unwrap();
        let source: Arc<CrateSource> = source.into();
        info!(
            "Storing crate source {} v{} in cache",
            source.version.krate, source.version.version
        );
        lock.insert(
            (source.version.krate.clone(), source.version.version.clone()),
            source,
        );
    }
    /// Lookup in cache
    pub fn cached(&self, version: &VersionInfo) -> Option<Arc<CrateSource>> {
        // check if we have it cached
        let lock = self.0.lock().unwrap();
        lock.get(&(version.krate.clone(), version.version.clone()))
            .cloned()
    }
}
// a fresh cache reports a miss for any version
#[test]
fn test_crate_source_cache_missing() {
    let cache = CrateSourceCache::new();
    let version = VersionInfo {
        checksum: "abc".into(),
        dl_path: "/path".into(),
        krate: "serde".into(),
        version: "0.1.0".parse().unwrap(),
        yanked: false,
    };
    assert!(cache.cached(&version).is_none());
}
// a stored source is returned by a subsequent lookup under its (name, version)
#[test]
fn test_crate_source_cache_store() {
    let cache = CrateSourceCache::new();
    let version = VersionInfo {
        checksum: "abc".into(),
        dl_path: "/path".into(),
        krate: "serde".into(),
        version: "0.1.0".parse().unwrap(),
        yanked: false,
    };
    assert!(cache.cached(&version).is_none());
    let source = Arc::new(CrateSource {
        version: version.clone(),
        files: Default::default(),
    });
    cache.cache(source.clone());
    assert_eq!(source, cache.cached(&version).unwrap());
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/diff_view.rs | src/components/diff_view.rs | use crate::{
data::{ChunkInfo, FileDiff, VersionDiff},
syntax::{highlight_changes, infer_syntax_for_file, syntect_style_to_css},
};
use bytes::Bytes;
use camino::Utf8PathBuf;
use log::*;
use similar::ChangeTag;
use std::rc::Rc;
use syntect::highlighting::Style;
use yew::prelude::*;
/// Contains information about contiguous changes
#[derive(PartialEq, Clone)]
struct DiffGroupInfo {
    /// The actual changes
    group: Vec<(ChangeTag, Vec<(Style, bytes::Bytes)>)>,
    /// What range of lines the group covers (used as a Yew list key)
    range: ChunkInfo,
    /// Whether the group contains an actual diff (and therefore shows some context)
    in_context: bool,
}
/// Props for [`DiffView`]: the file path to render and the precomputed diff.
#[derive(Properties, PartialEq, Clone)]
pub struct DiffViewProps {
    pub path: Utf8PathBuf,
    pub diff: Rc<VersionDiff>,
}
/// Diff rendering mode.
///
/// NOTE(review): not referenced anywhere in this module's visible code —
/// confirm it is consumed elsewhere before relying on it.
#[derive(Clone, Copy, Debug, PartialEq, Default)]
pub enum DiffStyle {
    #[default]
    Unified,
    Split,
}
/// Small document icon shown next to the file name in the diff header.
#[function_component]
fn FileIcon() -> Html {
    // from https://www.svgrepo.com/svg/491619/doc
    html! {
        <svg class="fill-gray-500 w-4" viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg">
            <path fill-rule="evenodd" clip-rule="evenodd" d="M6 1C4.34314 1 3 2.34315 3 4V20C3 21.6569 4.34315 23 6 23H19C20.6569 23 22 21.6569 22 20V10C22 9.73478 21.8946 9.48043 21.7071 9.29289L13.7071 1.29292C13.6114 1.19722 13.4983 1.1229 13.3753 1.07308C13.2572 1.02527 13.1299 1 13 1H6ZM12 3H6C5.44771 3 5 3.44771 5 4V20C5 20.5523 5.44772 21 6 21H19C19.5523 21 20 20.5523 20 20V11H13C12.4477 11 12 10.5523 12 10V3ZM18.5858 9.00003L14 4.41424V9.00003H18.5858Z" />
        </svg>
    }
}
/// Render the diff of a single file: header with the file name, then either a
/// plain file view (no changes) or a unified diff with collapsible unchanged
/// regions, all syntax-highlighted.
#[function_component]
pub fn DiffView(props: &DiffViewProps) -> Html {
    let empty = FileDiff::default();
    let file_diff = props.diff.files.get(&props.path).unwrap_or(&empty);
    let summary = props.diff.summary.get(&props.path).unwrap_or(&(0, 0));
    let is_identical_version = props.diff.left.version == props.diff.right.version;
    // Determine which syntax should be used for this file. It will be based
    // first on the file's name, then the file's extension, then the first line.
    let syntax = infer_syntax_for_file(
        props.path.as_str(),
        // first non-deleted line that is valid UTF-8
        file_diff
            .changes
            .iter()
            .find(|(tag, _)| *tag != ChangeTag::Delete)
            .and_then(|(_, line)| std::str::from_utf8(line).ok()),
    );
    // NOTE(review): message reads "Highlighting <syntax> as <path>" — the
    // arguments look swapped; confirm the intended wording.
    info!("Highlighting {} as {}", syntax.name, props.path);
    // Apply highlighting to every change in the file.
    let mut changes = highlight_changes(syntax, &file_diff.changes).into_iter();
    let ranges = file_diff.context_ranges.iter();
    // Group contiguous lines by whether they contain an actual diff +/- some context buffer.
    let mut cursor = 0;
    let mut stack: Vec<DiffGroupInfo> = vec![];
    for next_range in ranges {
        // out of context lines (collapsed by default) before this hunk
        if next_range.start() != 0 {
            stack.push(DiffGroupInfo {
                group: changes.by_ref().take(next_range.start() - cursor).collect(),
                range: ChunkInfo {
                    range: cursor..next_range.start(),
                    left_start: (next_range.left_start + cursor).saturating_sub(next_range.start()),
                    right_start: (next_range.right_start + cursor)
                        .saturating_sub(next_range.start()),
                },
                in_context: false,
            });
        }
        // in context lines
        stack.push(DiffGroupInfo {
            group: changes
                .by_ref()
                .take(next_range.end() - next_range.start())
                .collect(),
            range: next_range.clone(),
            in_context: true,
        });
        cursor = next_range.end();
    }
    if changes.len() > 0 {
        // Trailing unchanged lines at the end of a file
        stack.push(DiffGroupInfo {
            group: changes.by_ref().collect(),
            range: ChunkInfo {
                range: cursor..file_diff.changes.len(),
                left_start: (cursor).saturating_sub(file_diff.summary.added as usize),
                right_start: (cursor).saturating_sub(file_diff.summary.removed as usize),
            },
            // When comparing a version of the crate to itself, this group will
            // always contain the full text of the file. Don't collapse it.
            in_context: is_identical_version,
        });
    }
    html! {
        <div class="diff-view">
            <div class="header">
                <FileIcon />
                <span class="filename">{props.path.file_name().unwrap_or("")}</span>
            </div>
            <div class="content">
            {
                // files without any changes are shown as a plain listing
                if summary == &(0,0) {
                    html! {<FileDisplayView {stack} />}
                } else {
                    html! {<UnifiedDiffView {stack} />}
                }
            }
            </div>
        </div>
    }
}
/// Props shared by the diff/file views: the grouped, highlighted changes.
#[derive(Properties, PartialEq)]
pub struct AnyDiffViewProps {
    stack: Vec<DiffGroupInfo>,
}
/// Render a unified diff: one [`DiffLineGroup`] per group, tracking the
/// running change index so each group knows its starting line numbers.
#[function_component]
pub fn UnifiedDiffView(props: &AnyDiffViewProps) -> Html {
    let mut overall_index = 0;
    html! {
        <div class="overflow-x-scroll bg-white">
            <div class="unified">
            {
                props.stack.iter()
                    .map(|DiffGroupInfo {group, range, in_context}| {
                        let res = html!{
                            <DiffLineGroup
                                key={format!("{:?}", range.range)}
                                group={group.clone()}
                                {in_context}
                                group_start_index={(overall_index, range.left_start, range.right_start)}
                            />
                        };
                        overall_index += group.len();
                        res
                    })
                    .collect::<Html>()
            }
            </div>
        </div>
    }
}
/// Render a file without changes as a plain highlighted listing (no +/- signs
/// or fold controls); change tags are discarded.
#[function_component]
pub fn FileDisplayView(props: &AnyDiffViewProps) -> Html {
    let mut overall_index = 0;
    html! {
        <div class="overflow-x-scroll bg-white">
            <div class="unified">
            {
                props.stack.iter()
                    .map(|DiffGroupInfo {group, range, in_context: _}| {
                        let res = html!{
                            <FileView
                                key={format!("{:?}", range)}
                                group={group.iter().map(|(_, line)| line.clone()).collect::<Vec<_>>()}
                                group_start_index={overall_index}
                            />
                        };
                        overall_index += group.len();
                        res
                    })
                    .collect::<Html>()
            }
            </div>
        </div>
    }
}
/// Render a diff in split style.
///
/// NOTE(review): currently renders the same [`DiffLineGroup`] rows as the
/// unified view, and is not referenced in this module's visible code —
/// presumably a work-in-progress; confirm before extending.
#[function_component]
pub fn SplitDiffView(props: &AnyDiffViewProps) -> Html {
    let mut overall_index = 0;
    html! {
        <div class="p-2 overflow-x-scroll bg-white">
            <pre class="bg-white">
            {
                props.stack.iter()
                    .map(|DiffGroupInfo {group, range, in_context}| {
                        let res = html!{
                            <DiffLineGroup
                                key={format!("{:?}", range)}
                                group={group.clone()}
                                {in_context}
                                group_start_index={(overall_index, range.left_start, range.right_start)}
                            />
                        };
                        overall_index += group.len();
                        res
                    })
                    .collect::<Html>()
            }
            </pre>
        </div>
    }
}
/// Expand icon shown on the fold bar for collapsed unchanged regions.
#[function_component]
fn ExpandIcon() -> Html {
    html! {
        <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" fill="currentColor" height="1em" width="1em" class="inline">
            <path d="m8.177.677 2.896 2.896a.25.25 0 0 1-.177.427H8.75v1.25a.75.75 0 0 1-1.5 0V4H5.104a.25.25 0 0 1-.177-.427L7.823.677a.25.25 0 0 1 .354 0ZM7.25 10.75a.75.75 0 0 1 1.5 0V12h2.146a.25.25 0 0 1 .177.427l-2.896 2.896a.25.25 0 0 1-.354 0l-2.896-2.896A.25.25 0 0 1 5.104 12H7.25v-1.25Zm-5-2a.75.75 0 0 0 0-1.5h-.5a.75.75 0 0 0 0 1.5h.5ZM6 8a.75.75 0 0 1-.75.75h-.5a.75.75 0 0 1 0-1.5h.5A.75.75 0 0 1 6 8Zm2.25.75a.75.75 0 0 0 0-1.5h-.5a.75.75 0 0 0 0 1.5h.5ZM12 8a.75.75 0 0 1-.75.75h-.5a.75.75 0 0 1 0-1.5h.5A.75.75 0 0 1 12 8Zm2.25.75a.75.75 0 0 0 0-1.5h-.5a.75.75 0 0 0 0 1.5h.5Z"></path>
        </svg>
    }
}
/// Props for [`DiffLineGroup`].
#[derive(Properties, PartialEq)]
pub struct DiffLineGroupProps {
    // highlighted lines of the group, with their change tags
    group: Vec<(ChangeTag, Vec<(Style, bytes::Bytes)>)>,
    // whether the group is part of a hunk (shown expanded by default)
    in_context: bool,
    // 0-indexed (overall change index, left line number, right line number)
    group_start_index: (usize, usize, usize),
}
/// Props for [`FileView`]: highlighted lines without change tags.
#[derive(Properties, PartialEq)]
pub struct DisplayGroupProps {
    group: Vec<Vec<(Style, bytes::Bytes)>>,
    group_start_index: usize,
}
/// Render one group of diff lines, either folded (a "Show lines …" bar for
/// out-of-context regions) or expanded with line numbers and +/- markers.
#[function_component]
pub fn DiffLineGroup(props: &DiffLineGroupProps) -> Html {
    let folded = use_state(|| !props.in_context);
    let onclick = {
        let folded = folded.clone();
        Callback::from(move |_| folded.set(!*folded))
    };
    // go from 0-indexed to 1-indexed
    let start_index = (
        props.group_start_index.0 + 1,
        props.group_start_index.1 + 1,
        props.group_start_index.2 + 1,
    );
    // use the fact that folded sections never contain changes
    let end_index = (
        start_index.0 + props.group.len() - 1,
        start_index.1 + props.group.len() - 1,
        start_index.2 + props.group.len() - 1,
    );
    if *folded {
        html! {
            <div class="expand">
                <button class={classes!("button")} onclick={onclick.clone()}>
                    <ExpandIcon />
                </button>
                <button class={classes!("info")} {onclick}>
                {
                    // only show both sides' line ranges when they differ
                    if start_index.1 == start_index.2 {
                        format!("Show lines {:?} to {:?}", start_index.1, end_index.1)
                    } else {
                        format!("Show lines {:?} to {:?}", (start_index.1,start_index.2), (end_index.1,end_index.2))
                    }
                }
                </button>
            </div>
        }
    } else {
        // running 1-indexed line counters for the left and right file
        let (mut left_idx, mut right_idx) = (start_index.1, start_index.2);
        html! {
            <>
            {
                props.group.iter().map(|(tag, change)| {
                    // deletions only advance the left counter, insertions only
                    // the right; unchanged lines advance both
                    let (sign, class, left, right) = match tag {
                        ChangeTag::Delete => ("-", "deletion", Some(left_idx), None),
                        ChangeTag::Insert => ("+", "insertion", None, Some(right_idx)),
                        ChangeTag::Equal => (" ", "unchanged", Some(left_idx), Some(right_idx)),
                    };
                    (left_idx, right_idx) = match tag {
                        ChangeTag::Delete => (left_idx + 1, right_idx),
                        ChangeTag::Insert => (left_idx, right_idx + 1),
                        ChangeTag::Equal => (left_idx + 1, right_idx + 1),
                    };
                    html! {
                        <div class={classes!("line", class)}>
                            <a id={left.map(|i| format!("L{i}"))} class="line-number">
                                if let Some(index) = left {
                                    {index}
                                }
                            </a>
                            <a id={right.map(|i| format!("R{i}"))} class="line-number">
                                if let Some(index) = right {
                                    {index}
                                }
                            </a>
                            <div class="change-icon">
                            {
                                format!("{sign}")
                            }
                            </div>
                            <div class="code-line">
                                <CodeLine stack={change.clone()} />
                            </div>
                        </div>
                    }
                }).collect::<Html>()
            }
            </>
        }
    }
}
/// Render a group of unchanged, highlighted lines with plain line numbers
/// (used by [`FileDisplayView`] when a file has no changes).
#[function_component]
pub fn FileView(props: &DisplayGroupProps) -> Html {
    props
        .group
        .iter()
        .enumerate()
        .map(|(index, change)| {
            html! {
                <div class={classes!("line", "unchanged")}>
                    <div class={classes!("line-number", "file-view")}>
                    {
                        format!("{}", index+1+ props.group_start_index)
                    }
                    </div>
                    <div class="code-line">
                        <CodeLine stack={change.clone()} />
                    </div>
                </div>
            }
        })
        .collect::<Html>()
}
/// Props for [`CodeLine`]: styled byte chunks making up a single line.
#[derive(Properties, PartialEq)]
pub struct CodeLineProps {
    stack: Vec<(Style, Bytes)>,
}
/// Render one line of code as a sequence of inline-styled spans.
#[function_component]
pub fn CodeLine(props: &CodeLineProps) -> Html {
    props
        .stack
        .iter()
        .map(|(style, text)| {
            let style = syntect_style_to_css(style);
            // chunks should be valid UTF-8 already; lossy conversion is a safety net
            let contents = String::from_utf8_lossy(&text[..]);
            html! {
                <span style={style}>{contents}</span>
            }
        })
        .collect::<Html>()
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/footer.rs | src/components/footer.rs | use yew::prelude::*;
/// Page footer: links to the project repository and to the exact git commit
/// this build was produced from (`VERGEN_GIT_SHA` is injected at build time).
#[function_component]
pub fn Footer() -> Html {
    html! {
        <div class="text-center py-4 text-gray-700 dark:text-gray-300">
            <a href="https://github.com/xfbs/diff.rs">{"diff.rs"}</a>
            {" build "}
            // Full SHA in the link target, first 8 characters as link text.
            <a class="font-mono" href={concat!("https://github.com/xfbs/diff.rs/commit/", env!("VERGEN_GIT_SHA"))}>{&env!("VERGEN_GIT_SHA")[0..8]}</a>
            {", made with ❤️ by "}
            <a href="https://github.com/xfbs">{"xfbs"}</a>
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/search.rs | src/components/search.rs | use crate::{
data::{CrateDetail, SearchResponse, SummaryCategory, SummaryResponse},
Link, Route,
};
use gloo_net::http::Request;
use implicit_clone::unsync::IString;
use web_sys::HtmlInputElement;
use yew::{
prelude::*,
suspense::{use_future, use_future_with},
};
use yew_hooks::prelude::*;
use yew_router::prelude::*;
/// Magnifying-glass SVG icon used inside the search input fields.
#[function_component]
pub(super) fn SearchGlass() -> Html {
    html! {
        <svg class="w-4 h-4 text-gray-500 dark:text-gray-400" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 20 20">
            <path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m19 19-4-4m0-7A7 7 0 1 1 1 8a7 7 0 0 1 14 0Z"/>
        </svg>
    }
}
/// Props for [`SearchBar`]: the current query text and a callback fired on
/// every input change.
#[derive(Properties, PartialEq)]
pub struct SearchBarProps {
    pub value: IString,
    #[prop_or_default]
    pub onchange: Callback<String>,
}
/// Large search input (landing page). Autofocuses on mount and reports every
/// keystroke through `onchange`; the surrounding form never reloads the page.
#[function_component]
pub fn SearchBar(props: &SearchBarProps) -> Html {
    let oninput = {
        let onchange = props.onchange.clone();
        move |event: InputEvent| {
            // InputEvent targets are always HtmlInputElement here.
            let target: HtmlInputElement = event.target_dyn_into().unwrap();
            onchange.emit(target.value());
        }
    };
    // prevent default action on form submission (page reload)
    let onsubmit = |event: SubmitEvent| {
        event.prevent_default();
    };
    // set focus on the input element once, when the component mounts
    let input = use_node_ref();
    use_effect_once({
        let input = input.clone();
        move || {
            if let Some(input) = input.cast::<HtmlInputElement>() {
                let _ = input.focus();
            }
            // no cleanup needed on unmount
            || {}
        }
    });
    html! {
        <form class="max-w-xl mx-auto p-4" {onsubmit}>
            <label for="default-search" class="mb-2 text-sm font-medium text-gray-900 sr-only dark:text-white">{"Search"}</label>
            <div class="relative">
                <div class="absolute inset-y-0 start-0 flex items-center ps-3 pointer-events-none">
                    <SearchGlass />
                </div>
                <input ref={input} type="search" id="default-search" class="block w-full p-4 ps-10 text-sm text-gray-900 border border-gray-300 rounded-lg bg-gray-50 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500" placeholder="Search for crates" required=true value={props.value.clone()} {oninput} />
                <button type="submit" class="text-white absolute end-2.5 bottom-2.5 bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:outline-none focus:ring-blue-300 font-medium rounded-lg text-sm px-4 py-2 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800">{"Search"}</button>
            </div>
        </form>
    }
}
/// Compact "jump to crate" search box for the navbar. Pressing Enter
/// navigates directly to the crate route for the typed name.
#[function_component]
pub fn Search() -> Html {
    let state = use_state(|| "".to_string());
    let navigator = use_navigator().unwrap();
    let oninput = {
        let state = state.clone();
        move |event: InputEvent| {
            state.set(event.target_unchecked_into::<HtmlInputElement>().value());
        }
    };
    let onkeydown = {
        let state = state.clone();
        move |event: KeyboardEvent| {
            // Only Enter triggers navigation; all other keys just edit text.
            if event.key() == "Enter" {
                navigator.push(&Route::Crate {
                    krate: state.to_string(),
                });
            }
        }
    };
    html! {
        <div class="relative w-full">
            <div class="absolute inset-y-0 start-0 flex items-center ps-3 pointer-events-none">
                <SearchGlass />
            </div>
            <input type="search" id="default-search" class="block w-full p-2 ps-10 text-sm text-gray-900 border border-gray-300 rounded-lg bg-gray-50 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-800 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500" placeholder="Jump to crate..." value={state.to_string()} {oninput} {onkeydown} />
        </div>
    }
}
/// Props for the search-results components: the raw query string.
#[derive(Properties, PartialEq)]
pub struct SearchResultsProps {
    pub query: String,
}
/// Suspense wrapper around [`SearchResultsLoader`]; renders nothing while an
/// empty query is active or while results are loading.
#[function_component]
pub fn SearchResults(props: &SearchResultsProps) -> Html {
    let fallback = html! {};
    html! {
        <Suspense {fallback}>
            if props.query.is_empty() {
            } else {
                <SearchResultsLoader query={props.query.clone()} />
            }
        </Suspense>
    }
}
/// Queries the crates.io search API for the current query and renders one
/// [`Card`] per matching crate, or the error text on failure.
///
/// The future re-runs whenever the query prop changes.
#[function_component]
pub fn SearchResultsLoader(props: &SearchResultsProps) -> HtmlResult {
    let info = use_future_with(props.query.clone(), |name| async move {
        let response = Request::get("https://crates.io/api/v1/crates")
            .query([("q", name.as_str())])
            .build()?
            .send()
            .await?;
        let text = response.json::<SearchResponse>().await?;
        Ok(text) as anyhow::Result<SearchResponse>
    })?;
    let html = match &*info {
        Ok(response) => html! {
            <div class="flex flex-col gap-2 my-4">
                { for response.crates.iter().map(|c| html! {<Card details={c.clone()} /> }) }
            </div>
        },
        Err(error) => html! {
            <>
                {"Error: "}
                {format!("{error:?}")}
            </>
        },
    };
    Ok(html)
}
/// Git branch SVG icon, shown on crate cards linking to the repository.
#[function_component]
fn GitIcon() -> Html {
    html! {
        <svg class="inline" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="1em" height="1em" fill="currentColor"><path d="M15 4.75a3.25 3.25 0 1 1 6.5 0 3.25 3.25 0 0 1-6.5 0ZM2.5 19.25a3.25 3.25 0 1 1 6.5 0 3.25 3.25 0 0 1-6.5 0Zm0-14.5a3.25 3.25 0 1 1 6.5 0 3.25 3.25 0 0 1-6.5 0ZM5.75 6.5a1.75 1.75 0 1 0-.001-3.501A1.75 1.75 0 0 0 5.75 6.5Zm0 14.5a1.75 1.75 0 1 0-.001-3.501A1.75 1.75 0 0 0 5.75 21Zm12.5-14.5a1.75 1.75 0 1 0-.001-3.501A1.75 1.75 0 0 0 18.25 6.5Z"></path><path d="M5.75 16.75A.75.75 0 0 1 5 16V8a.75.75 0 0 1 1.5 0v8a.75.75 0 0 1-.75.75Z"></path><path d="M17.5 8.75v-1H19v1a3.75 3.75 0 0 1-3.75 3.75h-7a1.75 1.75 0 0 0-1.75 1.75H5A3.25 3.25 0 0 1 8.25 11h7a2.25 2.25 0 0 0 2.25-2.25Z"></path></svg>
    }
}
/// Rust/docs.rs crate SVG icon, shown on crate cards linking to docs.rs.
#[function_component]
fn DocsRsIcon() -> Html {
    html! {
        <svg class="inline" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="1em" height="1em" fill="currentColor">
            <path d="M488.6 250.2L392 214V105.5c0-15-9.3-28.4-23.4-33.7l-100-37.5c-8.1-3.1-17.1-3.1-25.3 0l-100 37.5c-14.1 5.3-23.4 18.7-23.4 33.7V214l-96.6 36.2C9.3 255.5 0 268.9 0 283.9V394c0 13.6 7.7 26.1 19.9 32.2l100 50c10.1 5.1 22.1 5.1 32.2 0l103.9-52 103.9 52c10.1 5.1 22.1 5.1 32.2 0l100-50c12.2-6.1 19.9-18.6 19.9-32.2V283.9c0-15-9.3-28.4-23.4-33.7zM358 214.8l-85 31.9v-68.2l85-37v73.3zM154 104.1l102-38.2 102 38.2v.6l-102 41.4-102-41.4v-.6zm84 291.1l-85 42.5v-79.1l85-38.8v75.4zm0-112l-102 41.4-102-41.4v-.6l102-38.2 102 38.2v.6zm240 112l-85 42.5v-79.1l85-38.8v75.4zm0-112l-102 41.4-102-41.4v-.6l102-38.2 102 38.2v.6z"></path>
        </svg>
    }
}
/// Props for [`Card`]: the crates.io metadata of one crate.
#[derive(Properties, PartialEq)]
struct CardProps {
    pub details: CrateDetail,
}
/// One crate result card: name, latest version, docs.rs link, optional
/// repository link, and description. The whole card links to the crate route.
#[function_component]
fn Card(props: &CardProps) -> Html {
    let link = Route::Crate {
        krate: props.details.id.clone(),
    };
    html! {
        <Link to={link} classes="card">
            <div class="header">
                <h3 class="name">{&props.details.id}</h3>
                <span class="version">{props.details.max_version.to_string()}</span>
                <span class="grow"></span>
                <a class="icon" href={format!("https://docs.rs/{}/{}", &props.details.id, &props.details.max_version)}><DocsRsIcon /></a>
                // Repository link only when the crate declares one.
                if let Some(url) = &props.details.repository {
                    <a class="icon" href={url.to_string()}><GitIcon /></a>
                }
            </div>
            <p class="description">{&props.details.description}</p>
        </Link>
    }
}
/// Fetches the crates.io summary endpoint and renders one [`SummaryColumn`]
/// per requested category.
///
/// NOTE(review): the props binding is named `summary` but it is the
/// component's props ([`StaticResultPropNew`]), not the API response.
#[function_component]
pub fn SummaryLoader(summary: &StaticResultPropNew) -> HtmlResult {
    let info = use_future(|| async move {
        let response = Request::get("https://crates.io/api/v1/summary")
            .build()?
            .send()
            .await?;
        let text = response.json::<SummaryResponse>().await?;
        Ok(text) as anyhow::Result<SummaryResponse>
    })?;
    let html = match &*info {
        Ok(response) => html! {
            <section class="summary">
                {
                    for summary.category.iter().copied().map(|category| html! {
                        <SummaryColumn {category} crates={response.get(category).clone()} />
                    })
                }
            </section>
        },
        Err(error) => html! {
            <>
                {"Error: "}
                {format!("{error:?}")}
            </>
        },
    };
    Ok(html)
}
/// Props for [`SummaryColumn`]: the category heading and its crates.
#[derive(Properties, PartialEq)]
pub struct SummaryColumnProps {
    category: SummaryCategory,
    crates: Vec<CrateDetail>,
}
/// One column of the landing-page summary: a category title above a list of
/// crate [`Card`]s. Renders nothing when the category has no crates.
#[function_component]
fn SummaryColumn(props: &SummaryColumnProps) -> Html {
    if props.crates.is_empty() {
        return html! {};
    }
    html! {
        <div class="column">
            <h2 class="title">
                { props.category.title() }
            </h2>
            <section class="results">
                { for props.crates.iter().cloned().map(|c| html! {<Card details={c} /> }) }
            </section>
        </div>
    }
}
/// Props for [`SummaryLoader`]: which summary categories to display, in order.
#[derive(Properties, PartialEq)]
pub struct StaticResultPropNew {
    pub category: Vec<SummaryCategory>,
}
/// Landing-page summary section with the default three categories
/// (most recent, most downloaded, just updated), behind a Suspense boundary.
#[function_component]
pub fn DefaultSummarySection() -> Html {
    let fallback = html! {
        {"Loading"}
    };
    html! {
        <Suspense {fallback}>
            <SummaryLoader category={ vec![
                SummaryCategory::MostRecent,
                SummaryCategory::MostDownloaded,
                SummaryCategory::JustUpdated,
            ]}/>
        </Suspense>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/navigation.rs | src/components/navigation.rs | use crate::{components::Search, data::CrateResponse, *};
use implicit_clone::unsync::IString;
use indexmap::IndexMap;
use semver::Version;
use std::sync::Arc;
use web_sys::HtmlSelectElement;
use yew::prelude::*;
use yew_icons::{Icon as YewIcon, IconId};
/// Props for [`Navbar`]: arbitrary navbar content.
#[derive(Properties, PartialEq)]
pub struct NavbarProps {
    pub children: Children,
}
/// Top-level navigation bar container; children are laid out by the
/// `navbar-items` stylesheet class.
#[function_component]
pub fn Navbar(props: &NavbarProps) -> Html {
    html! {
        <nav id="navbar" class="" aria-label="Main">
            <div class="navbar-items">
                { for props.children.iter() }
            </div>
        </nav>
    }
}
/// Props for [`NavbarGroup`]: the items grouped together.
#[derive(Properties, PartialEq)]
pub struct NavbarGroupProps {
    pub children: Children,
}
/// Groups related navbar items so they wrap and align as a unit.
#[function_component]
pub fn NavbarGroup(props: &NavbarGroupProps) -> Html {
    html! {
        <div class="navbar-group">
            { for props.children.iter() }
        </div>
    }
}
/// Props shared by [`NavbarHeading`] and [`NavbarItem`].
#[derive(Properties, PartialEq)]
pub struct NavbarHeadingProps {
    pub children: Children,
}
/// Prominent (bold, larger) navbar entry, used for the site title.
#[function_component]
pub fn NavbarHeading(props: &NavbarHeadingProps) -> Html {
    html! {
        <div class="text-xl font-bold text-nowrap flex flex-row items-center">
            { for props.children.iter() }
        </div>
    }
}
/// Regular (non-heading) navbar entry; lays its children out in a row and
/// keeps their text on a single line.
///
/// Fix: the class list previously contained `text-nowrapf`, which is not a
/// Tailwind utility, so the intended no-wrap behavior was silently missing.
/// Corrected to `text-nowrap`, matching [`NavbarHeading`] above.
#[function_component]
pub fn NavbarItem(props: &NavbarHeadingProps) -> Html {
    html! {
        <div class="text-lg text-nowrap flex flex-row items-center">
            { for props.children.iter() }
        </div>
    }
}
/// Minimal navbar (site title + About link), used on pages where no crate
/// versions are loaded yet or loading failed.
#[function_component]
pub fn SimpleNavbar() -> Html {
    html! {
        <Navbar>
            <NavbarGroup>
                <NavbarHeading>
                    <Link to={Route::Home} classes="flex flex-row items-center">
                        <YewIcon height={"1.5ex"} icon_id={IconId::LucideFileDiff} />
                        { "diff.rs" }
                    </Link>
                </NavbarHeading>
                <NavbarItem>
                    <Link to={Route::About}>
                        {"About"}
                    </Link>
                </NavbarItem>
            </NavbarGroup>
        </Navbar>
    }
}
/// Props for [`Select`]: value→label pairs (insertion-ordered), the currently
/// selected value, and a callback fired with the newly selected value.
#[derive(Properties, PartialEq)]
pub struct SelectProps {
    #[prop_or_default]
    values: IndexMap<IString, IString>,
    #[prop_or_default]
    selected: Option<IString>,
    #[prop_or_default]
    onchange: Callback<IString>,
}
/// Styled `<select>` dropdown; emits the chosen option's value (the map key)
/// through `onchange`.
#[function_component]
pub fn Select(props: &SelectProps) -> Html {
    let onchange = {
        let onchange = props.onchange.clone();
        move |event: Event| {
            // Change events here always come from the <select> element.
            let target = event.target_dyn_into::<HtmlSelectElement>().unwrap();
            let value = target.value();
            onchange.emit(value.into());
        }
    };
    html! {
        <select class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-1.5 dark:bg-gray-800 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500" {onchange}>
            {
                for props
                    .values
                    .iter()
                    .map(|(key, value)| {
                        // Mark the option matching `props.selected` as selected.
                        let selected = props
                            .selected
                            .as_ref()
                            .map(|k| k == key)
                            .unwrap_or(false);
                        html! {
                            <option {selected} value={key}>{value}</option>
                        }
                    })
            }
        </select>
    }
}
/// Props for [`ComplexNavbar`]: the two crates being compared (names,
/// selected versions, and full crates.io metadata), plus a callback fired
/// with the new `((src, old), (dst, new))` selection whenever the user
/// changes a version or swaps sides.
#[derive(Properties, PartialEq)]
pub struct ComplexNavbarProps {
    pub src_name: String,
    pub dst_name: String,
    pub old: Version,
    pub new: Version,
    pub src_info: Arc<CrateResponse>,
    pub dst_info: Arc<CrateResponse>,
    #[prop_or_default]
    pub onchange: Callback<((String, Version), (String, Version))>,
}
/// Double-arrow SVG icon for the "swap sides" button in [`ComplexNavbar`].
#[function_component]
fn SwitchIcon() -> Html {
    html! {
        <svg class="h-4 w-4" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24">
            <path fill="currentColor" d="M7.72 21.78a.75.75 0 0 0 1.06-1.06L5.56 17.5h14.69a.75.75 0 0 0 0-1.5H5.56l3.22-3.22a.75.75 0 1 0-1.06-1.06l-4.5 4.5a.75.75 0 0 0 0 1.06l4.5 4.5Zm8.56-9.5a.75.75 0 1 1-1.06-1.06L18.44 8H3.75a.75.75 0 0 1 0-1.5h14.69l-3.22-3.22a.75.75 0 0 1 1.06-1.06l4.5 4.5a.75.75 0 0 1 0 1.06l-4.5 4.5Z">
            </path>
        </svg>
    }
}
/// Full comparison navbar: crate links, a version dropdown per side, a
/// swap-sides button, and the quick crate search.
///
/// The `switched` state only mirrors the sides locally for display; every
/// user action is also reported upward through `props.onchange` as
/// `((src_name, old), (dst_name, new))`.
#[function_component]
pub fn ComplexNavbar(props: &ComplexNavbarProps) -> Html {
    // Build the dropdown entries for one side: key is the version number,
    // label is the same number with a "(yanked)" suffix where applicable.
    let prepare_versions = |versions: &[crate::data::VersionInfo]| {
        versions
            .iter()
            .map(|version| {
                let num = IString::from(version.version.to_string());
                if version.yanked {
                    (num.clone(), format!("{num} (yanked)").into())
                } else {
                    (num.clone(), num.clone())
                }
            })
            .collect()
    };
    // Whether the user has swapped the two sides of the comparison.
    let switched = use_state(|| false);
    // Pick the (possibly swapped) source/destination views of the props.
    let (src_name, dst_name, old, new, src_info, dst_info) = if *switched {
        (
            &props.dst_name,
            &props.src_name,
            &props.new,
            &props.old,
            &props.dst_info,
            &props.src_info,
        )
    } else {
        (
            &props.src_name,
            &props.dst_name,
            &props.old,
            &props.new,
            &props.src_info,
            &props.dst_info,
        )
    };
    let src_versions: IndexMap<IString, IString> = prepare_versions(&src_info.versions);
    let dst_versions: IndexMap<IString, IString> = prepare_versions(&dst_info.versions);
    // Swap-sides callback: precompute the swapped selection, then on click
    // flip the local state and notify the parent.
    let switch = {
        let onchange = props.onchange.clone();
        let new_switched = !*switched;
        let versions = if new_switched {
            (
                (dst_name.clone(), new.clone()),
                (src_name.clone(), old.clone()),
            )
        } else {
            (
                (src_name.clone(), old.clone()),
                (dst_name.clone(), new.clone()),
            )
        };
        Callback::from(move |_| {
            switched.set(new_switched);
            onchange.emit(versions.clone());
        })
    };
    html! {
        <Navbar>
            <NavbarHeading>
                <Link to={Route::Home} classes="flex flex-row items-center">
                    <YewIcon height={"1.5ex"} icon_id={IconId::LucideFileDiff} />
                    <span>{ "diff.rs" }</span>
                </Link>
            </NavbarHeading>
            <div class="navbar-group lg:order-last grow min-w-[370px]">
                <Search />
            </div>
            <div class="navbar-group grow">
                <div class="navbar-group flex-nowrap">
                    <NavbarItem>
                        <a href={format!("https://crates.io/crates/{src_name}")} class="flex flex-row items-center">
                            <YewIcon height={"1.5ex"} icon_id={IconId::LucideBox} />
                        </a>
                        { src_name.clone() }
                    </NavbarItem>
                    <NavbarItem>
                        // Left-hand version picker: emits the new `old`
                        // version with the current `new` unchanged.
                        <Select
                            values={src_versions.clone()}
                            selected={Some(old.to_string().into()) as Option<IString>}
                            onchange={
                                let onchange = props.onchange.clone();
                                let src_name = src_name.clone();
                                let dst_name = dst_name.clone();
                                let new = new.clone();
                                move |old: IString| {
                                    let old: Version = old.parse().unwrap();
                                    onchange.emit(((src_name.clone(), old.clone()), (dst_name.clone(), new.clone())))
                                }
                            }
                        />
                    </NavbarItem>
                </div>
                <NavbarItem>
                    <span class="cursor-pointer hover:rotate-180 transition delay-150 duration-300 ease-in-out" onclick={switch}>
                        <SwitchIcon />
                    </span>
                </NavbarItem>
                <div class="navbar-group flex-nowrap">
                    <NavbarItem>
                        <a href={format!("https://crates.io/crates/{dst_name}")} class="flex flex-row items-center">
                            <YewIcon height={"1.5ex"} icon_id={IconId::LucideBox} />
                        </a>
                        { dst_name.clone() }
                    </NavbarItem>
                    <NavbarItem>
                        // Right-hand version picker: emits the new `new`
                        // version with the current `old` unchanged.
                        <Select
                            values={dst_versions}
                            selected={Some(new.to_string().into()) as Option<IString>}
                            onchange={
                                let onchange = props.onchange.clone();
                                let src_name = src_name.clone();
                                let dst_name = dst_name.clone();
                                let old = old.clone();
                                move |new: IString| {
                                    let new: Version = new.parse().unwrap();
                                    onchange.emit(((src_name.clone(), old.clone()), (dst_name.clone(), new.clone())))
                                }
                            }
                        />
                    </NavbarItem>
                </div>
            </div>
        </Navbar>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/layout.rs | src/components/layout.rs | use yew::prelude::*;
/// Props for [`Center`]: the content to center.
#[derive(Properties, PartialEq)]
pub struct CenterProps {
    pub children: Children,
}
/// Centers its children vertically (at 50% of the page) and horizontally;
/// used for full-page loading and error states.
#[function_component]
pub fn Center(props: &CenterProps) -> Html {
    html! {
        <div style="position: absolute; top: 50%; width: 100%; text-align: center;">
            { for props.children.iter() }
        </div>
    }
}
/// Props for [`Content`]: the main page content.
#[derive(Properties, PartialEq)]
pub struct ContentProps {
    #[prop_or_default]
    pub children: Children,
}
/// Main content area that grows to fill the remaining flex space.
#[function_component]
pub fn Content(props: &ContentProps) -> Html {
    html! {
        <div class="grow">
            { for props.children.iter() }
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/file_tree.rs | src/components/file_tree.rs | use crate::{
components::SearchGlass,
data::{Changes, Entry, Item, VersionDiff},
Link, Route, VersionId,
};
use camino::Utf8PathBuf;
use implicit_clone::unsync::IString;
use std::rc::Rc;
use web_sys::HtmlInputElement;
use yew::prelude::*;
/// Comparison context threaded through the file tree: the two crates and
/// versions being diffed, used to build per-file routes.
#[derive(PartialEq, Clone, Debug)]
struct Context {
    old_krate: String,
    old_version: VersionId,
    new_krate: String,
    new_version: VersionId,
}
impl Context {
    /// Build the (simplified) route to `path` within this comparison.
    fn file_route(&self, path: Utf8PathBuf) -> Route {
        Route::File {
            old_krate: self.old_krate.clone(),
            old_version: self.old_version.clone(),
            new_krate: self.new_krate.clone(),
            new_version: self.new_version.clone(),
            path,
        }
        .simplify()
    }
}
/// File-tree filter toggle: show every file, or only files with changes.
#[derive(Clone, Copy, Debug, PartialEq, Default)]
enum ChangeFilter {
    // Default: show all files.
    #[default]
    All,
    // Show only entries whose change counters are non-zero.
    Changed,
}
impl ChangeFilter {
    /// True when this filter is the [`ChangeFilter::All`] variant.
    fn is_all(&self) -> bool {
        match self {
            Self::All => true,
            Self::Changed => false,
        }
    }
    /// True when this filter is the [`ChangeFilter::Changed`] variant.
    fn is_changed(&self) -> bool {
        match self {
            Self::Changed => true,
            Self::All => false,
        }
    }
    /// Whether an entry with the given change counters passes this filter.
    /// `All` accepts everything; `Changed` only accepts entries whose
    /// counters differ from the all-zero default.
    fn matches(&self, changes: Changes) -> bool {
        self.is_all() || changes != Changes::default()
    }
}
/// File-name filter from the tree's search box: either no filter, or a
/// case-insensitive substring to match.
#[derive(Clone, Debug, PartialEq, Default)]
enum SearchFilter {
    // Default: empty search box, match everything.
    #[default]
    All,
    Filter(IString),
}
/// Render the filter back as the search-box text: empty for `All`, the
/// filter string for `Filter`.
///
/// Implemented as `Display` rather than `ToString` directly: `to_string()`
/// then comes from the std blanket impl, which is the idiomatic pattern and
/// removes the need for the previous `clippy::to_string_trait_impl` allow.
impl std::fmt::Display for SearchFilter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // Writing nothing yields the empty string, as before.
            SearchFilter::All => Ok(()),
            SearchFilter::Filter(s) => write!(f, "{s}"),
        }
    }
}
impl SearchFilter {
    /// Case-insensitive substring test of the filter against `name`.
    /// The `All` variant accepts every name.
    fn matches(&self, name: &str) -> bool {
        if let Self::Filter(needle) = self {
            name.to_lowercase().contains(&needle.to_lowercase())
        } else {
            true
        }
    }
    /// Whether this entry matches the filter, either by its own name or, for
    /// directories, by any descendant's name (recursive).
    fn match_entry(&self, entry: &Entry) -> bool {
        if self.matches(&entry.name) {
            return true;
        }
        match &entry.item {
            Item::File => false,
            Item::Dir(children) => children.iter().any(|(_, child)| self.match_entry(child)),
        }
    }
}
/// Props for [`FileTree`]: the computed version diff and the currently
/// selected file path.
#[derive(Properties, PartialEq, Clone)]
pub struct FileTreeProps {
    pub diff: Rc<VersionDiff>,
    pub path: Utf8PathBuf,
}
/// Props for [`SubTree`]: one directory entry plus the comparison context,
/// the active path, the path prefix leading to this entry, and the filters.
#[derive(Properties, PartialEq, Clone)]
struct SubTreeProps {
    pub context: Rc<Context>,
    pub entry: Rc<Entry>,
    #[prop_or_default]
    pub active: Rc<Utf8PathBuf>,
    #[prop_or_default]
    pub prefix: Rc<Utf8PathBuf>,
    #[prop_or_default]
    pub change_filter: ChangeFilter,
    #[prop_or_default]
    pub search_filter: SearchFilter,
}
/// Props for [`FileEntry`]: same shape as [`SubTreeProps`], for a single
/// file or directory row.
#[derive(Properties, PartialEq, Clone)]
struct FileEntryProps {
    pub context: Rc<Context>,
    pub entry: Rc<Entry>,
    #[prop_or_default]
    pub active: Rc<Utf8PathBuf>,
    #[prop_or_default]
    pub prefix: Rc<Utf8PathBuf>,
    #[prop_or_default]
    pub change_filter: ChangeFilter,
    #[prop_or_default]
    pub search_filter: SearchFilter,
}
/// Props for [`ExpandIcon`]: whether the owning directory is expanded.
#[derive(Debug, Properties, Clone, Copy, PartialEq)]
struct ExpandIconProps {
    pub is_expanded: bool,
}
/// Folder SVG icon for directory rows in the file tree.
#[function_component]
fn FolderIcon() -> Html {
    // from https://www.svgrepo.com/svg/491619/doc
    html! {
        <svg class="fill-blue-300 dark:fill-gray-600" viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg">
            <path d="M4 2C3.20435 2 2.44129 2.31607 1.87868 2.87868C1.31607 3.44129 1 4.20435 1 5V19C1 19.7957 1.31607 20.5587 1.87868 21.1213C2.44129 21.6839 3.20435 22 4 22H20C20.7957 22 21.5587 21.6839 22.1213 21.1213C22.6839 20.5587 23 19.7957 23 19V8C23 7.20435 22.6839 6.44129 22.1213 5.87868C21.5587 5.31607 20.7957 5 20 5H11.5352L10.1289 2.8906C9.75799 2.3342 9.13352 2 8.46482 2H4Z" />
        </svg>
    }
}
/// Document SVG icon for file rows in the file tree.
#[function_component]
fn FileIcon() -> Html {
    // from https://www.svgrepo.com/svg/491619/doc
    html! {
        <svg class="fill-gray-500" viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg">
            <path fill-rule="evenodd" clip-rule="evenodd" d="M6 1C4.34314 1 3 2.34315 3 4V20C3 21.6569 4.34315 23 6 23H19C20.6569 23 22 21.6569 22 20V10C22 9.73478 21.8946 9.48043 21.7071 9.29289L13.7071 1.29292C13.6114 1.19722 13.4983 1.1229 13.3753 1.07308C13.2572 1.02527 13.1299 1 13 1H6ZM12 3H6C5.44771 3 5 3.44771 5 4V20C5 20.5523 5.44772 21 6 21H19C19.5523 21 20 20.5523 20 20V11H13C12.4477 11 12 10.5523 12 10V3ZM18.5858 9.00003L14 4.41424V9.00003H18.5858Z" />
        </svg>
    }
}
/// Chevron icon for expandable directories; rotated 90° when expanded.
#[function_component]
fn ExpandIcon(props: &ExpandIconProps) -> Html {
    html! {
        <svg viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class={(props.is_expanded).then_some("rotate-90")}>
            <path fill-rule="evenodd" clip-rule="evenodd" d="M8.79289 6.29289C9.18342 5.90237 9.81658 5.90237 10.2071 6.29289L15.2071 11.2929C15.5976 11.6834 15.5976 12.3166 15.2071 12.7071L10.2071 17.7071C9.81658 18.0976 9.18342 18.0976 8.79289 17.7071C8.40237 17.3166 8.40237 16.6834 8.79289 16.2929L13.0858 12L8.79289 7.70711C8.40237 7.31658 8.40237 6.68342 8.79289 6.29289Z" />
        </svg>
    }
}
/// One row of the file tree: icon, name, added/removed change tags, and —
/// for directories — an expand toggle that reveals a nested [`SubTree`].
/// The whole row links to the file's diff route.
#[function_component]
fn FileEntry(props: &FileEntryProps) -> Html {
    // Full path of this entry: prefix plus its own name.
    let path = {
        let mut path = (*props.prefix).clone();
        path.push(&props.entry.name);
        path
    };
    let expanded = use_state(|| false);
    // Highlight this row when it is the currently viewed file.
    let current = path == *props.active;
    let route = props.context.file_route(path.clone());
    let toggle_expand = {
        let expanded = expanded.clone();
        Callback::from(move |event: MouseEvent| {
            // Stop the click from also following the row's link.
            event.prevent_default();
            expanded.set(!*expanded);
        })
    };
    // Hide entries (and whole subtrees) that don't match the search filter.
    if !props.search_filter.match_entry(&props.entry) {
        return html! { <></> };
    }
    html! {
        <>
            <Link to={route} classes={classes!("file-entry", current.then_some("active"))}>
                <button class={classes!("toggle", (*expanded).then_some("active"))} onclick={toggle_expand}>
                    if props.entry.item.is_dir() {
                        <ExpandIcon is_expanded={*expanded} />
                    }
                </button>
                <div class="icon">
                    if props.entry.item.is_dir() {
                        <FolderIcon />
                    } else {
                        <FileIcon />
                    }
                </div>
                <div class="name">
                    {&props.entry.name}
                </div>
                <div class="tags">
                    if props.entry.changes.added > 0 {
                        <span class="tag added">{"+"}{props.entry.changes.added}</span>
                    }
                    if props.entry.changes.removed > 0 {
                        <span class="tag removed">{"-"}{props.entry.changes.removed}</span>
                    }
                </div>
            </Link>
            if props.entry.item.is_dir() && *expanded {
                <SubTree
                    entry={props.entry.clone()}
                    context={props.context.clone()}
                    prefix={props.prefix.clone()}
                    active={props.active.clone()}
                    change_filter={props.change_filter}
                    search_filter={props.search_filter.clone()}
                />
            }
        </>
    }
}
/// Renders the children of an expanded directory entry, applying both the
/// change filter and the search filter to each child.
///
/// Must only be rendered for directory entries (debug-asserted).
#[function_component]
fn SubTree(props: &SubTreeProps) -> Html {
    debug_assert!(props.entry.item.is_dir());
    // build new prefix
    let mut prefix = (*props.prefix).clone();
    prefix.push(&props.entry.name);
    let prefix = Rc::new(prefix);
    let entries = match &props.entry.item {
        // SAFETY of unreachable: guarded by the debug_assert / caller contract.
        Item::File => unreachable!(),
        Item::Dir(entries) => entries,
    };
    html! {
        <div class="file-subtree">
            {
                entries
                    .iter()
                    .filter(|(_, entry)| props.change_filter.matches(entry.changes))
                    // Directories pass through so matching descendants stay reachable.
                    .filter(|(_, entry)| props.search_filter.matches(&entry.name) || entry.item.is_dir())
                    .map(|(key, entry)| html! {
                        <FileEntry
                            key={key.to_string()}
                            context={props.context.clone()}
                            entry={entry.clone()}
                            prefix={prefix.clone()}
                            active={props.active.clone()}
                            change_filter={props.change_filter}
                            search_filter={props.search_filter.clone()}
                        />
                    })
                    .collect::<Html>()
            }
        </div>
    }
}
/// Sidebar file tree for a version diff: search box, all/changed filter
/// buttons, and the root-level [`FileEntry`] rows.
#[function_component]
pub fn FileTree(props: &FileTreeProps) -> Html {
    // Root of the diff tree; an empty map if the root is (unexpectedly) a file.
    let entries = match props.diff.tree.item.clone() {
        Item::File => Default::default(),
        Item::Dir(entries) => entries,
    };
    let change_filter = use_state(|| ChangeFilter::All);
    // Builds a click handler that switches to the given filter.
    let change_filter_set = |filter: ChangeFilter| {
        let change_filter = change_filter.clone();
        move |event: MouseEvent| {
            change_filter.set(filter);
            event.prevent_default();
        }
    };
    let search_filter = use_state(|| SearchFilter::All);
    let prefix = Rc::new(Utf8PathBuf::default());
    let active = Rc::new(props.path.clone());
    let context = Rc::new(Context {
        old_krate: props.diff.left.version.krate.clone(),
        old_version: props.diff.left.version.version.clone().into(),
        new_krate: props.diff.right.version.krate.clone(),
        new_version: props.diff.right.version.version.clone().into(),
    });
    html! {
        <div class="file-tree">
            <div class="header">
                <FileSearch filter={search_filter.clone()} />
                <div class="button-group" role="group">
                    <button
                        type="button"
                        class={classes!("first", change_filter.is_all().then_some("active"))}
                        onclick={change_filter_set(ChangeFilter::All)}>
                        {"all"}
                    </button>
                    <button
                        type="button"
                        class={classes!("last", change_filter.is_changed().then_some("active"))}
                        onclick={change_filter_set(ChangeFilter::Changed)}>
                        {"changed"}
                    </button>
                </div>
            </div>
            {
                entries
                    .into_iter()
                    .filter(|(_, entry)| change_filter.matches(entry.changes))
                    // Directories pass through so matching descendants stay reachable.
                    .filter(|(_, entry)| search_filter.matches(&entry.name) || entry.item.is_dir())
                    .map(|(key, entry)| html! {
                        <FileEntry
                            {key}
                            {entry}
                            prefix={prefix.clone()}
                            active={active.clone()}
                            context={context.clone()}
                            change_filter={*change_filter}
                            search_filter={(*search_filter).clone()}
                        />
                    })
                    .collect::<Html>()
            }
        </div>
    }
}
/// Props for [`FileSearch`]: the shared search-filter state handle, updated
/// directly by the input.
#[derive(Properties, PartialEq)]
pub struct FileSearchProps {
    filter: UseStateHandle<SearchFilter>,
}
/// Search box above the file tree; every keystroke replaces the shared
/// [`SearchFilter`] state with a new `Filter` value.
#[function_component]
fn FileSearch(props: &FileSearchProps) -> Html {
    let oninput = {
        let search_filter = props.filter.clone();
        move |event: InputEvent| {
            let value = event
                .target_unchecked_into::<HtmlInputElement>()
                .value()
                .into();
            search_filter.set(SearchFilter::Filter(value));
        }
    };
    html! {
        <div class="relative w-full">
            <div class="absolute inset-y-0 start-0 flex items-center ps-3 pointer-events-none">
                <SearchGlass />
            </div>
            <input type="search" class="block w-full p-1 ps-10 text-gray-900 border border-gray-300 rounded-lg bg-gray-50 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-800 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500" placeholder="Filter..." value={props.filter.to_string()} {oninput} />
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/components/non_ideal.rs | src/components/non_ideal.rs | use yew::prelude::*;
use yewprint::*;
/// Props for [`Error`]: a short title and a longer status/detail message.
#[derive(Properties, PartialEq, Clone)]
pub struct ErrorProps {
    pub title: String,
    pub status: String,
}
/// Full-page error state: large danger icon above a title and status text.
#[function_component]
pub fn Error(props: &ErrorProps) -> Html {
    html! {
        <div class="m-auto text-center dark:text-gray-100">
            <div class="p-2" style="font-size: 48px; line-height: 48px;">
                <Icon icon={Icon::Error} intent={Intent::Danger} size={48} />
            </div>
            <div class="">
                <h4 class="font-bold p-2">{ &props.title }</h4>
                <div>{ &props.status }</div>
            </div>
        </div>
    }
}
/// Props for [`Loading`]: a short title and a longer status message.
#[derive(Properties, PartialEq, Clone)]
pub struct LoadingProps {
    pub title: String,
    pub status: String,
}
/// Full-page loading state: spinner above a title and status text.
#[function_component]
pub fn Loading(props: &LoadingProps) -> Html {
    html! {
        <div class="m-auto text-center dark:text-gray-100">
            <div class="p-2" style="font-size: 48px; line-height: 48px;">
                <Spinner size={48.0} />
            </div>
            <div class="">
                <h4 class="font-bold p-2">{ &props.title }</h4>
                <div>{ &props.status }</div>
            </div>
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/diff.rs | src/views/diff.rs | use crate::{cache::*, components::*, data::*, version::VersionId, Route};
use camino::Utf8PathBuf;
use semver::Version;
use std::sync::Arc;
use yew::{prelude::*, suspense::*};
use yew_router::prelude::*;
/// Props for which file to show: the two crate names, the two version
/// selectors, and the optional file path within the crates.
#[derive(Properties, PartialEq, Clone)]
pub struct DiffProps {
    pub src_name: String,
    pub dst_name: String,
    pub old: VersionId,
    pub new: VersionId,
    #[prop_or_default]
    pub path: Option<Utf8PathBuf>,
}
/// Show diff of a file change between two crate versions.
///
/// Top-level diff page: wraps [`CrateFetcher`] in a Suspense boundary with a
/// full-page loading fallback while crate metadata is fetched.
#[function_component]
pub fn Diff(props: &DiffProps) -> Html {
    let fallback = html! {
        <>
            <SimpleNavbar />
            <Content>
                <Center>
                    <Loading title={"Loading crate"} status={"Loading crate metadata"} />
                </Center>
            </Content>
        </>
    };
    html! {
        <Suspense {fallback}>
            <CrateFetcher
                src_name={props.src_name.clone()}
                dst_name={props.dst_name.clone()}
                old={props.old.clone()}
                new={props.new.clone()}
                path={props.path.clone()}
            />
        </Suspense>
    }
}
/// Fetches (cached) crates.io metadata for both crates, then hands off to
/// [`VersionResolver`]; on failure renders a combined error page naming
/// whichever crate(s) failed.
#[function_component]
fn CrateFetcher(props: &DiffProps) -> HtmlResult {
    let info = use_future_with(
        (props.src_name.clone(), props.dst_name.clone()),
        |names| async move {
            (
                CRATE_RESPONSE_CACHE.fetch_cached(&names.0).await,
                CRATE_RESPONSE_CACHE.fetch_cached(&names.1).await,
            )
        },
    )?;
    // Early-return on full success; otherwise collect (name, error) pairs.
    let errors = match &*info {
        (Ok(src_info), Ok(dst_info)) => {
            return Ok(html! {
                <VersionResolver
                    {src_info}
                    {dst_info}
                    old={props.old.clone()}
                    new={props.new.clone()}
                    path={props.path.clone()}
                />
            })
        }
        (Err(error), Ok(_)) => vec![(&props.src_name, error)],
        (Ok(_), Err(error)) => vec![(&props.dst_name, error)],
        (Err(src_error), Err(dst_error)) => {
            vec![(&props.src_name, src_error), (&props.dst_name, dst_error)]
        }
    };
    let errors = errors
        .iter()
        .map(|(name, error)| format!("{name} with {error}"))
        .collect::<Vec<_>>()
        .join(" and ");
    Ok(html! {
        <>
            <SimpleNavbar />
            <Content>
                <Center>
                    <Error title={"Loading crate"} status={format!("Error: {errors}")} />
                </Center>
            </Content>
        </>
    })
}
/// Props for [`VersionResolver`]: fetched metadata for both crates plus the
/// (still unresolved) version selectors and optional file path.
#[derive(Properties, PartialEq, Clone)]
struct VersionResolverProps {
    src_info: Arc<CrateResponse>,
    dst_info: Arc<CrateResponse>,
    old: VersionId,
    new: VersionId,
    path: Option<Utf8PathBuf>,
}
/// Resolves the two version selectors against the crate metadata and hands
/// off to [`SourceFetcher`]; renders an error page naming any version that
/// could not be found.
#[function_component]
fn VersionResolver(props: &VersionResolverProps) -> Html {
    // find krate version info
    let old = props.src_info.version(props.old.clone());
    let new = props.dst_info.version(props.new.clone());
    let errors = match (old, new) {
        (Some(old), Some(new)) => {
            return html! {
                <SourceFetcher
                    src_info={props.src_info.clone()}
                    dst_info={props.dst_info.clone()}
                    old={old.clone()}
                    new={new.clone()}
                    path={props.path.clone()}
                />
            }
        }
        // get invalid versions from props
        (None, Some(_)) => vec![(&props.src_info, &props.old)],
        (Some(_), None) => vec![(&props.dst_info, &props.new)],
        (None, None) => vec![(&props.src_info, &props.old), (&props.dst_info, &props.new)],
    };
    let errors = errors
        .iter()
        .map(|(info, version)| format!("Error: version {version} of {} not found", info.krate.id))
        .collect::<Vec<_>>()
        .join(" and ");
    html! {
        <>
            <SimpleNavbar />
            <Content>
                <Center>
                    <Error title={"Resolving version"} status={errors} />
                </Center>
            </Content>
        </>
    }
}
/// Props for the source-fetching stage: crate metadata plus the two fully
/// resolved versions and the optional file path.
#[derive(Properties, PartialEq, Clone)]
struct SourceFetcherProps {
    src_info: Arc<CrateResponse>,
    dst_info: Arc<CrateResponse>,
    old: VersionInfo,
    new: VersionInfo,
    path: Option<Utf8PathBuf>,
}
/// Suspense wrapper around [`SourceFetcherInner`]; while sources download it
/// keeps the full [`ComplexNavbar`] visible with a loading indicator below.
#[function_component]
fn SourceFetcher(props: &SourceFetcherProps) -> Html {
    let fallback = html! {
        <>
            <ComplexNavbar
                src_name={props.src_info.krate.id.clone()}
                dst_name={props.dst_info.krate.id.clone()}
                old={props.old.version.clone()}
                new={props.new.version.clone()}
                src_info={props.src_info.clone()}
                dst_info={props.dst_info.clone()}
            />
            <Center>
                <Loading title={"Loading crate"} status={"Loading crate source"} />
            </Center>
        </>
    };
    html! {
        <Suspense {fallback}>
            <SourceFetcherInner
                src_info={props.src_info.clone()}
                dst_info={props.dst_info.clone()}
                old={props.old.clone()}
                new={props.new.clone()}
                path={props.path.clone()}
            />
        </Suspense>
    }
}
/// Downloads (cached) the source archives for both resolved versions, then
/// renders the diff via [`SourceView`]. Redirects to `Cargo.toml` when no
/// file path was given; shows an error page (with an interactive navbar)
/// when either download fails.
///
/// Fix: removed a leftover `dbg!(&props.path)` debugging statement that
/// printed to the console on every render.
#[function_component]
fn SourceFetcherInner(props: &SourceFetcherProps) -> HtmlResult {
    // fetch old version source
    let old = use_future_with(props.old.clone(), |version| async move {
        CRATE_SOURCE_CACHE.fetch_cached(&version).await
    })?;
    // fetch new version source
    let new = use_future_with(props.new.clone(), |version| async move {
        CRATE_SOURCE_CACHE.fetch_cached(&version).await
    })?;
    let navigator = use_navigator().unwrap();
    let (old, new) = match (&*old, &*new) {
        (Ok(old), Ok(new)) => (old, new),
        (Err(error), _) | (_, Err(error)) => {
            // Keep the navbar interactive on failure so the user can pick
            // different versions; show the first error below it.
            return Ok(html! {
                <>
                    <ComplexNavbar
                        src_name={props.src_info.krate.id.clone()}
                        dst_name={props.dst_info.krate.id.clone()}
                        old={props.old.version.clone()}
                        new={props.new.version.clone()}
                        src_info={props.src_info.clone()}
                        dst_info={props.dst_info.clone()}
                        onchange={
                            let path = props.path.clone();
                            move |((src_name, old), (dst_name, new)): ((String, Version), (String, Version))| {
                                navigator.push(&Route::File {
                                    old_krate: src_name.clone(),
                                    new_krate: dst_name.clone(),
                                    old_version: old.clone().into(),
                                    new_version: new.clone().into(),
                                    path: path.clone().unwrap_or_default().into(),
                                });
                            }
                        }
                    />
                    <Content>
                        <Center>
                            <Error title={"Loading crate"} status={format!("Error: {error}")} />
                        </Center>
                    </Content>
                </>
            })
        }
    };
    let path = match &props.path {
        None => {
            // No file selected: redirect to the manifest, which every crate has.
            return Ok(html! {
                <Redirect<Route> to={Route::File {
                    old_krate: props.src_info.krate.id.clone(),
                    new_krate: props.dst_info.krate.id.clone(),
                    old_version: props.old.version.clone().into(),
                    new_version: props.new.version.clone().into(),
                    path: "Cargo.toml".into(),
                }} />
            })
        }
        Some(path) => path.clone(),
    };
    Ok(html! {
        <div class="">
            <SourceView
                src_info={props.src_info.clone()}
                dst_info={props.dst_info.clone()}
                {old}
                {new}
                {path}
            />
        </div>
    })
}
/// Props for [`SourceView`]: crate metadata plus the downloaded sources of
/// both versions and the file path to render.
#[derive(Properties, PartialEq, Clone)]
pub struct SourceViewProps {
    /// Metadata of the crate the old version belongs to.
    pub src_info: Arc<CrateResponse>,
    /// Metadata of the crate the new version belongs to.
    pub dst_info: Arc<CrateResponse>,
    /// Downloaded source of the old version.
    pub old: Arc<CrateSource>,
    /// Downloaded source of the new version.
    pub new: Arc<CrateSource>,
    /// File currently being viewed.
    pub path: Utf8PathBuf,
}
/// Renders the file tree and diff view for two downloaded crate sources.
#[function_component]
pub fn SourceView(props: &SourceViewProps) -> Html {
    // Computing the diff is expensive, so memoize it per (old, new) pair.
    let diff = use_memo((props.old.clone(), props.new.clone()), |(old, new)| {
        VersionDiff::new(old.clone(), new.clone())
    });
    let navigator = use_navigator().unwrap();
    html! {
        <>
            <ComplexNavbar
                src_name={props.src_info.krate.id.clone()}
                dst_name={props.dst_info.krate.id.clone()}
                old={props.old.version.version.clone()}
                new={props.new.version.version.clone()}
                src_info={props.src_info.clone()}
                dst_info={props.dst_info.clone()}
                onchange={
                    // Navigate to the same file under the newly picked versions.
                    // The `move` closure captures `navigator` directly; the
                    // previous redundant `let navigator = navigator;` was removed.
                    let path = props.path.clone();
                    move |((src_name, old), (dst_name, new)): ((String, Version), (String, Version))| {
                        navigator.push(&Route::File {
                            old_krate: src_name.clone(),
                            new_krate: dst_name.clone(),
                            old_version: old.clone().into(),
                            new_version: new.clone().into(),
                            path: path.clone(),
                        });
                    }
                }
            />
            <Content>
                <main class="flex flex-col md:flex-row gap-2 lg:gap-4 p-2">
                    <nav id="files" class="md:w-72 lg:w-84 xl:w-96" aria-label="Files">
                        <FileTree
                            diff={diff.clone()}
                            path={props.path.clone()}
                        />
                    </nav>
                    <div id="diff-view" class="flex-1">
                        <DiffView {diff} path={props.path.clone()} />
                    </div>
                </main>
            </Content>
        </>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/repo.rs | src/views/repo.rs | use crate::{
cache::*,
components::*,
data::{CrateResponse, CrateSource, RepositoryInfo, VersionDiff, VersionInfo},
version::VersionId,
};
use camino::Utf8PathBuf;
use std::{rc::Rc, sync::Arc};
use yew::{prelude::*, suspense::*};
/// Props for [`RepoFileView`]: which crate release to compare against its
/// repository, and which file to show.
#[derive(Properties, PartialEq)]
pub struct RepoFileViewProps {
    /// Name of the crate.
    pub krate: String,
    /// Requested version (resolved later by `VersionResolver`).
    pub version: VersionId,
    /// File to display.
    pub path: Utf8PathBuf,
}
/// Top-level view comparing a crate release against its repository sources.
///
/// Shows a loading page while the crate metadata is fetched.
#[function_component]
pub fn RepoFileView(props: &RepoFileViewProps) -> Html {
    // Shown while CrateFetcher's metadata request is in flight.
    let fallback = html! {
        <>
            <SimpleNavbar />
            <Content>
                <Center>
                    <Loading title={"Loading crate"} status={"Loading crate metadata"} />
                </Center>
            </Content>
        </>
    };
    html! {
        <Suspense {fallback}>
            <CrateFetcher
                krate={props.krate.clone()}
                version={props.version.clone()}
                path={props.path.clone()}
            />
        </Suspense>
    }
}
/// Fetches the crate metadata for `props.krate` and hands it to the
/// [`VersionResolver`].
///
/// Suspends while the metadata request is in flight; renders an error page
/// with a simple navbar if it fails.
#[function_component]
fn CrateFetcher(props: &RepoFileViewProps) -> HtmlResult {
    let info = use_future_with(props.krate.clone(), |krate| async move {
        CRATE_RESPONSE_CACHE.fetch_cached(&krate).await
    })?;
    let info = match &*info {
        Ok(info) => info.clone(),
        Err(error) => {
            return Ok(html! {
                <>
                    <SimpleNavbar />
                    <Content>
                        <Center>
                            <Error title={"Loading crate"} status={format!("Error: {error}")} />
                        </Center>
                    </Content>
                </>
            });
        }
    };
    // `info` is already owned here and not used afterwards, so pass it
    // without an extra Arc clone.
    Ok(html! {
        <VersionResolver
            info={info}
            version={props.version.clone()}
            path={props.path.clone()}
        />
    })
}
/// Props for [`VersionResolver`].
#[derive(Properties, PartialEq)]
pub struct VersionResolverProps {
    /// Crate metadata used to resolve the version.
    info: Arc<CrateResponse>,
    /// Requested version identifier.
    version: VersionId,
    /// File to display.
    path: Utf8PathBuf,
}
/// Resolves the requested `VersionId` to a concrete `VersionInfo` using the
/// crate metadata, or renders a "version not found" message.
#[function_component]
fn VersionResolver(props: &VersionResolverProps) -> Html {
    let Some(version) = &props.info.version(props.version.clone()) else {
        return html! {
            {"version not found"}
        };
    };
    html! {
        <CrateSourceFetcher
            info={props.info.clone()}
            version={(*version).clone()}
            path={props.path.clone()}
        />
    }
}
/// Props for [`CrateSourceFetcher`].
#[derive(Properties, PartialEq)]
pub struct CrateSourceFetcherProps {
    /// Crate metadata.
    info: Arc<CrateResponse>,
    /// Resolved version whose source should be fetched.
    version: VersionInfo,
    /// File to display.
    path: Utf8PathBuf,
}
/// Wraps [`CrateSourceFetcherInner`] in a `<Suspense>` with a minimal
/// loading fallback.
#[function_component]
fn CrateSourceFetcher(props: &CrateSourceFetcherProps) -> Html {
    html! {
        <Suspense fallback={html!{{"Loading"}}}>
            <CrateSourceFetcherInner info={props.info.clone()} version={props.version.clone()} path={props.path.clone()} />
        </Suspense>
    }
}
/// Downloads the published source of the given crate version and forwards
/// it to [`RepoSourceFetcher`].
#[function_component]
fn CrateSourceFetcherInner(props: &CrateSourceFetcherProps) -> HtmlResult {
    let source = use_future_with(props.version.clone(), |version| async move {
        CRATE_SOURCE_CACHE.fetch_cached(&version).await
    })?;
    let source = match &*source {
        Ok(source) => source.clone(),
        Err(_error) => {
            // The error itself is not shown, only a generic message.
            return Ok(html! {
                {"Error fetching source"}
            });
        }
    };
    Ok(html! {
        <RepoSourceFetcher
            info={props.info.clone()}
            {source}
            path={props.path.clone()}
        />
    })
}
/// Props for [`RepoSourceFetcher`].
#[derive(Properties, PartialEq)]
pub struct RepoSourceFetcherProps {
    /// Crate metadata (provides the repository URL).
    info: Arc<CrateResponse>,
    /// Published source of the selected version.
    source: Arc<CrateSource>,
    /// File to display.
    path: Utf8PathBuf,
}
/// Resolves the repository information for a crate release.
///
/// Renders an error message if the crate has no `repository` field or its
/// `cargo_vcs_info` cannot be read; otherwise defers the repository
/// download to [`RepoSourceFetcherInner`].
#[function_component]
fn RepoSourceFetcher(props: &RepoSourceFetcherProps) -> Html {
    let Some(repository) = props.info.krate.repository.clone() else {
        return html! {
            {"No repository set in crate metadata"}
        };
    };
    let vcs_info = match props.source.cargo_vcs_info() {
        Ok(info) => info,
        Err(error) => {
            return html! {
                <>
                    {"error: "}{error.to_string()}
                </>
            };
        }
    };
    let info = RepositoryInfo {
        repository,
        vcs_info,
    };
    let fallback = html! {
        {"Loading repository archive"}
    };
    html! {
        <Suspense {fallback}>
            <RepoSourceFetcherInner
                info={props.info.clone()}
                repository={info}
                source={props.source.clone()}
                path={props.path.clone()}
            />
        </Suspense>
    }
}
/// Props for [`RepoSourceFetcherInner`].
#[derive(Properties, PartialEq)]
pub struct RepoSourceFetcherInnerProps {
    /// Crate metadata.
    info: Arc<CrateResponse>,
    /// Published source of the selected version.
    source: Arc<CrateSource>,
    /// Repository location and VCS revision info.
    repository: RepositoryInfo,
    /// File to display.
    path: Utf8PathBuf,
}
/// Downloads the repository archive and renders a diff of repository
/// contents (old side) against the published crate source (new side).
#[function_component]
fn RepoSourceFetcherInner(props: &RepoSourceFetcherInnerProps) -> HtmlResult {
    let source = use_future_with(props.repository.clone(), |repository| async move {
        repository.fetch().await.map(Arc::new)
    })?;
    let source = match &*source {
        Ok(source) => source.clone(),
        Err(error) => {
            return Ok(html! {
                {format!("Error repo: {error}")}
            })
        }
    };
    // Diff is recomputed on every render here (unlike SourceView, which
    // memoizes it).
    let diff = VersionDiff::new(source.clone(), props.source.clone());
    let diff = Rc::new(diff);
    Ok(html! {
        <Content>
            <main class="flex flex-col md:flex-row gap-2 lg:gap-4 p-2">
                <nav id="files" class="md:w-72 lg:w-84 xl:w-96" aria-label="Files">
                    <FileTree
                        diff={diff.clone()}
                        path={props.path.clone()}
                    />
                </nav>
                <div id="diff-view" class="flex-1">
                    <DiffView {diff} path={props.path.clone()} />
                </div>
            </main>
        </Content>
    })
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/search.rs | src/views/search.rs | use crate::{components::*, Route};
use yew::prelude::*;
use yew_hooks::prelude::*;
use yew_router::prelude::*;
/// Props for the [`Search`] view.
#[derive(Properties, PartialEq)]
pub struct SearchProps {
    /// Current search query from the route.
    pub search: String,
}
/// The "diff.rs" heading shown above the search bar.
#[function_component]
fn Logo() -> Html {
    html! {
        <h1 class="text-center text-3xl font-bold my-12 dark:text-white">{ "diff.rs" }</h1>
    }
}
/// Search view, shows search results.
#[function_component]
pub fn Search(props: &SearchProps) -> Html {
    // Debounce the query (500, presumably milliseconds) so SearchResults
    // only re-queries after the user stops typing.
    let state = use_debounce_state(String::new, 500);
    state.set(props.search.clone());
    let navigator = use_navigator().unwrap();
    // Typing updates the route; an empty query navigates back home.
    let onchange = move |input: String| {
        if input.is_empty() {
            navigator.push(&Route::Home);
        } else {
            navigator.push(&Route::Search { query: input });
        }
    };
    html! {
        <div class="flex flex-col min-h-screen">
            <div class="flex-1">
                <SimpleNavbar />
                <Content>
                    <div class="max-w-3xl m-auto">
                        <Logo />
                        <SearchBar value={props.search.to_string()} {onchange} />
                        <div class="my-6">
                            <SearchResults query={state.to_string()} />
                        </div>
                    </div>
                </Content>
            </div>
            <Footer />
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/home.rs | src/views/home.rs | use crate::{components::*, Route};
use yew::prelude::*;
use yew_router::prelude::*;
/// The "diff.rs" heading shown on the home page.
#[function_component]
fn Logo() -> Html {
    html! {
        <h1 class="text-center text-3xl font-bold my-12 dark:text-white">{ "diff.rs" }</h1>
    }
}
/// Home page, shows search bar.
#[function_component]
pub fn Home() -> Html {
    let navigator = use_navigator().unwrap();
    // A non-empty query switches to the search view; empty input is ignored.
    let onchange = move |input: String| {
        if !input.is_empty() {
            navigator.push(&Route::Search { query: input });
        }
    };
    html! {
        <div class="flex flex-col min-h-screen">
            <div class="flex-1">
                <SimpleNavbar />
                <Content>
                    <Logo />
                    <div class="max-w-3xl m-auto">
                        <SearchBar value={""} {onchange} />
                    </div>
                    <DefaultSummarySection />
                </Content>
            </div>
            <Footer />
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/not_found.rs | src/views/not_found.rs | use crate::components::{Center, Content, Error, Footer, SimpleNavbar};
use yew::prelude::*;
/// Not found view, shows generic error.
#[function_component]
pub fn NotFound() -> Html {
    html! {
        <div class="flex flex-col min-h-screen">
            <div class="flex-1">
                <SimpleNavbar />
                <Content>
                    <Center>
                        <Error title={"Not found"} status={"The URL was not found"} />
                    </Center>
                </Content>
            </div>
            <Footer />
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
xfbs/diff.rs | https://github.com/xfbs/diff.rs/blob/1a21d67f3a6c0679927fe903df4ca6303c739e13/src/views/about.rs | src/views/about.rs | use crate::components::{Content, Footer, SimpleNavbar};
use yew::prelude::*;
const TEXT: &str = include_str!("about.md");
/// Renders the embedded about-page markdown as HTML.
#[function_component]
fn AboutText() -> Html {
    // Convert the bundled markdown to an HTML string with default options,
    // then inject it unescaped (the text is trusted, it ships with the app).
    let rendered = comrak::markdown_to_html(TEXT, &Default::default());
    let body = Html::from_html_unchecked(AttrValue::from(rendered));
    html! {
        <div class="prose prose-slate dark:prose-invert max-w-2xl m-auto p-4 pt-12">
            { body }
        </div>
    }
}
/// About page, showing background information on this project.
#[function_component]
pub fn About() -> Html {
    html! {
        <div class="flex flex-col min-h-screen">
            <div class="flex-1">
                <SimpleNavbar />
                <Content>
                    <AboutText />
                </Content>
            </div>
            <Footer />
        </div>
    }
}
| rust | MIT | 1a21d67f3a6c0679927fe903df4ca6303c739e13 | 2026-01-04T20:20:18.095653Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/lib.rs | tsproto/src/lib.rs | //! This library implements the TeamSpeak3 protocol.
//!
//! For a usable library to build clients and bots, you should take a look at
//! [`tsclientlib`](https://github.com/ReSpeak/tsclientlib), which provides a
//! convenient interface to this library.
//!
//! If you are searching for a usable client, [Qint](https://github.com/ReSpeak/Qint)
//! is a cross-platform TeamSpeak client, which is using this library (more
//! correctly, it is using `tsclientlib`).
//!
//! For more info on this project, take a look at the
//! [tsclientlib README](https://github.com/ReSpeak/tsclientlib).
use std::fmt;
use std::num::ParseIntError;
use base64::prelude::*;
use serde::de::{Unexpected, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use thiserror::Error;
use tsproto_packets::packets;
use tsproto_types::crypto::EccKeyPrivP256;
pub mod algorithms;
pub mod client;
pub mod connection;
pub mod license;
pub mod log;
pub mod packet_codec;
pub mod resend;
pub mod utils;
use algorithms as algs;
// The build environment of tsproto.
git_testament::git_testament!(TESTAMENT);
/// Returns the build/git information recorded at compile time.
#[doc(hidden)]
pub fn get_testament() -> &'static git_testament::GitTestament<'static> { &TESTAMENT }
type Result<T> = std::result::Result<T, Error>;
/// The maximum size in bytes of a single UDP packet buffer.
///
/// The maximum packet size is 500 bytes, as used by
/// `algorithms::compress_and_split`.
/// We pick the ethernet MTU for possible future compatibility, it is unlikely
/// that a packet will get bigger.
pub const MAX_UDP_PACKET_LENGTH: usize = 1500;
/// The maximum number of bytes for a fragmented packet.
#[allow(clippy::unreadable_literal)]
const MAX_FRAGMENTS_LENGTH: usize = 40960;
/// The maximum number of packets which are stored, if they are received
/// out-of-order.
const MAX_QUEUE_LEN: u16 = 200;
/// The maximum decompressed size of a packet.
#[allow(clippy::unreadable_literal)]
/// On large servers with more than 2000 channels, the notifychannelsubscribed packet can be
/// 50 kB large (uncompressed). A maximum size to 2 MiB should allow for even larger servers.
const MAX_DECOMPRESSED_SIZE: u32 = 2 * 1024 * 1024;
const FAKE_KEY: [u8; 16] = *b"c:\\windows\\syste";
const FAKE_NONCE: [u8; 16] = *b"m\\firewall32.cpl";
/// The root key in the TeamSpeak license system.
pub const ROOT_KEY: [u8; 32] = [
0xcd, 0x0d, 0xe2, 0xae, 0xd4, 0x63, 0x45, 0x50, 0x9a, 0x7e, 0x3c, 0xfd, 0x8f, 0x68, 0xb3, 0xdc,
0x75, 0x55, 0xb2, 0x9d, 0xcc, 0xec, 0x73, 0xcd, 0x18, 0x75, 0x0f, 0x99, 0x38, 0x12, 0x40, 0x8a,
];
/// The maximum number of ack packets that a connection temporarily stores.
///
/// When this amount is stored, no new packets will be polled from the UDP
/// connection.
const UDP_SINK_CAPACITY: usize = 50;
/// All errors that can occur in the tsproto layer.
///
/// Display messages are generated by `thiserror`; underlying causes are
/// attached as `#[source]` where available.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
    #[error("Failed to create ack packet: {0}")]
    CreateAck(#[source] tsproto_packets::Error),
    #[error("Failed to decompress packet: {0}")]
    DecompressPacket(#[source] quicklz::Error),
    #[error(transparent)]
    IdentityCrypto(tsproto_types::crypto::Error),
    #[error("Failed to parse int: {0}")]
    InvalidHex(#[source] ParseIntError),
    #[error("Maximum length exceeded for {0}")]
    MaxLengthExceeded(&'static str),
    #[error("Network error: {0}")]
    Network(#[source] std::io::Error),
    #[error("Packet {id} not in receive window [{next};{limit}) for type {p_type:?}")]
    NotInReceiveWindow { id: u16, next: u16, limit: u16, p_type: packets::PacketType },
    #[error("Failed to parse {0} packet: {1}")]
    PacketParse(&'static str, #[source] tsproto_packets::Error),
    #[error("Connection timed out: {0}")]
    Timeout(&'static str),
    #[error("Got unallowed unencrypted packet")]
    UnallowedUnencryptedPacket,
    #[error("Got unexpected init packet")]
    UnexpectedInitPacket,
    #[error("Packet has wrong client id {0}")]
    WrongClientId(u16),
    #[error("Received udp packet from wrong address")]
    WrongAddress,
    #[error("{p_type:?} Packet {generation_id}:{packet_id} has a wrong mac")]
    WrongMac { p_type: packets::PacketType, generation_id: u32, packet_id: u16 },
}
/// A TeamSpeak identity: a private key plus the hash cash counter that
/// determines the identity's security level.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Identity {
    /// The private key; (de)serialized as base64 of its short binary form.
    #[serde(serialize_with = "serialize_id_key", deserialize_with = "deserialize_id_key")]
    key: EccKeyPrivP256,
    /// The `client_key_offset`/counter for hash cash.
    counter: u64,
    /// The maximum counter that was tried, this is greater or equal to
    /// `counter` but may yield a lower level.
    max_counter: u64,
}
/// Serde visitor that parses a P256 private key from its string form.
struct IdKeyVisitor;
impl<'de> Visitor<'de> for IdKeyVisitor {
    type Value = EccKeyPrivP256;
    fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "a P256 private ecc key")
    }
    fn visit_str<E: serde::de::Error>(self, s: &str) -> std::result::Result<Self::Value, E> {
        // The concrete import error is discarded; serde only reports an
        // invalid value.
        EccKeyPrivP256::import_str(s)
            .map_err(|_| serde::de::Error::invalid_value(Unexpected::Str(s), &self))
    }
}
/// Serialize a private key as base64 of its short binary form.
fn serialize_id_key<S: Serializer>(
    key: &EccKeyPrivP256, s: S,
) -> std::result::Result<S::Ok, S::Error> {
    s.serialize_str(&BASE64_STANDARD.encode(key.to_short()))
}
/// Deserialize a private key from the string format produced by
/// `serialize_id_key`.
fn deserialize_id_key<'de, D: Deserializer<'de>>(
    d: D,
) -> std::result::Result<EccKeyPrivP256, D::Error> {
    d.deserialize_str(IdKeyVisitor)
}
impl Identity {
    /// Create a new random identity, upgraded to hash cash level 8.
    #[inline]
    pub fn create() -> Self {
        let mut res = Self::new(EccKeyPrivP256::create(), 0);
        res.upgrade_level(8);
        res
    }
    /// Create an identity from a key and counter. `max_counter` starts at
    /// the same value as `counter`.
    #[inline]
    pub fn new(key: EccKeyPrivP256, counter: u64) -> Self {
        Self::new_with_max_counter(key, counter, counter)
    }
    /// Create an identity with an explicit `max_counter`.
    #[inline]
    pub fn new_with_max_counter(key: EccKeyPrivP256, counter: u64, max_counter: u64) -> Self {
        Self { key, counter, max_counter }
    }
    /// Parse an identity from a string.
    ///
    /// Accepts the TeamSpeak export format (`<counter>V<key>`) or a plain
    /// key string (counter 0, then upgraded to level 8).
    #[inline]
    pub fn new_from_str(key: &str) -> Result<Self> {
        if let Ok(identity) = Identity::new_from_ts_str(key) {
            return Ok(identity);
        }
        let mut res = Self::new(EccKeyPrivP256::import_str(key).map_err(Error::IdentityCrypto)?, 0);
        res.upgrade_level(8);
        Ok(res)
    }
    /// Parse an identity in the TeamSpeak export format `<counter>V<key>`.
    pub fn new_from_ts_str(key: &str) -> Result<Self> {
        // 'V' separates the decimal counter from the base64 key.
        let counter_separator = key
            .find('V')
            .ok_or(Error::IdentityCrypto(tsproto_types::crypto::Error::NoCounterBlock))?;
        let counter = key[..counter_separator]
            .parse::<u64>()
            .map_err(|_| Error::IdentityCrypto(tsproto_types::crypto::Error::NoCounterBlock))?;
        let ecc_key = EccKeyPrivP256::import_str(&key[(counter_separator + 1)..])
            .map_err(Error::IdentityCrypto)?;
        Ok(Self::new(ecc_key, counter))
    }
    /// Parse an identity from a binary key, upgraded to level 8.
    #[inline]
    pub fn new_from_bytes(key: &[u8]) -> Result<Self> {
        let mut res = Self::new(EccKeyPrivP256::import(key).map_err(Error::IdentityCrypto)?, 0);
        res.upgrade_level(8);
        Ok(res)
    }
    /// The private key of this identity.
    #[inline]
    pub fn key(&self) -> &EccKeyPrivP256 { &self.key }
    /// The current hash cash counter.
    #[inline]
    pub fn counter(&self) -> u64 { self.counter }
    /// The maximum counter that was tried so far.
    #[inline]
    pub fn max_counter(&self) -> u64 { self.max_counter }
    /// Replace the private key.
    #[inline]
    pub fn set_key(&mut self, key: EccKeyPrivP256) { self.key = key }
    /// Set the hash cash counter.
    #[inline]
    pub fn set_counter(&mut self, counter: u64) { self.counter = counter; }
    /// Set the maximum tried counter.
    #[inline]
    pub fn set_max_counter(&mut self, max_counter: u64) { self.max_counter = max_counter; }
    /// Compute the current hash cash level.
    #[inline]
    pub fn level(&self) -> u8 {
        let omega = self.key.to_pub().to_ts();
        algs::get_hash_cash_level(&omega, self.counter)
    }
    /// Compute a better hash cash level.
    pub fn upgrade_level(&mut self, target: u8) {
        let omega = self.key.to_pub().to_ts();
        // Continue searching from the highest counter tried so far.
        let mut offset = self.max_counter;
        while offset < u64::MAX && algs::get_hash_cash_level(&omega, offset) < target {
            offset += 1;
        }
        self.counter = offset;
        self.max_counter = offset;
    }
}
#[cfg(test)]
mod identity_tests {
    use super::*;
    /// An exported private key without the counter prefix.
    const TEST_PRIV_KEY: &str =
        "MG8DAgeAAgEgAiEA6rtKxDn/o/Bo50rNtAE5Ph3h2RKLHQ0gbFkvm2yA79kCIQCrfzAZts/\
         vHP+3MOetKLjNnpZXt4c6U3UB4gWLKR4H9AIgYTyJofmztcTBjq3KZcDdxu+G4RPVwE5vg8VaN2jbQao=";
    /// The uid belonging to `TEST_PRIV_KEY`.
    const TEST_UID: &str = "test/9PZ9vww/Bpf5vJxtJhpz80=";
    #[test]
    fn parse_ts_base64() {
        let identity = Identity::new_from_str(TEST_PRIV_KEY).unwrap();
        let uid = identity.key().to_pub().get_uid();
        assert_eq!(TEST_UID, &uid);
    }
    #[test]
    fn parse_ts_base64_with_offset() {
        // Same key with a counter prefix in the TeamSpeak export format.
        let ident_str = String::from("2792354V") + TEST_PRIV_KEY;
        let identity = Identity::new_from_str(&ident_str).unwrap();
        let uid = identity.key().to_pub().get_uid();
        assert_eq!(TEST_UID, &uid);
        assert_eq!(identity.level(), 21u8);
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/log.rs | tsproto/src/log.rs | use std::fmt::Debug;
use std::str;
use tracing::{debug, debug_span, Span};
use tsproto_packets::packets::{InUdpPacket, OutUdpPacket, PacketType};
use tsproto_packets::HexSlice;
use crate::connection::{Connection, Event};
/// Builds the tracing span used for packet logging.
///
/// `dir` is "IN"/"OUT" (with ANSI colors on non-Windows terminals) and `to`
/// is "S" for a client (packets go to the server) or "C" otherwise.
fn prepare_span(is_client: bool, incoming: bool) -> Span {
    let colored = !cfg!(windows);
    let dir = match (incoming, colored) {
        (true, true) => "\x1b[1;32mIN\x1b[0m",
        (true, false) => "IN",
        (false, true) => "\x1b[1;31mOUT\x1b[0m",
        (false, false) => "OUT",
    };
    let to = if is_client { "S" } else { "C" };
    debug_span!("packet", to, %dir)
}
/// Log a received raw udp packet (header and hex content) at debug level.
pub fn log_udp_packet(is_client: bool, incoming: bool, packet: &InUdpPacket) {
    let _span = prepare_span(is_client, incoming).entered();
    debug!(header = ?packet.0.header(), content = %HexSlice(packet.0.content()), "UdpPacket");
}
/// Log an outgoing raw udp packet (generation, header and hex content).
pub fn log_out_udp_packet(is_client: bool, incoming: bool, packet: &OutUdpPacket) {
    let _span = prepare_span(is_client, incoming).entered();
    debug!(
        generation = packet.generation_id(),
        header = ?packet.data().header(),
        content = %HexSlice(packet.data().content()),
        "UdpPacket"
    );
}
/// Log a decoded packet using its `Debug` representation.
pub fn log_packet<P: Debug>(is_client: bool, incoming: bool, packet: &P) {
    // packet.header.c_id is not set for newly created packets so we cannot
    // detect if a packet is incoming or not.
    let _span = prepare_span(is_client, incoming).entered();
    debug!(content = ?packet, "Packet");
}
/// Log the text of a command packet, labeled "Command" or "CommandLow".
pub fn log_command(is_client: bool, incoming: bool, p_type: PacketType, cmd: &str) {
    // packet.header.c_id is not set for newly created packets so we cannot
    // detect if a packet is incoming or not.
    let _span = prepare_span(is_client, incoming).entered();
    if p_type == PacketType::Command {
        debug!(content = cmd, "Command");
    } else {
        debug!(content = cmd, "CommandLow");
    }
}
/// Add a logging event listener with the given verbosity.
///
/// 0 - Log nothing
/// 1 - Print commands
/// 2 - Print packets
/// 3 - Print udp packets
pub fn add_logger_with_verbosity(verbosity: u8, con: &mut Connection) {
    let log_commands = verbosity > 0;
    let log_packets = verbosity > 1;
    let log_udp_packets = verbosity > 2;
    add_logger(log_commands, log_packets, log_udp_packets, con);
}
/// Register an event listener that logs commands, decoded packets and/or
/// raw udp packets. If everything is disabled, no listener is added.
pub fn add_logger(
    log_commands: bool, log_packets: bool, log_udp_packets: bool, con: &mut Connection,
) {
    if log_commands || log_packets || log_udp_packets {
        let is_client = con.is_client;
        let listener = Box::new(move |event: &Event| match event {
            Event::ReceiveUdpPacket(packet) => {
                if log_udp_packets {
                    log_udp_packet(is_client, true, packet);
                }
            }
            Event::ReceivePacket(packet) => {
                if log_packets {
                    log_packet(is_client, true, packet);
                } else if log_commands {
                    // Only log command-type packets whose content is utf-8.
                    let p_type = packet.header().packet_type();
                    if p_type.is_command() {
                        if let Ok(s) = str::from_utf8(packet.content()) {
                            log_command(is_client, true, p_type, s);
                        }
                    }
                }
            }
            Event::SendUdpPacket(packet) => {
                if log_udp_packets {
                    log_out_udp_packet(is_client, false, packet);
                }
            }
            Event::SendPacket(packet) => {
                if log_packets {
                    log_packet(is_client, false, &packet.packet());
                } else if log_commands {
                    let p_type = packet.header().packet_type();
                    if p_type.is_command() {
                        if let Ok(s) = str::from_utf8(packet.content()) {
                            log_command(is_client, false, p_type, s);
                        }
                    }
                }
            }
        });
        con.event_listeners.push(listener);
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/connection.rs | tsproto/src/connection.rs | use std::collections::VecDeque;
use std::io;
use std::mem;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::prelude::*;
use generic_array::typenum::consts::U16;
use generic_array::GenericArray;
use num_traits::ToPrimitive;
use tokio::io::ReadBuf;
use tokio::net::UdpSocket;
use tracing::{info_span, Span};
use tsproto_packets::packets::*;
use tsproto_types::crypto::EccKeyPubP256;
use crate::packet_codec::PacketCodec;
use crate::resend::{PacketId, PartialPacketId, Resender, ResenderState};
use crate::{Error, Result, MAX_UDP_PACKET_LENGTH, UDP_SINK_CAPACITY};
/// The needed functions, this can be used to abstract from the underlying
/// transport and allows simulation.
pub trait Socket {
    /// Non-blocking receive of a datagram; returns the sender address.
    fn poll_recv_from(&self, cx: &mut Context, buf: &mut ReadBuf) -> Poll<io::Result<SocketAddr>>;
    /// Non-blocking send of `buf` to `target`; returns the number of bytes
    /// sent.
    fn poll_send_to(
        &self, cx: &mut Context, buf: &[u8], target: SocketAddr,
    ) -> Poll<io::Result<usize>>;
    /// The local address this socket is bound to.
    fn local_addr(&self) -> io::Result<SocketAddr>;
}
/// A cache for the key and nonce for a generation id.
/// This has to be stored for each packet type.
#[derive(Debug)]
pub struct CachedKey {
    /// The generation this key/nonce pair was computed for.
    pub generation_id: u32,
    /// Cached encryption key.
    pub key: GenericArray<u8, U16>,
    /// Cached nonce.
    pub nonce: GenericArray<u8, U16>,
}
/// Data that has to be stored for a connection when it is connected.
pub struct ConnectedParams {
    /// The client id of this connection.
    pub c_id: u16,
    /// If voice packets should be encrypted
    pub voice_encryption: bool,
    /// The public key of the other side.
    pub public_key: EccKeyPubP256,
    /// The iv used to encrypt and decrypt packets.
    pub shared_iv: [u8; 64],
    /// The mac used for unencrypted packets.
    pub shared_mac: [u8; 8],
    /// Cached key and nonce per packet type and for server to client (without
    /// client id inside the packet) and client to server communication.
    pub key_cache: [[CachedKey; 2]; 8],
}
/// An event that originates from a tsproto raw connection.
///
/// Events are delivered to all registered event listeners, e.g. for logging.
#[derive(Debug)]
pub enum Event<'a> {
    /// A raw udp packet was received.
    ReceiveUdpPacket(&'a InUdpPacket<'a>),
    /// A decoded packet was received.
    ReceivePacket(&'a InPacket<'a>),
    /// A raw udp packet was sent.
    SendUdpPacket(&'a OutUdpPacket),
    /// A packet was sent.
    SendPacket(&'a OutPacket),
}
/// An item that originates from a tsproto raw event stream.
///
/// The disconnected event is signaled by returning `None` from the stream.
#[derive(Debug)]
pub enum StreamItem {
    /// A command packet was received.
    Command(InCommandBuf),
    /// An audio packet was received.
    Audio(InAudioBuf),
    /// A client-to-server init packet was received.
    C2SInit(InC2SInitBuf),
    /// A server-to-client init packet was received.
    S2CInit(InS2CInitBuf),
    /// All packets with an id less or equal to this id were acknowledged.
    AckPacket(PacketId),
    /// The network statistics were updated.
    NetworkStatsUpdated,
    /// A recoverable error occurred; the stream stays usable.
    Error(Error),
}
/// Callback type for listeners that observe all packet events on a connection.
type EventListener = Box<dyn for<'a> Fn(&'a Event<'a>) + Send>;
/// Represents a currently alive connection.
pub struct Connection {
    /// `true` if this side is a client, `false` for the server side.
    pub is_client: bool,
    /// Tracing span carrying the local and remote address.
    pub span: Span,
    /// The parameters of this connection, if it is already established.
    pub params: Option<ConnectedParams>,
    /// The address of the other side, where packets are coming from and going
    /// to.
    pub address: SocketAddr,
    /// Handles retransmission of unacknowledged packets.
    pub resender: Resender,
    /// Encodes/decodes packets and tracks packet ids.
    pub codec: PacketCodec,
    /// The underlying transport (real udp socket or a simulated one).
    pub udp_socket: Box<dyn Socket + Send>,
    /// Reusable receive buffer for incoming datagrams.
    udp_buffer: Vec<u8>,
    /// A buffer of packets that should be returned from the stream.
    ///
    /// If a new udp packet is received and we already received the following
    /// ids, we can get multiple packets back at once. As we can only return one
    /// from the stream, the rest is stored here.
    pub(crate) stream_items: VecDeque<StreamItem>,
    /// The queue of non-command packets that should be sent.
    ///
    /// These packets are not influenced by congestion control.
    /// If it gets too long, we don't poll from the `udp_socket` anymore.
    acks_to_send: VecDeque<OutUdpPacket>,
    /// Listeners that are notified about every packet event.
    pub event_listeners: Vec<EventListener>,
}
// The real socket implementation simply delegates to tokio's `UdpSocket`.
impl Socket for UdpSocket {
    fn poll_recv_from(&self, cx: &mut Context, buf: &mut ReadBuf) -> Poll<io::Result<SocketAddr>> {
        self.poll_recv_from(cx, buf)
    }
    fn poll_send_to(
        &self, cx: &mut Context, buf: &[u8], target: SocketAddr,
    ) -> Poll<io::Result<usize>> {
        self.poll_send_to(cx, buf, target)
    }
    fn local_addr(&self) -> io::Result<SocketAddr> { self.local_addr() }
}
impl Default for CachedKey {
fn default() -> Self {
CachedKey { generation_id: u32::MAX, key: [0; 16].into(), nonce: [0; 16].into() }
}
}
impl ConnectedParams {
    /// Fills the parameters for a connection with their default state.
    ///
    /// The client id starts at 0 and voice encryption is enabled.
    pub fn new(public_key: EccKeyPubP256, shared_iv: [u8; 64], shared_mac: [u8; 8]) -> Self {
        Self {
            c_id: 0,
            voice_encryption: true,
            public_key,
            shared_iv,
            shared_mac,
            key_cache: Default::default(),
        }
    }
}
impl Connection {
pub fn new(is_client: bool, address: SocketAddr, udp_socket: Box<dyn Socket + Send>) -> Self {
let span = info_span!("connection", local_addr = %udp_socket.local_addr().unwrap(),
remote_addr = %address);
let mut res = Self {
is_client,
span,
params: None,
address,
resender: Default::default(),
codec: Default::default(),
udp_socket,
udp_buffer: Default::default(),
stream_items: Default::default(),
acks_to_send: Default::default(),
event_listeners: Default::default(),
};
if is_client {
// The first command is sent as part of the C2SInit::Init4 packet
// so it does not get registered automatically.
res.codec.outgoing_p_ids[PacketType::Command.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 1 };
} else {
res.codec.incoming_p_ids[PacketType::Command.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 1 };
}
res
}
/// Check if a given id is in the receive window.
///
/// Returns
/// 1. If the packet id is inside the receive window
/// 1. The generation of the packet
/// 1. The minimum accepted packet id
/// 1. The maximum accepted packet id
pub(crate) fn in_receive_window(&self, p_type: PacketType, p_id: u16) -> (bool, u32, u16, u16) {
if p_type == PacketType::Init {
return (true, 0, 0, 0);
}
let type_i = p_type.to_usize().unwrap();
// Receive window is the next half of ids
let cur_next = self.codec.incoming_p_ids[type_i].packet_id;
let (limit, next_gen) = cur_next.overflowing_add(u16::MAX / 2);
let gen = self.codec.incoming_p_ids[type_i].generation_id;
let in_recv_win = (!next_gen && p_id >= cur_next && p_id < limit)
|| (next_gen && (p_id >= cur_next || p_id < limit));
let gen_id = if in_recv_win {
if next_gen && p_id < limit { gen + 1 } else { gen }
} else if p_id < cur_next {
gen
} else {
gen - 1
};
(in_recv_win, gen_id, cur_next, limit)
}
pub fn send_event(&self, event: &Event) {
for l in &self.event_listeners {
l(event)
}
}
pub fn hand_back_buffer(&mut self, buffer: Vec<u8>) {
if self.udp_buffer.capacity() < MAX_UDP_PACKET_LENGTH
&& buffer.capacity() >= MAX_UDP_PACKET_LENGTH
{
self.udp_buffer = buffer;
}
}
fn poll_send_acks(&mut self, cx: &mut Context) -> Result<()> {
// Poll acks_to_send
while let Some(packet) = self.acks_to_send.front() {
match self.poll_send_udp_packet(cx, packet) {
Poll::Ready(Ok(())) => {
self.resender.handle_loss_outgoing(packet);
}
Poll::Ready(Err(e)) => return Err(e),
Poll::Pending => break,
}
self.acks_to_send.pop_front();
}
Ok(())
}
fn poll_incoming_udp_packet(&mut self, cx: &mut Context) -> Poll<Result<StreamItem>> {
if self.acks_to_send.len() >= UDP_SINK_CAPACITY {
return Poll::Pending;
}
loop {
// Poll udp_socket
if self.udp_buffer.len() != MAX_UDP_PACKET_LENGTH {
self.udp_buffer.resize(MAX_UDP_PACKET_LENGTH, 0);
}
let mut read_buf = ReadBuf::new(&mut self.udp_buffer);
match self.udp_socket.poll_recv_from(cx, &mut read_buf) {
Poll::Ready(Ok(addr)) => {
let size = read_buf.filled().len();
let mut udp_buffer = mem::take(&mut self.udp_buffer);
udp_buffer.truncate(size);
match self.handle_udp_packet(cx, udp_buffer, addr) {
Ok(()) => {
if let Some(item) = self.stream_items.pop_front() {
return Poll::Ready(Ok(item));
}
}
Err(e) => {
return Poll::Ready(Err(e));
}
}
}
// Udp socket closed
Poll::Ready(Err(e)) => return Poll::Ready(Err(Error::Network(e))),
Poll::Pending => return Poll::Pending,
}
}
}
fn handle_udp_packet(
&mut self, cx: &mut Context, udp_buffer: Vec<u8>, addr: SocketAddr,
) -> Result<()> {
let _span = self.span.clone().entered();
if addr != self.address {
self.stream_items.push_back(StreamItem::Error(Error::WrongAddress));
return Ok(());
}
let dir = if self.is_client { Direction::S2C } else { Direction::C2S };
let packet = InUdpPacket(match InPacket::try_new(dir, &udp_buffer) {
Ok(r) => r,
Err(e) => {
self.stream_items.push_back(StreamItem::Error(Error::PacketParse("udp", e)));
return Ok(());
}
});
let event = Event::ReceiveUdpPacket(&packet);
self.send_event(&event);
self.resender.received_packet();
PacketCodec::handle_udp_packet(self, cx, udp_buffer)?;
Ok(())
}
/// Try to send an ack packet.
///
/// If it does not work, add it to the ack queue.
pub(crate) fn send_ack_packet(&mut self, cx: &mut Context, packet: OutPacket) -> Result<()> {
self.send_event(&Event::SendPacket(&packet));
let mut udp_packets = PacketCodec::encode_packet(self, packet)?;
assert_eq!(
udp_packets.len(),
1,
"Encoding an ack packet should only yield a single packet"
);
let packet = udp_packets.pop().unwrap();
match self.poll_send_udp_packet(cx, &packet) {
Poll::Ready(r) => {
if r.is_ok() {
self.resender.handle_loss_outgoing(&packet);
}
r
}
Poll::Pending => {
self.acks_to_send.push_back(packet);
Ok(())
}
}
}
/// Add a packet to the send queue.
///
/// This function buffers indefinitely, to prevent using a large amount of
/// memory, check `is_send_queue_full` first and only send a packet if this
/// function returns `false`.
///
/// When the `PacketId` which is returned by this function is acknowledged,
/// the packet was successfully received by the other side of the
/// connection.
pub fn send_packet(&mut self, packet: OutPacket) -> Result<PacketId> {
	self.send_event(&Event::SendPacket(&packet));
	let encoded = PacketCodec::encode_packet(self, packet)?;
	// The id of the last fragment identifies the whole logical packet.
	let packet_id: PacketId = encoded.last().unwrap().into();
	for fragment in encoded {
		self.send_udp_packet(fragment);
	}
	Ok(packet_id)
}
/// Add an udp packet to the send queue.
pub fn send_udp_packet(&mut self, packet: OutUdpPacket) {
	let _span = self.span.clone().entered();
	// Init/Command/CommandLow packets go through the resender; everything
	// else is queued on the ack send queue.
	let goes_through_resender = matches!(
		packet.packet_type(),
		PacketType::Init | PacketType::Command | PacketType::CommandLow
	);
	if goes_through_resender {
		Resender::send_packet(self, packet);
	} else {
		self.acks_to_send.push_back(packet);
	}
}
/// Poll-send a single UDP packet on this connection's socket.
///
/// Convenience wrapper around [`Self::static_poll_send_udp_packet`] using
/// the connection's own socket, peer address and event listeners.
pub fn poll_send_udp_packet(
	&self, cx: &mut Context, packet: &OutUdpPacket,
) -> Poll<Result<()>> {
	Self::static_poll_send_udp_packet(
		&*self.udp_socket,
		self.address,
		&self.event_listeners,
		cx,
		packet,
	)
}
/// Remember to add the size of the sent packet to the stats in the resender.
pub fn static_poll_send_udp_packet(
udp_socket: &dyn Socket, address: SocketAddr, event_listeners: &[EventListener],
cx: &mut Context, packet: &OutUdpPacket,
) -> Poll<Result<()>> {
let data = packet.data().data();
match udp_socket.poll_send_to(cx, data, address).map_err(Error::Network)? {
Poll::Pending => Poll::Pending,
Poll::Ready(size) => {
let event = Event::SendUdpPacket(packet);
for l in event_listeners {
l(&event)
}
if size != data.len() {
Poll::Ready(Err(Error::Network(std::io::Error::new(
std::io::ErrorKind::Other,
"Failed to send whole udp packet",
))))
} else {
Poll::Ready(Ok(()))
}
}
}
}
/// Returns `true` when callers should back off before calling
/// [`Self::send_packet`] again (see its documentation).
pub fn is_send_queue_full(&self) -> bool { self.resender.is_full() }
/// Returns `true` when no packets are waiting in the resender's queue.
pub fn is_send_queue_empty(&self) -> bool { self.resender.is_empty() }
}
/// Pull for events.
///
/// `Ok(StreamItem::Error)` is recoverable, `Err()` is not.
///
/// Polling does a few things in round robin fashion:
/// 1. Check for new udp packets
/// 2. Use the resender to resend packets if necessary
/// 3. Use the resender to send ping packets if necessary
impl Stream for Connection {
	type Item = Result<StreamItem>;
	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
		let _span = self.span.clone().entered();
		// First try to flush queued ack packets.
		if let Err(e) = self.poll_send_acks(cx) {
			return Poll::Ready(Some(Err(e)));
		}
		if self.resender.get_state() == ResenderState::Disconnected {
			// Send all ack packets and return `None` afterwards
			if self.acks_to_send.is_empty() {
				return Poll::Ready(None);
			}
		}
		// Use the resender to resend packets
		match Resender::poll_resend(&mut self, cx) {
			Ok(()) => {}
			Err(e) => return Poll::Ready(Some(Err(e))),
		}
		// Use the resender to send pings
		match Resender::poll_ping(&mut self, cx) {
			Ok(()) => {}
			Err(e) => return Poll::Ready(Some(Err(e))),
		}
		// Return existing stream_items
		if let Some(item) = self.stream_items.pop_front() {
			return Poll::Ready(Some(Ok(item)));
		}
		// Check for new udp packets
		match self.poll_incoming_udp_packet(cx) {
			Poll::Ready(r) => return Poll::Ready(Some(r)),
			Poll::Pending => {}
		}
		Poll::Pending
	}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/client.rs | tsproto/src/client.rs | use std::net::SocketAddr;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::str;
use std::task::{Context, Poll};
use base64::prelude::*;
use futures::prelude::*;
#[cfg(not(feature = "rug"))]
use num_bigint::BigUint;
#[cfg(not(feature = "rug"))]
use num_traits::One;
use rand::Rng;
#[cfg(feature = "rug")]
use rug::integer::Order;
#[cfg(feature = "rug")]
use rug::Integer;
use thiserror::Error;
use time::OffsetDateTime;
use tracing::{info, warn};
use tsproto_packets::commands::{CommandItem, CommandParser};
use tsproto_packets::packets::*;
use tsproto_types::crypto::{EccKeyPrivEd25519, EccKeyPrivP256, EccKeyPubEd25519, EccKeyPubP256};
use crate::algorithms as algs;
use crate::connection::{ConnectedParams, Connection, Socket, StreamItem};
use crate::license::Licenses;
use crate::resend::{PacketId, ResenderState};
/// Shorthand for results using this module's [`Error`] type.
type Result<T> = std::result::Result<T, Error>;
/// Errors that can occur during the client handshake or while handling
/// packets. User-facing messages come from the `#[error]` attributes.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
	#[error("Connection ended unexpectedly")]
	ConnectionEnd,
	#[error("Got initserver, but we have not yet a full connection")]
	EarlyInitserver,
	#[error("Cannot parse base64 argument {0}: {1}")]
	InvalidBase64Arg(&'static str, #[source] base64::DecodeError),
	#[error("Invalid packet: Incorrect beta length in initivexpand2 {0} != 54")]
	InvalidBetaLength(usize),
	#[error("Got invalid client id {0:?}: {1}")]
	InvalidClientId(Vec<u8>, #[source] tsproto_packets::Error),
	#[error("Invalid packet: initivexpand2 command has wrong arguments")]
	InvalidInitivexpand2,
	#[error("Invalid packet: Cannot parse omega as key: {0}")]
	InvalidOmegaKey(#[source] tsproto_types::crypto::Error),
	#[error("Invalid packet: Cannot parse omega as string: {0}")]
	InvalidOmegaString(#[source] tsproto_packets::Error),
	#[error("Invalid packet: {0}")]
	InvalidPacket(&'static str),
	#[error("Invalid packet: Got root for initivexpand2, but length {0} != 32")]
	InvalidRootKeyLength(usize),
	#[error("The server license signature is invalid: {0}")]
	InvalidSignature(#[source] tsproto_types::crypto::Error),
	#[error("Invalid packet: Got multiple {0} in one packet")]
	MultipleCommands(&'static str),
	#[error("Got initserver without accepted client id")]
	NoClientId,
	#[error("Got no ot=1, the server is probably outdated")]
	OutdatedServer,
	#[error("Failed to parse license: {0}")]
	ParseLicense(#[source] crate::license::Error),
	#[error("Failed to solve RSA puzzle")]
	RsaPuzzle,
	#[error("Requested RSA puzzle level {0} is too high")]
	RsaPuzzleTooHighLevel(u32),
	#[error(transparent)]
	TsProto(#[from] crate::Error),
	#[error("Expected {0} but got {1}")]
	UnexpectedPacket(&'static str, String),
}
/// A client-side connection to a TeamSpeak server.
///
/// Wraps a low-level [`Connection`] (reachable through `Deref`/`DerefMut`)
/// together with the client's long-term identity key.
pub struct Client {
	con: Connection,
	/// The client identity; used to sign the ephemeral key in [`Self::connect`].
	pub private_key: EccKeyPrivP256,
}
impl Client {
/// Create a new client that talks to `address` over `udp_socket`,
/// authenticating with `private_key`. No traffic is sent until
/// [`Self::connect`] is called.
pub fn new(
	address: SocketAddr, udp_socket: Box<dyn Socket + Send>, private_key: EccKeyPrivP256,
) -> Self {
	Self { con: Connection::new(true, address, udp_socket), private_key }
}
/// Wait for the next server init packet whose step is one of `init_steps`.
///
/// All other stream items are dropped (their buffers handed back for reuse)
/// with a warning; recoverable connection errors are logged and skipped.
async fn get_init(&mut self, init_steps: &[u8]) -> Result<InS2CInitBuf> {
	self.filter_items(|con, i| {
		Ok(match i {
			StreamItem::S2CInit(packet) => {
				if init_steps.contains(&packet.data().data().get_step()) {
					Some(packet)
				} else {
					// Resent packet
					warn!(parent: &con.span, "Got wrong init packet");
					con.hand_back_buffer(packet.into_buffer());
					None
				}
			}
			StreamItem::C2SInit(packet) => {
				warn!(parent: &con.span, "Got init packet from the wrong direction");
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Error(error) => {
				warn!(parent: &con.span, %error, "Got connection error");
				None
			}
			StreamItem::AckPacket(_) => None,
			StreamItem::NetworkStatsUpdated => None,
			i => {
				warn!(parent: &con.span, got = ?i, "Unexpected packet, wanted S2CInit");
				None
			}
		})
	})
	.await
}
/// Wait for the next command packet, dropping everything else.
///
/// Recoverable connection errors are logged and skipped.
async fn get_command(&mut self) -> Result<InCommandBuf> {
	self.filter_items(|con, i| {
		Ok(match i {
			StreamItem::S2CInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::C2SInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Command(packet) => Some(packet),
			StreamItem::Error(error) => {
				warn!(parent: &con.span, %error, "Got connection error");
				None
			}
			StreamItem::AckPacket(_) => None,
			StreamItem::NetworkStatsUpdated => None,
			i => {
				warn!(parent: &con.span, got = ?i, "Unexpected packet, wanted Command");
				None
			}
		})
	})
	.await
}
/// Drop all packets until the given packet is acknowledged.
pub async fn wait_for_ack(&mut self, id: PacketId) -> Result<()> {
	self.filter_items(|con, i| {
		Ok(match i {
			StreamItem::S2CInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::C2SInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Command(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Error(error) => {
				warn!(parent: &con.span, %error, "Got connection error");
				None
			}
			StreamItem::AckPacket(ack) => {
				// `PacketId`s are ordered, so any ack at or beyond `id`
				// confirms our packet.
				if id <= ack {
					Some(())
				} else {
					None
				}
			}
			StreamItem::NetworkStatsUpdated => None,
			i => {
				warn!(parent: &con.span, got = ?i, "Unexpected packet, wanted Ack");
				None
			}
		})
	})
	.await
}
/// Drop all packets until the send queue is not full anymore.
pub async fn wait_until_can_send(&mut self) -> Result<()> {
	// Fast path: the queue may already have room.
	if !self.is_send_queue_full() {
		return Ok(());
	}
	self.filter_items(|con, i| {
		Ok(match i {
			StreamItem::S2CInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::C2SInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Command(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Error(error) => {
				warn!(parent: &con.span, %error, "Got connection error");
				None
			}
			StreamItem::AckPacket(_) => {
				// Acks free slots in the send queue; re-check after each one.
				if !con.is_send_queue_full() {
					Some(())
				} else {
					None
				}
			}
			StreamItem::NetworkStatsUpdated => None,
			i => {
				warn!(parent: &con.span, got = ?i, "Unexpected packet, wanted Ack");
				None
			}
		})
	})
	.await
}
/// Send a packet. The `send_packet` function will resolve to a future when
/// the packet has been sent.
///
/// If the packet has an acknowledgement (ack or pong), the returned future
/// will resolve when it is received. Otherwise it will resolve instantly.
pub fn send_packet(&mut self, packet: OutPacket) -> Result<PacketId> {
	let _span = self.con.span.clone().entered();
	// Sending `clientdisconnect` starts the disconnect sequence, so move
	// the resender into the disconnecting state.
	if packet.header().packet_type() == PacketType::Command
		&& packet.content().starts_with(b"clientdisconnect")
	{
		// Deref to the inner `Connection` to reach the resender.
		let this = &mut **self;
		this.resender.set_state(ResenderState::Disconnecting);
	}
	Ok(self.con.send_packet(packet)?)
}
pub async fn connect(&mut self) -> Result<()> {
// Send the first init packet
// Get the current timestamp
let now = OffsetDateTime::now_utc();
let timestamp = now.unix_timestamp() as u32;
// TeamSpeak offsets the timestamp for the version by some years
let version = timestamp - 1356998400;
// Random bytes
let random0 = rand::thread_rng().gen::<[u8; 4]>();
let alpha;
loop {
// Wait for Init1
{
self.send_packet(OutC2SInit0::new(version, timestamp, random0))?;
let init1 = self.get_init(&[1]).await?;
match init1.data().data() {
S2CInitData::Init1 { random1, random0_r } => {
// Check the response
// Most of the time, random0_r is the reversed random0, but
// sometimes it isn't so do not check it.
// random0.as_ref().iter().rev().eq(random0_r.as_ref())
// The packet is correct.
// Send next init packet
self.send_packet(OutC2SInit2::new(version, random1, **random0_r))?;
}
_ => {
return Err(Error::UnexpectedPacket(
"Init1",
format!("{:?}", init1.data().packet().header().packet_type()),
));
}
}
}
// Wait for Init3
{
let init3 = self.get_init(&[3, 127]).await?;
if init3.data().data().get_step() == 127 {
continue;
}
match init3.data().data() {
S2CInitData::Init3 { x, n, level, random2 } => {
let level = *level;
// Solve RSA puzzle: y = x ^ (2 ^ level) % n
// Use Montgomery Reduction
if level > 10_000_000 {
// Reject too high exponents
return Err(Error::RsaPuzzleTooHighLevel(level));
}
// Create clientinitiv
alpha = rand::thread_rng().gen::<[u8; 10]>();
// omega is an ASN.1-DER encoded public key from
// the ECDH parameters.
let ip = self.con.address.ip();
let x = **x;
let n = **n;
let random2 = **random2;
// Use gmp for faster computations if it is
// available.
#[cfg(feature = "rug")]
let y = {
let mut e = Integer::new();
let n = Integer::from_digits(&n[..], Order::Msf);
let x = Integer::from_digits(&x[..], Order::Msf);
e.set_bit(level, true);
let y = x.pow_mod(&e, &n).map_err(|_| Error::RsaPuzzle)?;
let mut yi = [0; 64];
y.write_digits(&mut yi, Order::Msf);
yi
};
#[cfg(not(feature = "rug"))]
let y = {
let xi = BigUint::from_bytes_be(&x);
let ni = BigUint::from_bytes_be(&n);
let e = BigUint::one() << level as usize;
let yi = xi.modpow(&e, &ni);
info!(parent: &self.con.span, level, x = %xi, n = %ni, y = %yi,
"Solve RSA puzzle");
algs::biguint_to_array(&yi)
};
// Create the command string
// omega is an ASN.1-DER encoded public key from
// the ECDH parameters.
let omega = self.private_key.to_pub().to_tomcrypt();
let ip = if crate::utils::is_global_ip(&ip) {
ip.to_string()
} else {
String::new()
};
// Send next init packet
self.send_packet(OutC2SInit4::new(
version, &x, &n, level, &random2, &y, &alpha, &omega, &ip,
))?;
}
_ => {
return Err(Error::UnexpectedPacket(
"Init3",
format!("{:?}", init3.data().packet().header().packet_type()),
));
}
}
}
break;
}
let clientek_id;
{
let command = self.get_command().await?;
let (name, args) = CommandParser::new(command.data().packet().content());
if name != b"initivexpand2" {
return Err(Error::UnexpectedPacket(
"initivexpand2",
format!("{:?}", str::from_utf8(name)),
));
}
let mut l = None;
let mut beta_vec = None;
let mut server_key = None;
let mut proof = None;
let mut ot = false;
let mut root = None;
for item in args {
match item {
CommandItem::NextCommand => {
return Err(Error::MultipleCommands("initivexpand2"));
}
CommandItem::Argument(arg) => match arg.name() {
b"l" => {
l = Some(
BASE64_STANDARD
.decode(arg.value().get())
.map_err(|e| Error::InvalidBase64Arg("proof", e))?,
)
}
b"beta" => {
beta_vec = Some(
BASE64_STANDARD
.decode(arg.value().get())
.map_err(|e| Error::InvalidBase64Arg("proof", e))?,
)
}
b"omega" => {
server_key = Some(
EccKeyPubP256::from_ts(
&arg.value().get_str().map_err(Error::InvalidOmegaString)?,
)
.map_err(Error::InvalidOmegaKey)?,
)
}
b"proof" => {
proof = Some(
BASE64_STANDARD
.decode(arg.value().get())
.map_err(|e| Error::InvalidBase64Arg("proof", e))?,
)
}
b"ot" => ot = arg.value().get_raw() == b"1",
b"root" => {
let data = BASE64_STANDARD
.decode(arg.value().get())
.map_err(|e| Error::InvalidBase64Arg("root", e))?;
let mut data2 = [0; 32];
if data.len() != 32 {
return Err(Error::InvalidRootKeyLength(data.len()));
}
data2.copy_from_slice(&data);
root = Some(EccKeyPubEd25519::from_bytes(data2));
}
_ => {}
},
}
}
if !ot {
return Err(Error::OutdatedServer);
}
if l.is_none() || beta_vec.is_none() || server_key.is_none() || proof.is_none() {
return Err(Error::InvalidInitivexpand2);
}
let l = l.unwrap();
let beta_vec = beta_vec.unwrap();
let server_key = server_key.unwrap();
let proof = proof.unwrap();
let root = root.unwrap_or_else(|| EccKeyPubEd25519::from_bytes(crate::ROOT_KEY));
// Check signature of l (proof)
server_key.verify(&l, &proof).map_err(Error::InvalidSignature)?;
if beta_vec.len() != 54 {
return Err(Error::InvalidBetaLength(beta_vec.len()));
}
let mut beta = [0; 54];
beta.copy_from_slice(&beta_vec);
// Parse license argument
let licenses = Licenses::parse(l).map_err(Error::ParseLicense)?;
// Ephemeral key of server
let server_ek = licenses.derive_public_key(root).map_err(Error::ParseLicense)?;
// Create own ephemeral key
let ek = EccKeyPrivEd25519::create();
let (iv, mac) = algs::compute_iv_mac(&alpha, &beta, &ek, &server_ek);
self.con.params = Some(ConnectedParams::new(server_key, iv, mac));
// Send clientek
let ek_pub = ek.to_pub();
let ek_s = BASE64_STANDARD.encode(ek_pub.0.as_bytes());
// Proof: ECDSA signature of ek || beta
let mut all = Vec::with_capacity(32 + 54);
all.extend_from_slice(ek_pub.0.as_bytes());
all.extend_from_slice(&beta);
let proof = self.private_key.clone().sign(&all);
let proof_s = BASE64_STANDARD.encode(&proof);
// Send clientek
let mut cmd =
OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "clientek");
cmd.write_arg("ek", &ek_s);
cmd.write_arg("proof", &proof_s);
clientek_id = self.send_packet(cmd.into_packet())?;
}
self.wait_for_ack(clientek_id).await?;
Ok(())
}
/// Filter the incoming items.
pub async fn filter_items<T, F: Fn(&mut Client, StreamItem) -> Result<Option<T>>>(
&mut self, filter: F,
) -> Result<T> {
loop {
let item = self.next().await;
match item {
None => return Err(Error::ConnectionEnd),
Some(r) => {
if let Some(r) = filter(self, r?)? {
return Ok(r);
}
}
}
}
}
/// Filter the incoming items. Drops audio packets.
///
/// Like [`Self::filter_items`], but only command packets reach `filter`;
/// everything else is dropped (buffers handed back) and recoverable
/// connection errors are logged.
pub async fn filter_commands<T, F: Fn(&mut Client, InCommandBuf) -> Result<Option<T>>>(
	&mut self, filter: F,
) -> Result<T> {
	loop {
		let item = self.next().await;
		match item {
			None => return Err(Error::ConnectionEnd),
			Some(Err(e)) => return Err(e),
			Some(Ok(StreamItem::Error(error))) => {
				warn!(%error, "Got connection error");
			}
			Some(Ok(StreamItem::AckPacket(_))) => {}
			Some(Ok(StreamItem::S2CInit(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::C2SInit(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::Audio(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::Command(packet))) => {
				if let Some(r) = filter(self, packet)? {
					return Ok(r);
				}
			}
			Some(Ok(StreamItem::NetworkStatsUpdated)) => {}
		}
	}
}
/// Drive the connection until it ends, dropping every incoming item.
///
/// Resolves with `Ok(())` when the stream finishes (i.e. the connection is
/// closed); recoverable connection errors are only logged.
pub async fn wait_disconnect(&mut self) -> Result<()> {
	loop {
		let item = self.next().await;
		match item {
			None => return Ok(()),
			Some(Err(e)) => return Err(e),
			Some(Ok(StreamItem::AckPacket(_))) => {}
			Some(Ok(StreamItem::Error(error))) => {
				warn!(%error, "Got connection error");
			}
			Some(Ok(StreamItem::S2CInit(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::C2SInit(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::Audio(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::Command(packet))) => {
				self.hand_back_buffer(packet.into_buffer());
			}
			Some(Ok(StreamItem::NetworkStatsUpdated)) => {}
		}
	}
}
/// Inspect an incoming command before it is forwarded to the user.
///
/// Handles three commands specially:
/// - `initserver`: stores the accepted client id and marks the resender
///   connected.
/// - `notifyclientleftview`: if our own client left, marks the resender
///   disconnected.
/// - `notifyplugincmd` with `name=getversion data=request`: answers with
///   our version string.
///
/// Returns the command (to be forwarded) unless an error occurs.
fn handle_command(&mut self, command: InCommandBuf) -> Result<Option<InCommandBuf>> {
	let _span = self.con.span.clone().entered();
	let (name, args) = CommandParser::new(command.data().packet().content());
	if name == b"initserver" {
		// Handle an initserver
		if let Some(params) = &mut self.params {
			let mut c_id = None;
			for item in args {
				match item {
					CommandItem::NextCommand => {
						return Err(Error::MultipleCommands("initserver"));
					}
					CommandItem::Argument(arg) => {
						// `aclid` is the client id the server accepted for us.
						if arg.name() == b"aclid" {
							c_id = Some(
								arg.value()
									.get_parse::<tsproto_packets::Error, u16>()
									.map_err(|e| {
										Error::InvalidClientId(
											arg.value().get_raw().to_vec(),
											e,
										)
									})?,
							);
							break;
						}
					}
				}
			}
			if let Some(c_id) = c_id {
				params.c_id = c_id;
			} else {
				return Err(Error::NoClientId);
			}
		} else {
			return Err(Error::EarlyInitserver);
		}
		// Notify the resender that we are connected
		let this = &mut **self;
		this.resender.set_state(ResenderState::Connected);
	} else if name == b"notifyclientleftview" {
		// Handle disconnect
		if let Some(params) = &mut self.params {
			let mut own_client = false;
			for item in args {
				match item {
					CommandItem::NextCommand => {}
					CommandItem::Argument(arg) => {
						if arg.name() == b"clid" {
							let c_id = arg
								.value()
								.get_parse::<tsproto_packets::Error, u16>()
								.map_err(|e| {
									Error::InvalidClientId(arg.value().get_raw().to_vec(), e)
								})?;
							// Any of the chained commands may refer to us.
							own_client |= c_id == params.c_id;
						}
					}
				}
			}
			if own_client {
				// We are disconnected
				let this = &mut **self;
				this.resender.set_state(ResenderState::Disconnected);
			}
		}
	} else if name == b"notifyplugincmd" {
		let mut is_getversion = false;
		let mut is_getversion_request = false;
		let mut sender: Option<u16> = None;
		// Append a sentinel `NextCommand` so the last chained command is
		// also evaluated by the `NextCommand` arm below.
		// NOTE(review): the three flags are not reset between chained
		// commands, so a later command inherits values matched earlier —
		// confirm this is intended.
		for item in args.chain(std::iter::once(CommandItem::NextCommand)) {
			match item {
				CommandItem::Argument(arg) => match arg.name() {
					b"name" => is_getversion = arg.value().get_raw() == b"getversion",
					b"data" => is_getversion_request = arg.value().get_raw() == b"request",
					b"invokerid" => {
						if let Ok(id) = arg.value().get_parse::<tsproto_packets::Error, _>() {
							sender = Some(id);
						}
					}
					_ => {}
				},
				CommandItem::NextCommand => {
					if let Some(sender) = sender {
						if is_getversion && is_getversion_request {
							// Answer a version request with our own version.
							let mut version = format!(
								"{} {}",
								env!("CARGO_PKG_NAME"),
								git_testament::render_testament!(crate::TESTAMENT),
							);
							#[cfg(debug_assertions)]
							version.push_str(" (Debug)");
							#[cfg(not(debug_assertions))]
							version.push_str(" (Release)");
							let mut cmd = OutCommand::new(
								Direction::C2S,
								Flags::empty(),
								PacketType::Command,
								"plugincmd",
							);
							cmd.write_arg("name", &"getversion");
							cmd.write_arg("data", &version);
							cmd.write_arg("targetmode", &2);
							cmd.write_arg("target", &sender);
							self.send_packet(cmd.into_packet())?;
						}
					}
				}
			}
		}
	}
	Ok(Some(command))
}
}
/// Expose the inner [`Connection`]'s API directly on [`Client`].
impl Deref for Client {
	type Target = Connection;
	fn deref(&self) -> &Self::Target { &self.con }
}
/// Mutable access to the inner [`Connection`].
impl DerefMut for Client {
	fn deref_mut(&mut self) -> &mut Self::Target { &mut self.con }
}
/// Return queued errors and inspect packets.
impl Stream for Client {
	type Item = Result<StreamItem>;
	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
		loop {
			match Pin::new(&mut self.con).poll_next(cx) {
				// Commands pass through `handle_command` first so the client
				// can react to initserver/disconnect notifications.
				Poll::Ready(Some(Ok(StreamItem::Command(command)))) => {
					match self.handle_command(command) {
						Err(e) => return Poll::Ready(Some(Err(e))),
						Ok(Some(cmd)) => {
							return Poll::Ready(Some(Ok(StreamItem::Command(cmd))));
						}
						// Command consumed internally; poll again.
						Ok(None) => {}
					}
				}
				Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e.into()))),
				Poll::Ready(Some(Ok(r))) => return Poll::Ready(Some(Ok(r))),
				Poll::Ready(None) => return Poll::Ready(None),
				Poll::Pending => return Poll::Pending,
			}
		}
	}
}
#[cfg(test)]
mod tests {
use std::cell::Cell;
use std::collections::VecDeque;
use std::io;
use std::sync::{Arc, Mutex};
use std::task::Waker;
use anyhow::{bail, Result};
use num_traits::ToPrimitive;
use once_cell::sync::Lazy;
use tokio::io::ReadBuf;
use tokio::sync::oneshot;
use tokio::time::{self, Duration};
use super::*;
use crate::connection::Event;
use crate::resend::PartialPacketId;
// Install the tracing subscriber exactly once for all tests in this module.
static TRACING: Lazy<()> = Lazy::new(|| tracing_subscriber::fmt().with_test_writer().init());
/// Shared state of a pair of [`SimulatedSocket`]s.
#[derive(Clone, Debug)]
struct SimulatedSocketState {
	// Pending datagrams per endpoint (indexed 0 and 1).
	buffer: [VecDeque<Vec<u8>>; 2],
	// Wakers registered by endpoints waiting for incoming data.
	wakers: [Option<Waker>; 2],
}
/// Simulate a connection
#[derive(Clone, Debug)]
struct SimulatedSocket {
	state: Arc<Mutex<SimulatedSocketState>>,
	/// Index into the wakers list.
	i: usize,
	// Address reported by `local_addr` and as the source of received packets.
	addr: SocketAddr,
}
/// Mutable state of a [`SimulatedMixSocket`].
#[derive(Clone, Debug)]
struct SimulatedMixSocketState {
	/// The order in which packets are sent to the inner socket.
	///
	/// The default (if the list is empty) is `0, 1, 2, 3, 4, ...`.
	/// To send the third packet first use `2, 0, 1, 3, 4, ...`.
	order: Vec<usize>,
	/// Queued packets that are waiting to be sent to the inner socket
	send_buffer: Vec<Vec<u8>>,
}
/// Simulate a connection but mix the packet order
#[derive(Debug)]
struct SimulatedMixSocket {
	// The real in-memory transport; packets reach it in the mixed order.
	inner: SimulatedSocket,
	state: Mutex<SimulatedMixSocketState>,
}
impl SimulatedSocketState {
	// Start with empty buffers and no registered wakers.
	fn new() -> Self { Self { buffer: Default::default(), wakers: Default::default() } }
}
impl SimulatedSocket {
	fn new(state: Arc<Mutex<SimulatedSocketState>>, i: usize, addr: SocketAddr) -> Self {
		Self { state, i, addr }
	}
	/// Create a pair of simulated sockets which are connected with each
	/// other.
	fn pair(addr0: SocketAddr, addr1: SocketAddr) -> (SimulatedSocket, SimulatedSocket) {
		let state = Arc::new(Mutex::new(SimulatedSocketState::new()));
		// Switch addresses as we use them as address from receiving packets
		(Self::new(state.clone(), 0, addr1), Self::new(state, 1, addr0))
	}
}
impl Socket for SimulatedSocket {
	fn poll_recv_from(
		&self, cx: &mut Context, buf: &mut ReadBuf,
	) -> Poll<io::Result<SocketAddr>> {
		let mut state = self.state.lock().unwrap();
		if let Some(packet) = state.buffer[self.i].pop_front() {
			// Packets larger than the caller's buffer are silently truncated.
			let len = std::cmp::min(buf.remaining(), packet.len());
			buf.put_slice(&packet[..len]);
			info!(packet = ?&packet[..len], "{} receives packet", self.i);
			Poll::Ready(Ok(self.addr))
		} else {
			// Nothing queued; register for a wakeup when the peer sends.
			state.wakers[self.i] = Some(cx.waker().clone());
			Poll::Pending
		}
	}
	fn poll_send_to(
		&self, _: &mut Context, buf: &[u8], _: SocketAddr,
	) -> Poll<io::Result<usize>> {
		let mut state = self.state.lock().unwrap();
		info!(packet = ?buf, "{} sends packet", self.i);
		// Deliver to the opposite endpoint's queue and wake it if waiting.
		state.buffer[1 - self.i].push_back(buf.to_vec());
		if let Some(waker) = state.wakers[1 - self.i].take() {
			waker.wake();
		}
		Poll::Ready(Ok(buf.len()))
	}
	fn local_addr(&self) -> io::Result<SocketAddr> { Ok(self.addr) }
}
impl SimulatedMixSocket {
	fn new(inner: SimulatedSocket, order: Vec<usize>) -> Self {
		// `order` must not contain duplicate indices (checked on a sorted copy).
		let mut check_order = order.clone();
		check_order.sort();
		assert!(check_order.is_empty() || check_order.windows(2).all(|win| win[0] != win[1]));
		Self {
			inner,
			state: Mutex::new(SimulatedMixSocketState {
				order,
				send_buffer: Default::default(),
			}),
		}
	}
	/// Create a connected pair of reordering sockets, one send order per side.
	fn pair(
		addr0: SocketAddr, addr1: SocketAddr, order0: Vec<usize>, order1: Vec<usize>,
	) -> (SimulatedMixSocket, SimulatedMixSocket) {
		let (inner0, inner1) = SimulatedSocket::pair(addr0, addr1);
		(Self::new(inner0, order0), Self::new(inner1, order1))
	}
}
impl Socket for SimulatedMixSocket {
	fn poll_recv_from(
		&self, cx: &mut Context, buf: &mut ReadBuf,
	) -> Poll<io::Result<SocketAddr>> {
		self.inner.poll_recv_from(cx, buf)
	}
	fn poll_send_to(
		&self, cx: &mut Context, buf: &[u8], target: SocketAddr,
	) -> Poll<io::Result<usize>> {
		let mut state = self.state.lock().unwrap();
		// Buffer the new packet, then flush every packet that is due
		// according to the configured order.
		state.send_buffer.push(buf.to_vec());
		// Send packets from send_buffer which are ready
		while !state.order.is_empty() {
			let next = state.order[0];
			if state.send_buffer.len() <= next {
				// The packet that should go out next has not arrived yet.
				break;
			}
			state.order.remove(0);
			info!("Sending packet {}", next);
			let packet = state.send_buffer.remove(next);
			let send_res = self.inner.poll_send_to(cx, &packet, target);
			let Poll::Ready(Ok(len)) = send_res else {
				panic!("Unexpected send result {:?}", send_res)
			};
			assert_eq!(len, packet.len());
			// Shift all following packet ids
			// After sending packet 2
			// 2, 0, 1, 3, 4
			// becomes
			// 0, 1, 2, 3
			for order in &mut state.order {
				if *order > next {
					*order -= 1;
				}
			}
		}
		if state.order.is_empty() {
			// No reordering left; flush the remaining backlog as-is.
			for packet in std::mem::take(&mut state.send_buffer) {
				// Send all packets in order
				info!("Sending packet in order");
				let send_res = self.inner.poll_send_to(cx, &packet, target);
				let Poll::Ready(Ok(len)) = send_res else {
					panic!("Unexpected send result {:?}", send_res)
				};
				assert_eq!(len, packet.len());
			}
		}
		Poll::Ready(Ok(buf.len()))
	}
	fn local_addr(&self) -> io::Result<SocketAddr> { self.inner.local_addr() }
}
/// A pair of [`Client`]s talking to each other over in-memory sockets.
pub struct TestConnection {
	pub client: Client,
	// Also a `Client` instance, but with `is_client` set to `false`.
	pub server: Client,
}
impl TestConnection {
	/// Create a client/server pair connected by a [`SimulatedSocket`] pair.
	pub fn new() -> Result<Self> {
		let addr = "127.0.0.1:0".parse()?;
		let (socket0, socket1) = SimulatedSocket::pair(addr, addr);
		Self::new_with_sockets(Box::new(socket0), Box::new(socket1))
	}
	pub fn new_with_sockets(
		socket0: Box<dyn Socket + Send>, socket1: Box<dyn Socket + Send>,
	) -> Result<Self> {
		// Ensure the tracing subscriber is installed exactly once.
		Lazy::force(&TRACING);
		let addr = "127.0.0.1:0".parse()?;
		let client_key = EccKeyPrivP256::create();
		let server_key = EccKeyPrivP256::create();
		let mut client = Client::new(addr, socket0, client_key);
		let mut server = Client::new(addr, socket1, server_key);
		server.is_client = false;
		// TODO Span is = "client"
		crate::log::add_logger_with_verbosity(3, &mut client);
		// TODO Span is = "server"
		crate::log::add_logger_with_verbosity(3, &mut server);
		Ok(Self { client, server })
	}
	/// Put both sides directly into the connected state, skipping the
	/// handshake.
	pub fn set_connected(&mut self) {
		Self::set_con_connected(&mut self.client, self.server.private_key.to_pub());
		Self::set_con_connected(&mut self.server, self.client.private_key.to_pub());
	}
	/// Set the connection to connected.
	fn set_con_connected(con: &mut Client, other_key: EccKeyPubP256) {
		let con = &mut **con;
		con.resender.set_state(ResenderState::Connected);
		// Start command and ack packet ids at 1, as after a real handshake.
		con.codec.outgoing_p_ids[PacketType::Command.to_usize().unwrap()] =
			PartialPacketId { generation_id: 0, packet_id: 1 };
		con.codec.incoming_p_ids[PacketType::Command.to_usize().unwrap()] =
			PartialPacketId { generation_id: 0, packet_id: 1 };
		con.codec.outgoing_p_ids[PacketType::Ack.to_usize().unwrap()] =
			PartialPacketId { generation_id: 0, packet_id: 1 };
		con.codec.incoming_p_ids[PacketType::Ack.to_usize().unwrap()] =
			PartialPacketId { generation_id: 0, packet_id: 1 };
		// Set params
		let mut params = ConnectedParams::new(other_key, [0; 64], [0x42; 8]);
		params.c_id = 1;
		con.params = Some(params);
	}
}
/// Check if init packet is sent and connect timeout is working.
#[tokio::test]
async fn connect_timeout() -> Result<()> {
	let mut state = TestConnection::new()?;
	let (send, recv) = oneshot::channel();
	let send = Cell::new(Some(send));
	// Fires once when the server sees the client's first init packet.
	let listener = move |event: &Event| {
		if let Event::ReceivePacket(packet) = event {
			if packet.header().packet_type() == PacketType::Init {
				if let Some(s) = send.replace(None) {
					s.send(()).unwrap();
				}
			}
		}
	};
	state.server.event_listeners.push(Box::new(listener));
	tokio::select!(
		(r, err) = future::join(
			time::timeout(Duration::from_secs(10), recv),
			state.client.connect(),
		) => {
			r??;
			// The server never answers, so connect must fail eventually.
			assert!(err.is_err(), "Connect should timeout");
		}
		_ = state.server.wait_disconnect() => {
			bail!("Server should just run in the background");
		}
	);
	Ok(())
}
/// A `clientdisconnect` that is never acknowledged should time out.
#[tokio::test]
async fn disconnect_timeout() -> Result<()> {
	let mut state = TestConnection::new()?;
	state.set_connected();
	let (send, recv) = oneshot::channel();
	let send = Cell::new(Some(send));
	// Fires once when the server receives the disconnect command.
	let listener = move |event: &Event| {
		if let Event::ReceivePacket(packet) = event {
			if packet.header().packet_type() == PacketType::Command {
				send.replace(None).unwrap().send(()).unwrap();
			}
		}
	};
	state.server.event_listeners.push(Box::new(listener));
	let mut cmd = OutCommand::new(
		Direction::C2S,
		Flags::empty(),
		PacketType::Command,
		"clientdisconnect",
	);
	cmd.write_arg("reasonid", &8);
	cmd.write_arg("reasonmsg", &"Bye");
	state.client.send_packet(cmd.into_packet())?;
	tokio::select!(
		(r, err) = future::join(
			time::timeout(Duration::from_secs(10), recv),
			state.client.wait_disconnect(),
		) => {
			r??;
			// The server never acks, so the disconnect must fail eventually.
			assert!(err.is_err(), "Disconnect should timeout");
		}
		_ = state.server.wait_disconnect() => {
			bail!("Server should just run in the background");
		}
	);
	Ok(())
}
/// Without any traffic, an established connection should time out.
#[tokio::test]
async fn timeout() -> Result<()> {
	let mut state = TestConnection::new()?;
	state.set_connected();
	// The server side is never polled, so the client gets no pongs.
	let r = state.client.wait_disconnect().await;
	assert!(
		matches!(r, super::Result::<()>::Err(Error::TsProto(crate::Error::Timeout(_)))),
		"Connection should timeout (result: {:?})",
		r
	);
	Ok(())
}
/// Send ping and check that a pong is received.
#[tokio::test]
async fn pong() -> Result<()> {
	let mut state = TestConnection::new()?;
	state.set_connected();
	let packet = OutPacket::new_with_dir(Direction::S2C, Flags::UNENCRYPTED, PacketType::Ping);
	// Two pings, so two pongs are expected back.
	state.server.send_packet(packet.clone())?;
	state.server.send_packet(packet.clone())?;
	let (send, recv) = oneshot::channel();
	let send = Cell::new(Some(send));
	let counter = Cell::new(0u8);
	let listener = move |event: &Event| {
		if let Event::ReceivePacket(packet) = event {
			if packet.header().packet_type() == PacketType::Pong {
				counter.set(counter.get() + 1);
				// Until 2 pongs are received
				if counter.get() == 2 {
					send.replace(None).unwrap().send(()).unwrap();
				}
			}
		}
	};
	state.server.event_listeners.push(Box::new(listener));
	tokio::select!(
		r = time::timeout(Duration::from_secs(5), recv) => {
			r??;
			return Ok(());
		}
		_ = state.client.wait_disconnect() => {}
		_ = state.server.wait_disconnect() => {}
	);
	bail!("Unexpected disconnect")
}
/// Check that the packet id wraps around.
#[tokio::test]
async fn generation_id() -> Result<()> {
let mut state = TestConnection::new()?;
state.set_connected();
// Sending 70 000 messages takes about 7 minutes in debug mode so we
// start at 65 500 and send only 100.
let count = 100;
// Set current id
for c in [&mut state.client, &mut state.server] {
c.codec.outgoing_p_ids[PacketType::Command.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 65_500 };
c.codec.incoming_p_ids[PacketType::Command.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 65_500 };
c.codec.outgoing_p_ids[PacketType::Ack.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 65_500 };
c.codec.incoming_p_ids[PacketType::Ack.to_usize().unwrap()] =
PartialPacketId { generation_id: 0, packet_id: 65_500 };
}
let (send, recv) = oneshot::channel();
let send = Cell::new(Some(send));
let counter = Cell::new(0u8);
let listener = move |event: &Event| {
if let Event::ReceivePacket(packet) = event {
if packet.header().packet_type() == PacketType::Command {
counter.set(counter.get() + 1);
if counter.get() == count {
send.replace(None).unwrap().send(()).unwrap();
}
}
}
};
state.client.event_listeners.push(Box::new(listener));
for i in 0..count {
let mut cmd = OutCommand::new(
Direction::S2C,
Flags::empty(),
PacketType::Command,
"notifytextmessage",
);
cmd.write_arg("msg", &format!("message {}", i));
state.server.send_packet(cmd.into_packet())?;
}
tokio::select!(
r = recv => {
r?;
return Ok(());
}
_ = state.client.wait_disconnect() => {}
_ = state.server.wait_disconnect() => {}
);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | true |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/algorithms.rs | tsproto/src/algorithms.rs | //! Handle packet splitting and cryptography
use curve25519_dalek_ng::edwards::EdwardsPoint;
use eax::aead::consts::{U16, U8};
use eax::{AeadInPlace, Eax, KeyInit};
use generic_array::GenericArray;
use num_bigint::BigUint;
use num_traits::ToPrimitive;
use omnom::WriteExt;
use quicklz::CompressionLevel;
use sha1::Sha1;
use sha2::{Digest, Sha256, Sha512};
use tsproto_types::crypto::{EccKeyPrivEd25519, EccKeyPubP256};
use crate::connection::CachedKey;
use crate::{Error, Result};
use tsproto_packets::packets::*;
/// Whether a packet of this type must always be encrypted on the wire.
///
/// Only `Command` and `CommandLow` packets carry data that must never be sent
/// in the clear; all other packet types may be sent unencrypted.
pub fn must_encrypt(t: PacketType) -> bool {
    matches!(t, PacketType::Command | PacketType::CommandLow)
}
/// Whether a packet of this type should be encrypted.
///
/// Everything that [`must_encrypt`] requires is encrypted, acks are always
/// encrypted, and voice packets are encrypted iff `voice_encryption` is set.
pub fn should_encrypt(t: PacketType, voice_encryption: bool) -> bool {
    if must_encrypt(t) {
        return true;
    }
    match t {
        PacketType::Ack | PacketType::AckLow => true,
        _ => voice_encryption && t.is_voice(),
    }
}
/// Compresses and splits the packet data of a `Command` or `CommandLow` packet.
///
/// Returns the split packet data with their headers. The headers have their
/// type and the compressed and fragmented flags set to the right value.
///
/// Packets that already fit into one UDP packet are returned unchanged.
/// Oversized `VoiceWhisper` packets are compressed but never fragmented
/// (the size limit is ignored for them).
pub fn compress_and_split(is_client: bool, packet: OutPacket) -> Vec<OutPacket> {
    // Everything except whisper packets has to be less than 500 bytes
    let header_size =
        if is_client { tsproto_packets::C2S_HEADER_LEN } else { tsproto_packets::S2C_HEADER_LEN };
    let data = packet.content();
    // The maximum packet size (including header) is 500 bytes.
    let max_size = 500 - header_size;
    // Split the data if it is necessary.
    let compressed;
    let datas = if data.len() > max_size {
        // Compress with QuickLZ
        let cdata = ::quicklz::compress(data, CompressionLevel::Lvl1);
        // Use the compressed version only if it is actually smaller
        let mut data = if cdata.len() > data.len() {
            compressed = false;
            data
        } else {
            compressed = true;
            &cdata
        };

        // Ignore size limit for whisper packets
        if data.len() <= max_size || packet.header().packet_type() == PacketType::VoiceWhisper {
            let mut v = vec![0; header_size + data.len()];
            v[header_size..].copy_from_slice(data);
            vec![v]
        } else {
            // Split into ceil(len / max_size) fragments, each prefixed with
            // space for a header
            let count = (data.len() + max_size - 1) / max_size;
            let mut splitted = Vec::with_capacity(count);
            while data.len() > max_size {
                let (first, last) = data.split_at(max_size);
                let mut v = vec![0; header_size + max_size];
                v[header_size..].copy_from_slice(first);
                splitted.push(v);
                data = last;
            }
            // Rest
            let mut v = vec![0; header_size + data.len()];
            v[header_size..].copy_from_slice(data);
            splitted.push(v);
            splitted
        }
    } else {
        return vec![packet];
    };

    let len = datas.len();
    let fragmented = len > 1;
    let orig_header = packet.header_bytes();
    let dir = packet.direction();
    let mut packets = Vec::with_capacity(datas.len());
    for (i, mut d) in datas.into_iter().enumerate() {
        // Copy the original header into every fragment
        d[..header_size].copy_from_slice(orig_header);
        let mut packet = OutPacket::new_from_data(dir, d);
        // Only set flags on first fragment
        if i == 0 && compressed {
            packet.flags(packet.header().flags() | Flags::COMPRESSED);
        }
        // Set fragmented flag on first and last part
        if fragmented && (i == 0 || i == len - 1) {
            packet.flags(packet.header().flags() | Flags::FRAGMENTED);
        }
        packets.push(packet);
    }
    packets
}
/// Derive the AES key and nonce for a single packet.
///
/// The key/nonce pair is derived with SHA-256 from the packet type, the
/// direction (whether a client id is present), the generation id and the
/// shared `iv`. The derivation is cached per (packet type, direction) and
/// only recomputed when the generation changes; the packet id is then XORed
/// into the first two key bytes for every packet.
fn create_key_nonce(
    p_type: PacketType, c_id: Option<u16>, p_id: u16, generation_id: u32, iv: &[u8; 64],
    cache: &mut [[CachedKey; 2]; 8],
) -> (GenericArray<u8, U16>, GenericArray<u8, U16>) {
    // Check if this generation is cached
    let cache = &mut cache[p_type.to_usize().unwrap()][if c_id.is_some() { 1 } else { 0 }];
    if cache.generation_id != generation_id {
        // Update the cache
        let mut temp = [0; 70];
        // 0x31 when a client id is present, 0x30 otherwise
        if c_id.is_some() {
            temp[0] = 0x31;
        } else {
            temp[0] = 0x30;
        }
        temp[1] = p_type.to_u8().unwrap();
        (&mut temp[2..6]).write_be(generation_id).unwrap();
        temp[6..].copy_from_slice(iv);

        let keynonce = Sha256::digest(&temp[..]);
        let keynonce = keynonce.as_slice();
        cache.generation_id = generation_id;
        // First half of the digest is the key, second half the nonce
        cache.key.copy_from_slice(&keynonce[..16]);
        cache.nonce.copy_from_slice(&keynonce[16..]);
    }

    // Use the cached version
    let mut key = cache.key;
    let nonce = cache.nonce;

    // Mix the packet id into the first two key bytes
    key[0] ^= (p_id >> 8) as u8;
    key[1] ^= (p_id & 0xff) as u8;
    (key, nonce)
}
/// Encrypt the content of a packet in place with EAX (AES-128).
///
/// The header metadata is authenticated as associated data; the resulting
/// 8 byte MAC is written into the packet's mac field.
pub fn encrypt_key_nonce(
    packet: &mut OutPacket, key: &GenericArray<u8, U16>, nonce: &GenericArray<u8, U16>,
) -> Result<()> {
    // Copy the meta data out because `content_mut` borrows the packet mutably
    let meta = packet.header().get_meta().to_vec();
    let cipher = Eax::<aes::Aes128, U8>::new(key);
    let mac = cipher
        .encrypt_in_place_detached(nonce, &meta, packet.content_mut())
        .map_err(|_| Error::MaxLengthExceeded("encryption data"))?;
    packet.mac().copy_from_slice(mac.as_slice());
    Ok(())
}
/// Encrypt a packet with the fixed, publicly known key and nonce.
pub fn encrypt_fake(packet: &mut OutPacket) -> Result<()> {
    encrypt_key_nonce(packet, &crate::FAKE_KEY.into(), &crate::FAKE_NONCE.into())
}
/// Encrypt a packet in place, deriving key and nonce from the packet header,
/// the generation and the shared `iv` (see [`create_key_nonce`]).
pub fn encrypt(
    packet: &mut OutPacket, generation_id: u32, iv: &[u8; 64], cache: &mut [[CachedKey; 2]; 8],
) -> Result<()> {
    // Read everything we need from the header first so the immutable borrow
    // ends before the packet is mutated.
    let (p_type, c_id, p_id) = {
        let header = packet.header();
        (header.packet_type(), header.client_id(), header.packet_id())
    };
    let (key, nonce) = create_key_nonce(p_type, c_id, p_id, generation_id, iv, cache);
    encrypt_key_nonce(packet, &key, &nonce)
}
/// Decrypt the content of a packet with the given key/nonce.
///
/// Returns the decrypted content. Fails with [`Error::WrongMac`] if the MAC
/// does not verify; the `generation_id` in that error is set to 0 here and
/// filled in by [`decrypt`].
pub fn decrypt_key_nonce(
    packet: &InPacket, key: &GenericArray<u8, U16>, nonce: &GenericArray<u8, U16>,
) -> Result<Vec<u8>> {
    let header = packet.header();
    let meta = header.get_meta();
    // TODO decrypt in-place in packet
    let mut content = packet.content().to_vec();
    let cipher = Eax::<aes::Aes128, U8>::new(key);
    cipher
        .decrypt_in_place_detached(nonce, meta, &mut content, header.mac().into())
        .map(|()| content)
        .map_err(|_| Error::WrongMac {
            p_type: header.packet_type(),
            generation_id: 0,
            packet_id: header.packet_id(),
        })
}
/// Decrypt a packet that was encrypted with the fixed, publicly known key and
/// nonce (see [`encrypt_fake`]).
pub fn decrypt_fake(packet: &InPacket) -> Result<Vec<u8>> {
    decrypt_key_nonce(packet, &crate::FAKE_KEY.into(), &crate::FAKE_NONCE.into())
}
pub fn decrypt(
packet: &InPacket, generation_id: u32, iv: &[u8; 64], cache: &mut [[CachedKey; 2]; 8],
) -> Result<Vec<u8>> {
let header = packet.header();
let (key, nonce) = create_key_nonce(
header.packet_type(),
header.client_id(),
header.packet_id(),
generation_id,
iv,
cache,
);
decrypt_key_nonce(packet, &key, &nonce).map_err(|e| {
if let Error::WrongMac { p_type, packet_id, .. } = e {
Error::WrongMac { p_type, generation_id, packet_id }
} else {
e
}
})
}
/// Compute shared iv and shared mac.
///
/// The shared iv is the SHA-512 hash of the ECDH shared secret, XORed with
/// `alpha` (first 10 bytes) and `beta` (remaining 54 bytes). The shared mac
/// is the first 8 bytes of the SHA-1 hash of the shared iv.
pub fn compute_iv_mac(
    alpha: &[u8; 10], beta: &[u8; 54], our_key: &EccKeyPrivEd25519, other_key: &EdwardsPoint,
) -> ([u8; 64], [u8; 8]) {
    let shared_secret = our_key.create_shared_secret(other_key);
    let mut shared_iv = [0; 64];
    shared_iv.copy_from_slice(Sha512::digest(shared_secret).as_slice());

    // XOR alpha into the first 10 bytes and beta into the remaining 54
    for (dst, src) in shared_iv[..10].iter_mut().zip(alpha.iter()) {
        *dst ^= *src;
    }
    for (dst, src) in shared_iv[10..].iter_mut().zip(beta.iter()) {
        *dst ^= *src;
    }

    let mut shared_mac = [0; 8];
    shared_mac.copy_from_slice(&Sha1::digest(shared_iv).as_slice()[..8]);
    (shared_iv, shared_mac)
}
/// Find the smallest hash cash offset that reaches the requested `level` for
/// this key.
///
/// Returns `u64::MAX` if no offset below `u64::MAX` reaches the level.
pub fn hash_cash(key: &EccKeyPubP256, level: u8) -> u64 {
    let omega = key.to_ts();
    (0..u64::MAX)
        .find(|&offset| get_hash_cash_level(&omega, offset) >= level)
        .unwrap_or(u64::MAX)
}
/// Compute the hash cash level of `omega` with the given `offset`: the number
/// of trailing zero bits of SHA-1(omega ++ offset), counted over the digest
/// bytes in order.
#[inline]
pub fn get_hash_cash_level(omega: &str, offset: u64) -> u8 {
    let digest = Sha1::digest(format!("{}{}", omega, offset).as_bytes());
    let mut level = 0;
    for &byte in digest.as_slice() {
        if byte != 0 {
            // First non-zero byte contributes its trailing zero bits
            level += byte.trailing_zeros() as u8;
            break;
        }
        // A zero byte contributes all 8 bits
        level += 8;
    }
    level
}
/// Serialize an unsigned integer into a big-endian, zero-padded 64 byte
/// array. Panics (as before) if the integer needs more than 64 bytes.
pub fn biguint_to_array(i: &BigUint) -> [u8; 64] {
    let bytes = i.to_bytes_be();
    let mut a = [0; 64];
    // Right-align the big-endian bytes, leaving leading zero padding
    a[64 - bytes.len()..].copy_from_slice(&bytes);
    a
}
/// Interpret a big-endian 64 byte array as an unsigned integer.
pub fn array_to_biguint(i: &[u8; 64]) -> BigUint { BigUint::from_bytes_be(i) }
#[cfg(test)]
mod tests {
    use base64::prelude::*;

    use super::*;
    use crate::license::Licenses;
    use crate::packets::PacketType;
    use crate::utils;
    use tsproto_types::crypto::EccKeyPubEd25519;

    /// Fake-encrypting and fake-decrypting must round-trip the content.
    #[test]
    fn test_fake_crypt() {
        let data = (0..100).into_iter().collect::<Vec<_>>();
        let mut packet = OutPacket::new_with_dir(Direction::C2S, Flags::empty(), PacketType::Ack);
        packet.data_mut().extend_from_slice(&data);
        encrypt_fake(&mut packet).unwrap();

        let packet =
            InPacket::try_new(Direction::C2S, packet.data_mut().as_slice().into()).unwrap();
        let dec_data = decrypt_fake(&packet).unwrap();
        assert_eq!(&data, &dec_data);
    }

    /// Fake-encrypting a fixed ack packet must produce known bytes.
    #[test]
    fn test_fake_encrypt() {
        let mut packet = OutAck::new(Direction::C2S, PacketType::Command, 0);
        encrypt_fake(&mut packet).unwrap();

        let real_res: &[u8] =
            &[0xa4, 0x7b, 0x47, 0x94, 0xdb, 0xa9, 0x6a, 0xc5, 0, 0, 0, 0, 0x6, 0xfe, 0x18];
        assert_eq!(real_res, packet.data_mut().as_slice());
    }

    /// Check the shared iv / keynonce derivation against values captured from
    /// a real connection.
    #[test]
    fn shared_iv31() {
        let licenses = Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQA1hUFJiiSs\
            0wFXkYuPUJVcDa6XCrZTcsvkB0Ffzz4CmwIITRXgCqeTYAcAAAAgQW5vbnltb3VzAAC\
            4R+5mos+UQ/KCbkpQLMI5WRp4wkQu8e5PZY4zU+/FlyAJwaE8CcJJ/A==")
            .unwrap()).unwrap();
        let derived_key =
            licenses.derive_public_key(EccKeyPubEd25519::from_bytes(crate::ROOT_KEY)).unwrap();

        let client_ek = [
            0xb0, 0x4e, 0xa1, 0xd9, 0x5c, 0x72, 0x64, 0xdf, 0x0d, 0xe8, 0xb3, 0x6b, 0xaa, 0x7c,
            0xa1, 0x5f, 0x75, 0x71, 0xf5, 0x1f, 0xa0, 0x54, 0xb5, 0x51, 0x27, 0x08, 0x8e, 0xdd,
            0x96, 0x3d, 0x6e, 0x79,
        ];
        let priv_key = EccKeyPrivEd25519::from_bytes(client_ek);
        let alpha_b64 = BASE64_STANDARD.decode("Jkxq1wIvvhzaCA==").unwrap();
        let mut alpha = [0; 10];
        alpha.copy_from_slice(&alpha_b64);
        let beta_b64 = BASE64_STANDARD
            .decode("wU5T/MM6toW6Wge9th7VlTlzVZ9JDWypw2P9migfc25pjGP2Tj7Hm6rJpmKeHRr08Ch7BEAR")
            .unwrap();
        let mut beta = [0; 54];
        beta.copy_from_slice(&beta_b64);

        let expected_shared_shared_iv: [u8; 64] = [
            0x58, 0x78, 0xae, 0x08, 0x08, 0x72, 0x05, 0xb0, 0x13, 0x27, 0x10, 0xe9, 0x81, 0xb4,
            0xaf, 0x14, 0x14, 0x71, 0xad, 0xcd, 0x82, 0x98, 0xf3, 0xd1, 0x1d, 0x07, 0x20, 0x72,
            0x7e, 0xb2, 0x1b, 0x89, 0x47, 0x82, 0x1e, 0xfb, 0x02, 0x53, 0x5a, 0x8a, 0x52, 0x4d,
            0x9a, 0x7a, 0x09, 0x2c, 0x1b, 0xe7, 0x1f, 0xd1, 0x9d, 0x2a, 0x9d, 0x4f, 0xbd, 0xe3,
            0x22, 0x09, 0xe4, 0x86, 0x7d, 0x63, 0x49, 0x07,
        ];
        let expected_xored_shared_shared_iv: [u8; 64] = [
            0x7e, 0x34, 0xc4, 0xdf, 0x0a, 0x5d, 0xbb, 0xac, 0xc9, 0x2f, 0xd1, 0xa7, 0xd2, 0x48,
            0x6c, 0x2e, 0xa2, 0xf4, 0x17, 0x97, 0x85, 0x25, 0x45, 0xcf, 0xc8, 0x92, 0x19, 0x01,
            0x2b, 0x2d, 0x52, 0x84, 0x2b, 0x2b, 0xdd, 0x98, 0xff, 0xc9, 0x72, 0x95, 0x21, 0x23,
            0xf3, 0xf6, 0x6a, 0xda, 0x55, 0xd9, 0xd8, 0x4a, 0x37, 0xe3, 0x3b, 0x2d, 0x23, 0xfe,
            0x38, 0xfd, 0x14, 0xae, 0x06, 0x67, 0x09, 0x16,
        ];

        let (mut shared_iv, _shared_mac) = compute_iv_mac(&alpha, &beta, &priv_key, &derived_key);
        assert_eq!(&shared_iv as &[u8], &expected_xored_shared_shared_iv as &[u8]);
        // Undo the alpha/beta XOR to get back the raw hashed shared secret
        for i in 0..10 {
            shared_iv[i] ^= alpha[i];
        }
        for i in 0..54 {
            shared_iv[i + 10] ^= beta[i];
        }
        assert_eq!(&shared_iv as &[u8], &expected_shared_shared_iv as &[u8]);

        // Rebuild the keynonce input like create_key_nonce does
        let mut temp = [0; 70];
        temp[0] = 0x31;
        temp[1] = 0x2 & 0xf;
        temp[6..].copy_from_slice(&expected_xored_shared_shared_iv);

        let temporary: [u8; 70] = [
            0x31, 0x02, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x34, 0xc4, 0xdf, 0x0a, 0x5d, 0xbb, 0xac,
            0xc9, 0x2f, 0xd1, 0xa7, 0xd2, 0x48, 0x6c, 0x2e, 0xa2, 0xf4, 0x17, 0x97, 0x85, 0x25,
            0x45, 0xcf, 0xc8, 0x92, 0x19, 0x01, 0x2b, 0x2d, 0x52, 0x84, 0x2b, 0x2b, 0xdd, 0x98,
            0xff, 0xc9, 0x72, 0x95, 0x21, 0x23, 0xf3, 0xf6, 0x6a, 0xda, 0x55, 0xd9, 0xd8, 0x4a,
            0x37, 0xe3, 0x3b, 0x2d, 0x23, 0xfe, 0x38, 0xfd, 0x14, 0xae, 0x06, 0x67, 0x09, 0x16,
        ];
        assert!(&temp as &[u8] == &temporary as &[u8]);

        let keynonce = Sha256::digest(&temp);

        let expected_keynonce: [u8; 32] = [
            0xf3, 0x70, 0xd3, 0x43, 0xe7, 0x78, 0x15, 0x70, 0x7a, 0xff, 0x60, 0x48, 0xfb, 0xd9,
            0xac, 0x6b, 0xb6, 0x33, 0x35, 0x79, 0x31, 0x9b, 0x88, 0x0e, 0x2d, 0x25, 0xef, 0x9c,
            0xe9, 0x9e, 0x77, 0x5c,
        ];
        assert!(keynonce.as_slice() == &expected_keynonce as &[u8]);
    }

    /// Decrypt a captured packet with a key/nonce derived from a known
    /// shared secret.
    #[test]
    fn test_new_decrypt() {
        let shared_iv = utils::read_hex(
            "C2 45 6F CB FC 22 08 AE 44 2B 7D E7 3A 67 1B DA 93 09 B2 00 F2 CD 10 49 08 CD 3A B0 \
             7B DD 58 AD",
        )
        .unwrap();
        let mut shared_iv = Sha512::digest(&shared_iv).as_slice().to_vec();
        assert_eq!(
            utils::read_hex(
                "4D 3F DA B7 D8 B0 2C 82 70 6A 39 3E 97 17 61 09 FA 03 AB 30 5C BB 78 7A 9A 82 D5 \
                 39 9A 60 FD A9 F6 7A D9 04 52 F2 AE 00 3E 35 E8 19 10 89 40 43 80 58 27 1F 0A E0 \
                 E0 3D E0 9C F0 A3 4D 15 6B F0"
            )
            .unwrap()
            .as_slice(),
            shared_iv.as_slice()
        );

        let alpha = [0; 10];
        let beta = BASE64_STANDARD
            .decode("I4onb0zMyAD6bd24QANDls40eOES7qmjonBFtt5wRWzAfIIQWTSxjEas6TGTZIJ8QSJNX+Pl")
            .unwrap();
        for i in 0..10 {
            shared_iv[i] ^= alpha[i];
        }
        for i in 0..54 {
            shared_iv[i + 10] ^= beta[i];
        }

        let mut temp = [0; 70];
        temp[0] = 0x31;
        temp[1] = 0x2 & 0xf;
        temp[6..].copy_from_slice(&shared_iv);

        let keynonce = Sha256::digest(&temp);
        let keynonce = keynonce.as_slice();

        let all = utils::read_hex("2b982443ab38be6b00020000329abf64d4572e1349897b5e1e96fbc4a763a4c4ce1f64f0c1e3febd0a5f04a82ab1f2bc2344bb374fd16181beb8233b5b06944280470e9b6893290a1da0776ffcd89f3beec2ce23b9694930c09efaaea0d88a6895a08ede4d5cbfea61291fc553ac651f1e2bc1d2bd277a8bd9ab5386415579a9e56fac46d8b6b119f454bebd99179cd317dec60af205341d11f274d02bbacdd7e9773f72a426358ca1d39016dd95bde2409cd81bf99b340887e997ea982370c6790cf4d23150460820224766838ea4ec4d71dd102ede701ea0001f392623aa410dd9ab0e45874da82e29e6e370515ec30a37dd73f5a364c233ff014384beab5f1708c9f48dfba33a520f8fcdcef055789c54693c3fe72c5bfaca7cb4ca1fed77b8624660b8abc882f4b95b1284cb6dc55019c6082dd6dd146fa50383662d7298bef04ababaf1af80e15cd4c1f81326f085788e2918e00324147dce39b23db71326abc3de4b94df10f1531e9cce202bba71fa3ebeefd77b21fa3260a62e92eeee2183421d384a8c48777e2f9efbc58d4f442c5f0529c7c0e27e81b2b6b1b05eb8fa19256886248d553582dfd24c7cfab3c3f7317a5cebc6504b53fa0e86fc8c1100fc1d506fcf96caa76a7c0b6a27e577f2efdecd4070e847a559bf37d75bfdbe9e814c702426ce696d8645bc300b5f28f9e7f1ce").unwrap();
        let packet = InPacket::try_new(Direction::C2S, &all).unwrap();
        println!("Packet: {:?}", packet);

        let mut key = [0; 16];
        let mut nonce = [0; 16];
        key.copy_from_slice(&keynonce[..16]);
        nonce.copy_from_slice(&keynonce[16..]);

        let expected_key =
            utils::read_hex("D2 42 75 71 9C EE 83 35 EF 8A CE E0 B7 28 40 B8").unwrap();
        let expected_nonce =
            utils::read_hex("9C D9 30 B7 58 FE 50 23 64 66 11 C5 36 0E A2 5F").unwrap();
        assert_eq!(&expected_nonce, &nonce);
        assert_eq!(&expected_key, &key);

        key[1] ^= 2; // packet id

        let dec = decrypt_key_nonce(&packet, &key.into(), &nonce.into()).unwrap();
        println!("Decrypted: {:?}", String::from_utf8_lossy(&dec));
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/license.rs | tsproto/src/license.rs | use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::io::Cursor;
use std::str;
use curve25519_dalek_ng::edwards::EdwardsPoint;
use curve25519_dalek_ng::scalar::Scalar;
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::{FromPrimitive as _, ToPrimitive as _};
use omnom::{ReadExt, WriteExt};
use sha2::{Digest, Sha512};
use thiserror::Error;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use tracing::trace;
use tsproto_packets::HexSlice;
use tsproto_types::{
crypto::{EccKeyPrivEd25519, EccKeyPubEd25519},
LicenseType,
};
pub const TIMESTAMP_OFFSET: i64 = 0x50e2_2700;
const BLOCK_MIN_LEN: usize = 42;
const BLOCK_TYPE_OFFSET: usize = 33;
const BLOCK_NOT_VALID_BEFORE_OFFSET: usize = 34;
const BLOCK_NOT_VALID_AFTER_OFFSET: usize = 38;
type Result<T> = std::result::Result<T, Error>;
/// Errors that can occur while parsing, validating or building licenses.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
    #[error(
        "License must not exceed bounds {outer_start} - {outer_end} but has {inner_start} - \
         {inner_end}"
    )]
    Bounds {
        outer_start: OffsetDateTime,
        outer_end: OffsetDateTime,
        inner_start: OffsetDateTime,
        inner_end: OffsetDateTime,
    },
    #[error("Failed to deserialize license: {0}")]
    Deserialize(#[source] std::io::Error),
    #[error("Failed to deserialize license: {0}")]
    DeserializeString(#[source] std::str::Utf8Error),
    #[error("Failed to deserialize date: {0}")]
    DeserializeDate(#[source] time::error::ComponentRange),
    #[error("License is only valid between {start} and {end}")]
    Expired { start: OffsetDateTime, end: OffsetDateTime },
    #[error("Cannot uncompress license public key")]
    InvalidPublicKey,
    #[error("Invalid data {0:#x} in intermediate license")]
    IntermediateInvalidData(u32),
    #[error("Invalid public root key for license")]
    InvalidRootKey,
    #[error("Non-null-terminated string")]
    NonterminatedString,
    #[error("License contains no private key")]
    NoPrivateKey,
    #[error("Too many license blocks: {0}")]
    TooManyBlocks(usize),
    #[error("License too short ({0}): {1}")]
    TooShort(usize, &'static str),
    #[error("Unknown license block type {0}")]
    UnknownBlockType(u8),
    #[error("Unknown license type {0}")]
    UnknownLicenseType(u8),
    #[error("Unsupported license version {0}")]
    UnsupportedVersion(u8),
    #[error("Wrong key kind {0} in license")]
    WrongKeyKind(u8),
    #[error("There is no {0} in this block")]
    NoSuchProperty(&'static str),
    #[error("Wrong property type for id {id}: Expected type {expected} but got type {actual}")]
    WrongPropertyType { id: u8, expected: u8, actual: u8 },
    // Fixed typo in the user-facing message ("Propery" -> "Property")
    #[error(
        "Property with id {id} and type {typ} has wrong length: Expected length {expected} but \
         got length {actual}"
    )]
    WrongPropertyLength { id: u8, typ: u8, expected: u8, actual: u8 },
}
/// A single block of a license
#[derive(Clone, Debug)]
pub struct License {
    /// Length of the block
    pub len: usize,
    /// A private key for the block to derive a private key
    pub private_key: Option<EccKeyPrivEd25519>,
    /// Type-specific parsed data of this block.
    pub inner: InnerLicense,
}
/// Helper struct to debug print a license block.
///
/// Pairs a parsed [`License`] with the raw bytes it was parsed from.
pub struct DebugLicense<'a>(&'a License, &'a [u8]);
/// The type of a single license block, stored as a byte in the block header.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, FromPrimitive, ToPrimitive)]
pub enum LicenseBlockType {
    Intermediate,
    Website,
    /// Ts3_Server
    Server,
    Code,
    // Not existing in the license parameter:
    // 4: Token, 5: License_Sign, 6: MyTsId_Sign, 7: Updater
    /// Ts_Server_License
    Ts5Server = 8,
    /// 32, Ephemeral_Key
    Ephemeral = 32,
}
/// Type-specific data stored for a parsed license block.
#[derive(Debug, Clone)]
pub enum InnerLicense {
    Ts5Server {
        /// Properties from the license, offset from the start of properties for every field
        properties: Vec<usize>,
    },
    /// Block types without extra parsed data.
    Other,
}
/// A list of blocks, forming a complete license
#[derive(Clone)]
pub struct Licenses {
    // The raw license bytes: a version byte followed by the serialized blocks
    pub data: Vec<u8>,
    // The parsed blocks, in the same order as they appear in `data`
    pub blocks: Vec<License>,
}
/// A single property of a TS5 server license block.
#[derive(Clone, Debug)]
pub enum LicenseProperty<'a> {
    /// Unknown property with id 1 and type 1
    Unknown1(u32),
    Issuer(&'a str),
    MaxClients(u32),
    /// Any property with an id this code does not know; raw payload kept.
    Unknown {
        id: u8,
        typ: u8,
        data: &'a [u8],
    },
}
/// Builder handed to the closure of [`Licenses::build`] to append new blocks.
#[derive(Debug)]
pub struct LicenseBuilder<'a> {
    licenses: &'a mut Licenses,
}
/// Builder for a single, newly appended license block.
#[derive(Debug)]
pub struct LicenseBlockBuilder<'a, 'b> {
    builder: &'b mut LicenseBuilder<'a>,
    /// The start offset for the new block
    offset: usize,
    typ: LicenseBlockType,
}
type LicenseTimeBounds = (OffsetDateTime, OffsetDateTime);
impl Licenses {
pub fn new() -> Self { Self::default() }
/// Parse a license but ignore expired licenses.
///
/// This is useful for tests but should not be used otherwise.
pub fn parse_ignore_expired(data: Vec<u8>) -> Result<Self> { Self::parse_internal(data, false) }
pub fn parse(data: Vec<u8>) -> Result<Self> { Self::parse_internal(data, true) }
pub fn parse_internal(data: Vec<u8>, check_expired: bool) -> Result<Self> {
let version = data[0];
if version != 0 && version != 1 {
return Err(Error::UnsupportedVersion(version));
}
// Read licenses
let mut blocks = Vec::new();
let now = OffsetDateTime::now_utc();
let mut offset = 1;
let mut bounds = None;
while data.len() > offset {
if blocks.len() >= 8 {
// Accept only 8 blocks
return Err(Error::TooManyBlocks(blocks.len()));
}
// Read next license
let (license, b) = Self::parse_block(&data[offset..], check_expired, now, bounds)?;
offset += license.len;
bounds = Some(b);
blocks.push(license);
}
Ok(Licenses { data, blocks })
}
fn parse_block(
data: &[u8], check_expired: bool, now: OffsetDateTime, bounds: Option<LicenseTimeBounds>,
) -> Result<(License, LicenseTimeBounds)> {
let license = License::parse(data)?;
let license_data = &data[..license.len];
// Check if the certificate is valid
let new_bounds = (
license.get_not_valid_before(license_data)?,
license.get_not_valid_after(license_data)?,
);
if new_bounds.0 > now && check_expired {
return Err(Error::Expired { start: new_bounds.0, end: new_bounds.1 });
}
if new_bounds.1 < now && check_expired {
return Err(Error::Expired { start: new_bounds.0, end: new_bounds.1 });
}
if let Some((start, end)) = bounds {
// The inner license must not have wider bounds
if new_bounds.0 < start || new_bounds.1 > end {
return Err(Error::Bounds {
outer_start: start,
outer_end: end,
inner_start: new_bounds.0,
inner_end: new_bounds.1,
});
}
}
Ok((license, new_bounds))
}
pub fn derive_public_key(&self, root: EccKeyPubEd25519) -> Result<EdwardsPoint> {
let mut last_round = root.0.decompress().ok_or(Error::InvalidRootKey)?;
let mut offset = 1;
trace!("Deriving public key for license");
for l in &self.blocks {
trace!(current_key = %HexSlice((&last_round.compress().0) as &[u8]));
last_round = l.derive_public_key(&self.data[offset..offset + l.len], &last_round)?;
offset += l.len;
}
trace!(
final_key = %HexSlice((&last_round.compress().0) as &[u8]),
"Finished deriving public key"
);
Ok(last_round)
}
/// Derive the private key of this license, starting with a specific block.
///
/// The keys which are stored inside the licenses from the starting block on
/// have to be private keys.
///
/// # Arguments
///
/// `starting_block`: The index of the first block for the key computation.
/// `first_key`: The starting private key (the key of a parent block or the
/// root private key).
///
/// # Panics
///
/// Panics if `starting_block` is not a valid license block index.
pub fn derive_private_key(
&self, starting_block: usize, first_key: EccKeyPrivEd25519,
) -> Result<EccKeyPrivEd25519> {
let mut res = first_key;
let mut offset = 1;
for (i, l) in self.blocks.iter().enumerate() {
if i >= starting_block {
res = l.derive_private_key(&self.data[offset..offset + l.len], &res)?;
}
offset += l.len;
}
Ok(res)
}
pub fn build<F: FnOnce(LicenseBuilder)>(mut self, f: F) -> Result<Self> {
let len = self.data.len();
f(LicenseBuilder { licenses: &mut self });
// Parse new blocks
let now = OffsetDateTime::now_utc();
let mut offset = len;
let mut bounds = self
.blocks
.last()
.map(|b| {
let b_offset = len - b.len;
let b_data = &self.data[b_offset..len];
Ok((b.get_not_valid_before(b_data)?, b.get_not_valid_after(b_data)?))
})
.transpose()?;
while self.data.len() > offset {
if self.blocks.len() >= 8 {
// Accept only 8 blocks
return Err(Error::TooManyBlocks(self.blocks.len()));
}
// Read next license
let (license, b) = Self::parse_block(&self.data[offset..], false, now, bounds)?;
offset += license.len;
bounds = Some(b);
self.blocks.push(license);
}
Ok(self)
}
pub fn is_valid(&self) -> Result<()> {
let mut offset = 1;
for b in &self.blocks {
b.is_valid(&self.data[offset..offset + b.len])?;
offset += b.len;
}
Ok(())
}
}
impl Default for Licenses {
    // An empty license list: only the version byte `1`, no blocks
    fn default() -> Self { Self { data: vec![1], blocks: Default::default() } }
}
impl fmt::Debug for Licenses {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Licenses ")?;
        // Walk the blocks while tracking each block's byte offset into
        // `data` (offset 1 skips the version byte).
        let mut offset = 1;
        f.debug_list()
            .entries(self.blocks.iter().map(|b| {
                let o = offset;
                offset += b.len;
                DebugLicense(b, &self.data[o..o + b.len])
            }))
            .finish()?;
        Ok(())
    }
}
impl License {
    /// Parse license and return read length.
    ///
    /// `data` may contain more than one block; the parsed block's total
    /// length is stored in the returned [`License::len`].
    pub fn parse(data: &[u8]) -> Result<Self> {
        let len = data.len();
        if data.len() < BLOCK_MIN_LEN {
            return Err(Error::TooShort(len, "less than minimum block length"));
        }
        // Key kind; only 0 is supported
        if data[0] != 0 {
            return Err(Error::WrongKeyKind(data[0]));
        }
        let typ_i = data[BLOCK_TYPE_OFFSET];
        let typ = LicenseBlockType::from_u8(typ_i).ok_or(Error::UnknownBlockType(typ_i))?;
        // Skip the fixed-size header; `data` now points at the type-specific
        // payload.
        let data = &data[BLOCK_MIN_LEN..];
        let (inner, extra_len) = match typ {
            LicenseBlockType::Intermediate => {
                if data.len() < 5 {
                    return Err(Error::TooShort(
                        len,
                        "less than minimum length of intermediate license",
                    ));
                }
                // 4 byte unknown
                // Issuer string
                let len = if let Some(len) = data[4..].iter().position(|&b| b == 0) {
                    len
                } else {
                    return Err(Error::NonterminatedString);
                };
                (InnerLicense::Other, 5 + len)
            }
            LicenseBlockType::Server => {
                if data.len() < 6 {
                    return Err(Error::TooShort(
                        len,
                        "less than minimum length of TS3 server license",
                    ));
                }
                // 1 byte server license type
                // 4 byte max clients
                // Issuer string
                let len = if let Some(len) = data[5..].iter().position(|&b| b == 0) {
                    len
                } else {
                    return Err(Error::NonterminatedString);
                };
                (InnerLicense::Other, 6 + len)
            }
            LicenseBlockType::Ts5Server => {
                if data.len() < 2 {
                    return Err(Error::TooShort(
                        len,
                        "less than minimum length of TS5 server license",
                    ));
                }
                // 1 byte server license type
                // Property count and properties
                let property_count = data[1];
                let mut pos = 2;
                let properties = (0..property_count)
                    .map(|_| {
                        // Offset of this property relative to the start of
                        // the property area
                        let p = pos - 2;
                        if pos >= data.len() {
                            return Err(Error::TooShort(len, "missing TS5 license property"));
                        }
                        let len = usize::from(data[pos]);
                        pos += 1;
                        // NOTE(review): from here `len` shadows the total
                        // length, so the error below reports the property
                        // length instead of the data length — looks
                        // unintended. Also `>=` rejects a property ending
                        // exactly at the end of `data`; possibly one byte
                        // too strict. Confirm both.
                        if pos + len >= data.len() {
                            return Err(Error::TooShort(len, "cut off TS5 license property"));
                        }
                        pos += len;
                        Ok(p)
                    })
                    .collect::<Result<_>>()?;
                (InnerLicense::Ts5Server { properties }, pos)
            }
            LicenseBlockType::Ephemeral => (InnerLicense::Other, 0),
            _ => {
                return Err(Error::UnknownBlockType(typ_i));
            }
        };

        Ok(License { len: BLOCK_MIN_LEN + extra_len, private_key: None, inner })
    }

    /// Read the block type byte from the raw block data.
    pub fn get_type(&self, data: &[u8]) -> Result<LicenseBlockType> {
        let typ_i = data[BLOCK_TYPE_OFFSET];
        LicenseBlockType::from_u8(typ_i).ok_or(Error::UnknownBlockType(typ_i))
    }

    /// Decompress the Ed25519 public key stored at bytes 1..33 of the block.
    pub fn get_public_key(&self, data: &[u8]) -> Result<EdwardsPoint> {
        let k = EccKeyPubEd25519::from_bytes(data[1..33].try_into().unwrap());
        k.0.decompress().ok_or(Error::InvalidPublicKey)
    }

    // Read a big-endian u32 at `block_offset` and convert it to a date using
    // the license epoch (`TIMESTAMP_OFFSET`)
    fn get_timestamp(&self, data: &[u8], block_offset: usize) -> Result<OffsetDateTime> {
        let num: u32 = (&data[block_offset..]).read_be().map_err(Error::Deserialize)?;
        OffsetDateTime::from_unix_timestamp(i64::from(num) + TIMESTAMP_OFFSET)
            .map_err(Error::DeserializeDate)
    }

    /// Start of this block's validity period.
    pub fn get_not_valid_before(&self, data: &[u8]) -> Result<OffsetDateTime> {
        self.get_timestamp(data, BLOCK_NOT_VALID_BEFORE_OFFSET)
    }

    /// End of this block's validity period.
    pub fn get_not_valid_after(&self, data: &[u8]) -> Result<OffsetDateTime> {
        self.get_timestamp(data, BLOCK_NOT_VALID_AFTER_OFFSET)
    }

    /// The server license type; only present in (TS3/TS5) server blocks.
    pub fn get_license_type(&self, data: &[u8]) -> Result<LicenseType> {
        if ![LicenseBlockType::Server, LicenseBlockType::Ts5Server].contains(&self.get_type(data)?)
        {
            return Err(Error::NoSuchProperty("license type"));
        }
        LicenseType::from_u8(data[BLOCK_MIN_LEN])
            .ok_or(Error::UnknownLicenseType(data[BLOCK_MIN_LEN]))
    }

    /// The issuer string of this block, if the block type has one.
    pub fn get_issuer<'a>(&self, data: &'a [u8]) -> Result<&'a str> {
        let typ = self.get_type(data)?;
        if typ == LicenseBlockType::Ts5Server {
            // Search in properties
            let res = self
                .get_properties(data)?
                .find_map(|p| if let Ok(LicenseProperty::Issuer(i)) = p { Some(i) } else { None })
                .ok_or(Error::NoSuchProperty("issuer"));
            return res;
        }
        // For the other block types the issuer is a null-terminated string at
        // a type-specific offset
        let offset = BLOCK_MIN_LEN
            + match typ {
                LicenseBlockType::Intermediate => 4,
                LicenseBlockType::Website | LicenseBlockType::Code => 0,
                LicenseBlockType::Server => 5,
                _ => return Err(Error::NoSuchProperty("issuer")),
            };
        let len = match typ {
            LicenseBlockType::Intermediate
            | LicenseBlockType::Website
            | LicenseBlockType::Code
            | LicenseBlockType::Server => self.len - offset - 1,
            _ => return Err(Error::NoSuchProperty("issuer")),
        };
        str::from_utf8(&data[offset..offset + len]).map_err(Error::DeserializeString)
    }

    /// The maximum client count of this block, if the block type has one.
    pub fn get_max_clients(&self, data: &[u8]) -> Result<u32> {
        let typ = self.get_type(data)?;
        if typ == LicenseBlockType::Ts5Server {
            // Search in properties
            let res = self
                .get_properties(data)?
                .find_map(
                    |p| if let Ok(LicenseProperty::MaxClients(i)) = p { Some(i) } else { None },
                )
                .ok_or(Error::NoSuchProperty("max clients"));
            return res;
        }
        let offset = BLOCK_MIN_LEN
            + match typ {
                LicenseBlockType::Intermediate => 0,
                LicenseBlockType::Server => 1,
                _ => return Err(Error::NoSuchProperty("max clients")),
            };
        (&data[offset..offset + 4]).read_be().map_err(Error::Deserialize)
    }

    /// Iterate over the properties of a TS5 server block.
    ///
    /// Fails with [`Error::NoSuchProperty`] for every other block type.
    pub fn get_properties<'a: 'b, 'b>(
        &'b self, data: &'a [u8],
    ) -> Result<impl Iterator<Item = Result<LicenseProperty<'a>>> + 'b> {
        if let InnerLicense::Ts5Server { properties } = &self.inner {
            Ok(properties.iter().map(move |p| {
                // `p` is relative to the property area; `o` points at the
                // length byte of the property, followed by id and type bytes
                let o = BLOCK_MIN_LEN + 2 + p;
                let len = data[o];
                let len_usize = len as usize;
                let id = data[o + 1];
                let typ = data[o + 2];
                // Check length
                if let Some(expected) = match typ {
                    // String, check that it is null-terminated
                    0 => {
                        if data[o + len_usize] != 0 {
                            return Err(Error::NonterminatedString);
                        }
                        None
                    }
                    1 | 3 => Some(4),
                    2 | 4 => Some(8),
                    _ => None,
                } {
                    if len != expected {
                        return Err(Error::WrongPropertyLength { id, typ, expected, actual: len });
                    }
                }

                match id {
                    1 => {
                        if typ != 1 {
                            return Err(Error::WrongPropertyType { id, expected: 1, actual: typ });
                        }
                        Ok(LicenseProperty::Unknown1(
                            (&data[o + 3..o + 3 + 4]).read_be().map_err(Error::Deserialize)?,
                        ))
                    }
                    2 => {
                        if typ != 0 {
                            return Err(Error::WrongPropertyType { id, expected: 0, actual: typ });
                        }
                        let s = str::from_utf8(&data[o + 3..o + len_usize])
                            .map_err(Error::DeserializeString)?;
                        Ok(LicenseProperty::Issuer(s))
                    }
                    3 => {
                        if typ != 1 {
                            return Err(Error::WrongPropertyType { id, expected: 1, actual: typ });
                        }
                        Ok(LicenseProperty::MaxClients(
                            (&data[o + 3..o + 3 + 4]).read_be().map_err(Error::Deserialize)?,
                        ))
                    }
                    _ => {
                        Ok(LicenseProperty::Unknown { id, typ, data: &data[o + 3..o + len_usize] })
                    }
                }
            }))
        } else {
            Err(Error::NoSuchProperty("properties"))
        }
    }

    /// Get the private key from the hash of this license block.
    pub fn get_hash_key(&self, data: &[u8]) -> Scalar {
        // Make a valid private key from the hash
        let mut hash_key = Sha512::digest(&data[1..]);
        // Clamp the first 32 bytes like an Ed25519 scalar
        hash_key[0] &= 248;
        hash_key[31] &= 63;
        hash_key[31] |= 64;
        Scalar::from_bytes_mod_order(hash_key.as_slice()[..32].try_into().unwrap())
    }

    /// Compute this block's derived public key: `pub_key * hash + parent`.
    pub fn derive_public_key(
        &self, data: &[u8], parent_key: &EdwardsPoint,
    ) -> Result<EdwardsPoint> {
        let pub_key = self.get_public_key(data)?;
        let hash_key = self.get_hash_key(data);
        Ok(pub_key * hash_key + parent_key)
    }

    /// Derive the private key of this license block.
    ///
    /// The key which is stored inside this license has to be a private key.
    ///
    /// # Arguments
    ///
    /// `parent_key`: The resulting private key of the previous block.
    pub fn derive_private_key(
        &self, data: &[u8], parent_key: &EccKeyPrivEd25519,
    ) -> Result<EccKeyPrivEd25519> {
        let priv_key = if let Some(k) = &self.private_key {
            &k.0
        } else {
            return Err(Error::NoPrivateKey);
        };
        let hash_key = self.get_hash_key(data);
        Ok(EccKeyPrivEd25519(priv_key * hash_key + parent_key.0))
    }

    /// Check if this is a valid license.
    ///
    /// Does some sanity checking on fields. Does not need to be called for correctness.
    pub fn is_valid(&self, data: &[u8]) -> Result<()> {
        self.get_public_key(data)?;
        self.get_not_valid_before(data)?;
        self.get_not_valid_after(data)?;
        // Optional fields only fail the check when present but malformed
        if let Err(e) = self.get_license_type(data) {
            if !matches!(e, Error::NoSuchProperty(_)) {
                return Err(e);
            }
        }
        if let Err(e) = self.get_issuer(data) {
            if !matches!(e, Error::NoSuchProperty(_)) {
                return Err(e);
            }
        }
        match self.get_properties(data) {
            Ok(ps) => {
                for p in ps {
                    p?;
                }
            }
            Err(e) => {
                if !matches!(e, Error::NoSuchProperty(_)) {
                    return Err(e);
                }
            }
        }
        Ok(())
    }
}
// Pretty-print a block with its decoded fields; any field that fails to
// decode is shown as "error" instead of aborting the formatting.
impl fmt::Debug for DebugLicense<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let from = self
            .0
            .get_not_valid_before(self.1)
            .ok()
            .and_then(|d| d.format(&Rfc3339).ok())
            .unwrap_or_else(|| "error".to_string());
        let to = self
            .0
            .get_not_valid_after(self.1)
            .ok()
            .and_then(|d| d.format(&Rfc3339).ok())
            .unwrap_or_else(|| "error".to_string());

        let mut d = f.debug_struct("License");
        let typ = self.0.get_type(self.1).unwrap();
        d.field("type", &typ);
        if let Some(key) = &self.0.private_key {
            d.field("private_key", key);
        }
        d.field("not_valid_before", &from).field("not_valid_after", &to);
        // Type-specific fields
        match typ {
            LicenseBlockType::Intermediate => {
                if let Ok(num) = self.0.get_max_clients(self.1) {
                    d.field("data", &num);
                } else {
                    d.field("data", &"error");
                }
            }
            LicenseBlockType::Website | LicenseBlockType::Code => {
                if let Ok(issuer) = self.0.get_issuer(self.1) {
                    d.field("issuer", &issuer);
                } else {
                    d.field("issuer", &"error");
                }
            }
            LicenseBlockType::Server => {
                if let Ok(license_type) = self.0.get_license_type(self.1) {
                    d.field("license_type", &license_type);
                } else {
                    d.field("license_type", &"error");
                }
                if let Ok(issuer) = self.0.get_issuer(self.1) {
                    d.field("issuer", &issuer);
                } else {
                    d.field("issuer", &"error");
                }
                if let Ok(max_clients) = self.0.get_max_clients(self.1) {
                    d.field("max_clients", &max_clients);
                } else {
                    d.field("max_clients", &"error");
                }
            }
            LicenseBlockType::Ts5Server => {
                if let Ok(props) =
                    self.0.get_properties(self.1).and_then(|ps| ps.collect::<Result<Vec<_>>>())
                {
                    d.field("properties", &props);
                } else {
                    d.field("properties", &"error");
                }
            }
            LicenseBlockType::Ephemeral => {}
        }
        d.finish()?;
        Ok(())
    }
}
impl LicenseBlockType {
    /// Minimum serialized size of a block of this type: the common block
    /// header plus the type-specific fixed fields.
    fn min_len(&self) -> usize {
        let extra = match self {
            Self::Intermediate => 5,
            Self::Website | Self::Code => 1,
            Self::Server => 6,
            Self::Ts5Server => 2,
            Self::Ephemeral => 0,
        };
        BLOCK_MIN_LEN + extra
    }
}
impl<'a> LicenseBuilder<'a> {
    /// Append a new, zero-initialized block of the given type and return a
    /// builder for filling in its fields.
    pub fn add_block<'b>(&'b mut self, typ: LicenseBlockType) -> LicenseBlockBuilder<'a, 'b> {
        let offset = self.licenses.data.len();
        // Reserve space for the smallest valid block of this type.
        self.licenses.data.resize(offset + typ.min_len(), 0);
        self.licenses.data[offset + BLOCK_TYPE_OFFSET] = typ.to_u8().unwrap();
        LicenseBlockBuilder { builder: self, offset, typ }
    }
}
impl LicenseBlockBuilder<'_, '_> {
    /// Write the 32 byte public key of this block.
    ///
    /// Stored starting one byte into the block (presumably after a one-byte
    /// marker — confirm against the license format).
    pub fn public_key(&mut self, key: &EccKeyPubEd25519) -> &mut Self {
        let o = self.offset + 1;
        self.builder.licenses.data[o..o + 32].copy_from_slice(&key.0.0);
        self
    }
    /// Write a 4 byte big-endian timestamp at `offset` bytes into the block.
    ///
    /// Stored relative to `TIMESTAMP_OFFSET`; the `as u32` cast silently
    /// truncates times outside the representable range.
    fn write_timestamp(&mut self, offset: usize, time: OffsetDateTime) -> &mut Self {
        let o = self.offset + offset;
        Cursor::new(&mut self.builder.licenses.data[o..o + 4])
            .write_be((time.unix_timestamp() - TIMESTAMP_OFFSET) as u32)
            .unwrap();
        self
    }
    /// Set the start of the validity period.
    pub fn not_valid_before(&mut self, time: OffsetDateTime) -> &mut Self {
        self.write_timestamp(BLOCK_NOT_VALID_BEFORE_OFFSET, time)
    }
    /// Set the end of the validity period.
    pub fn not_valid_after(&mut self, time: OffsetDateTime) -> &mut Self {
        self.write_timestamp(BLOCK_NOT_VALID_AFTER_OFFSET, time)
    }
    /// Only valid for Server and Ts5Server license types
    ///
    /// # Panics
    /// Panics for any other block type.
    pub fn server_license_type(&mut self, typ: LicenseType) -> &mut Self {
        if ![LicenseBlockType::Server, LicenseBlockType::Ts5Server].contains(&self.typ) {
            panic!(
                "Setting the license type is only allowed for Server and Ts5Server license types, \
                 but this is a {:?}",
                self.typ
            );
        }
        // The license type byte is the first byte after the common header.
        self.builder.licenses.data[self.offset + BLOCK_MIN_LEN] = typ.to_u8().unwrap();
        self
    }
    /// Add a property in a TS5 license block.
    ///
    /// `len` is the length of the content, excluding id, type and length field.
    /// Return the byte offset to where the contents of the propery starts.
    fn ts5_add_property(&mut self, id: u8, typ: u8, len: usize) -> usize {
        let o = self.builder.licenses.data.len();
        // Reserve length byte + id byte + type byte + content.
        self.builder.licenses.data.resize(o + len + 3, 0);
        // Increment property count
        self.builder.licenses.data[self.offset + BLOCK_MIN_LEN + 1] += 1;
        // The stored length covers the id and type bytes, hence + 2.
        self.builder.licenses.data[o] = u8::try_from(len).unwrap() + 2;
        self.builder.licenses.data[o + 1] = id;
        self.builder.licenses.data[o + 2] = typ;
        o + 3
    }
    /// Only valid for Intermediate, Server and Ts5Server license types
    ///
    /// # Panics
    /// Panics for any other block type.
    pub fn max_clients(&mut self, max_clients: u32) -> &mut Self {
        let o = if self.typ == LicenseBlockType::Ts5Server {
            // TS5 encodes max clients as property id 3, type 1, 4 bytes.
            self.ts5_add_property(3, 1, 4)
        } else {
            self.offset
                + BLOCK_MIN_LEN
                + match self.typ {
                    LicenseBlockType::Intermediate => 0,
                    // Server blocks have the license type byte first.
                    LicenseBlockType::Server => 1,
                    _ => panic!(
                        "Setting max clients is only allowed for Intermediate, Server and \
                         Ts5Server license types, but this is a {:?}",
                        self.typ
                    ),
                }
        };
        Cursor::new(&mut self.builder.licenses.data[o..o + 4]).write_be(max_clients).unwrap();
        self
    }
    /// Only valid for Intermediate, Server and Ts5Server license types
    ///
    /// # Panics
    /// Panics for other block types, when the issuer contains a null byte or
    /// when an issuer was already written into this block.
    pub fn issuer(&mut self, issuer: &str) -> &mut Self {
        // Check that the issuer does not contain null bytes
        assert!(
            !issuer.contains('\0'),
            "The issuer is written as a null-terminated string so it cannot contain null bytes"
        );
        let o = if self.typ == LicenseBlockType::Ts5Server {
            // TS5 encodes the issuer as property id 2, type 0, plus the
            // trailing null byte.
            self.ts5_add_property(2, 0, issuer.len() + 1)
        } else {
            if ![LicenseBlockType::Intermediate, LicenseBlockType::Server].contains(&self.typ) {
                panic!(
                    "Setting the issuer is only allowed for Intermediate, Server and Ts5Server \
                     license types, but this is a {:?}",
                    self.typ
                );
            }
            // Check that no issuer was written so far
            assert!(
                self.builder.licenses.data.len() - self.offset == self.typ.min_len(),
                "Only a single issuer can be written into a license block"
            );
            let o = self.builder.licenses.data.len();
            self.builder.licenses.data.resize(o + issuer.len(), 0);
            // Write starting one byte early: this overwrites the zero byte
            // reserved by `min_len()`, and the last byte appended by the
            // resize stays 0, so the string remains null-terminated.
            o - 1
        };
        self.builder.licenses.data[o..o + issuer.len()].copy_from_slice(issuer.as_bytes());
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use base64::prelude::*;
    // All fixtures below are real, base64-encoded license chains; most are
    // expired, hence `parse_ignore_expired`.
    #[test]
    fn parse_standard_license() {
        Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQA1hUFJiiSs0wFXkYuPUJVcDa6XCrZTcsvkB0\
            Ffzz4CmwIITRXgCqeTYAcAAAAgQW5vbnltb3VzAACiIBip9hQaK6P3QhwOJs/BkPn0i\
            oyIDPaNgzJ6M8x0kiAJf4hxCYAxMQ==").unwrap()).unwrap();
    }
    #[test]
    fn parse_standard_license_expired() {
        // Same license as above, but `parse` checks the validity period.
        assert!(Licenses::parse(BASE64_STANDARD.decode("AQA1hUFJiiSs0wFXkYuPUJVcDa6XCrZTcsvkB0\
            Ffzz4CmwIITRXgCqeTYAcAAAAgQW5vbnltb3VzAACiIBip9hQaK6P3QhwOJs/BkPn0i\
            oyIDPaNgzJ6M8x0kiAJf4hxCYAxMQ==").unwrap()).is_err());
    }
    #[test]
    fn parse_aal_license() {
        Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQCvbHFTQDY/terPeilrp/ECU9xCH5U3xC92lY\
            TNaY/0KQAJFueAazbsgAAAACVUZWFtU3BlYWsgU3lzdGVtcyBHbWJIAABhl9gwla/UJ\
            p2Eszst9TRVXO/PeE6a6d+CTI6Pg7OEVgAJc5CrL4Nh8gAAACRUZWFtU3BlYWsgc3lz\
            dGVtcyBHbWJIAACvTQIgpv6zmLZq3znh7ygmOSokGFkFjz4bTigrOnetrgIJdIIACdS\
            /gAYAAAAAU29zc2VuU3lzdGVtcy5iaWQAADY7+uV1CQ1niOvYSdGzsu83kPTNWijovr\
            3B78eHGeePIAm98vQJvpu0").unwrap()).unwrap();
    }
    #[test]
    fn parse_ts5_server_license_long() {
        // Slightly older format having a block with a Ts5 license block in it
        Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQDVsMGbcrMmGif1vSXPWWXNW2CB5\
            Fe9oZ/2uxP29j1EXQAQSfiAazbsgAAAASVUZWFtU3BlYWsgU3lzdGVtcyBHbWJIAAALB6Qfbe\
            JyN+9foJhe+/KPFwyU+i++4MAA0q1/WCnizwARRuEPN1aeBQAAASBUZWFtU3BlYWsgc3lzdGV\
            tcyBHbWJIAADrhbI5gUR3thsS7FqKV5P5h7djnwMSJfF2vi58lm1VcwgRUFMAE0P7gAUCGQIA\
            VGVhbVNwZWFrIFN5c3RlbXMgR21iSAAGAwEAAAAFAAf2KhQ7WLjOvwwY0Bi7LxAcWmQeT+LQt\
            uaOzjhYoA+YIBGNq1kRjlQZ").unwrap()).unwrap();
    }
    #[test]
    fn parse_ts5_server_license_long2() {
        // Also slightly older, has 3 properties
        Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQDVsMGbcrMmGif1vSXPWWXNW2CB5\
            Fe9oZ/2uxP29j1EXQAQSfiAazbsgAAAASVUZWFtU3BlYWsgU3lzdGVtcyBHbWJIAAAtXG5p2n\
            iXlDfpVAGuD88w8hetKYL4vqHRkB5xB8ASRwAR2t/MN+ttjAAAASBUZWFtU3BlYWsgc3lzdGV\
            tcyBHbWJIAAAdZYGtwkeZFhzqnoV1uk+Tcphe8GgcqiPVtELF9y4wOAgR4qmAF4jnAAkDGQIA\
            VGVhbVNwZWFrIFN5c3RlbXMgR21iSAAGAwEAAAAFBgEBAAGGoADzyFvD+9G6uhIxmh0jK+Uo8\
            z8fYGJVH81vWFULDS0l8yATKe4cEyqW3A==").unwrap()).unwrap();
    }
    #[test]
    fn parse_ts5_server_license_single_block_with_issuer() {
        Licenses::parse_ignore_expired(
            BASE64_STANDARD
                .decode(
                    "AQBgjAAqtcBUrw5futTtkl3+EM3OW4Lal6OTPlwuv4xV/\
                     gIRFlEAG0NlAAcAAAAgQW5vbnltb3VzAACKNY+/\
                     9qCbonCSxG18vBb7y7zPIgDdjTmcZoAHHclnJSATPa69Ez5XfQ==",
                )
                .unwrap(),
        )
        .unwrap();
    }
    #[test]
    fn parse_ts5_server_license_single_block() {
        // Only a single server block and the ephemeral block
        Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQAuio9ZxThXKE+hmzQyzBRedysp9\
            79JBTv2xP3s2oCkiAgQI70AE+YkAAcBBgMBAAAABQBoazM313063zaipPTH06zrXc91ch3huB\
            YrUET9sEbz1CATKgK8EyqrfA==").unwrap()).unwrap();
    }
    #[test]
    fn derive_public_key() {
        // The derived key of a chain starting at the root key must match the
        // known-good value below.
        let licenses = Licenses::parse_ignore_expired(BASE64_STANDARD.decode("AQA1hUFJiiSs0wFXkYuPUJVcDa6XCrZTcsvkB0Ffzz4CmwIITRXgCqeTYAcAAAAgQW5vbnltb3VzAAC4R+5mos+UQ/KCbkpQLMI5WRp4wkQu8e5PZY4zU+/FlyAJwaE8CcJJ/A==").unwrap()).unwrap();
        let derived_key =
            licenses.derive_public_key(EccKeyPubEd25519::from_bytes(crate::ROOT_KEY)).unwrap();
        let expected_key = [
            0x40, 0xe9, 0x50, 0xc4, 0x61, 0xba, 0x18, 0x3a, 0x1e, 0xb7, 0xcb, 0xb1, 0x9a, 0xc3,
            0xd8, 0xd9, 0xc4, 0xd5, 0x24, 0xdb, 0x38, 0xf7, 0x2d, 0x3d, 0x66, 0x75, 0x77, 0x2a,
            0xc5, 0x9c, 0xc5, 0xc6,
        ];
        let derived_key = derived_key.compress().0;
        assert_eq!(derived_key, expected_key);
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/utils.rs | tsproto/src/utils.rs | use std::net::IpAddr;
use crate::{Error, Result};
/// Try to approximate the not stabilized ip.is_global().
///
/// Returns `false` for unspecified, loopback and multicast addresses of both
/// families, and additionally for broadcast, link-local and private IPv4
/// addresses. Every other address counts as global.
pub fn is_global_ip(ip: &IpAddr) -> bool {
    // Rules common to both address families.
    if ip.is_unspecified() || ip.is_loopback() || ip.is_multicast() {
        return false;
    }
    match ip {
        IpAddr::V4(v4) => !(v4.is_broadcast() || v4.is_link_local() || v4.is_private()),
        IpAddr::V6(_) => true,
    }
}
pub fn read_hex(s: &str) -> Result<Vec<u8>> {
// Detect hex format
if s.chars().nth(2) == Some(':') {
// Wireshark
Ok(s.split(':')
.map(|s| u8::from_str_radix(s, 16))
.collect::<::std::result::Result<Vec<_>, _>>()
.map_err(Error::InvalidHex)?)
} else if s.chars().nth(2) == Some(' ') {
// Wireshark
Ok(s.split(' ')
.map(|s| u8::from_str_radix(s, 16))
.collect::<::std::result::Result<Vec<_>, _>>()
.map_err(Error::InvalidHex)?)
} else if s.starts_with("0x") {
let s: String = s.chars().filter(|c| !c.is_whitespace()).collect();
// Own dumps
Ok(s[2..]
.split(',')
.map(|s| u8::from_str_radix(s.trim_start_matches("0x"), 16))
.collect::<::std::result::Result<Vec<_>, _>>()
.map_err(Error::InvalidHex)?)
} else {
let s: String = s.chars().filter(|c| !c.is_whitespace()).collect();
Ok(s.as_bytes()
.chunks(2)
.map(|s| u8::from_str_radix(::std::str::from_utf8(s).unwrap(), 16))
.collect::<::std::result::Result<Vec<_>, _>>()
.map_err(Error::InvalidHex)?)
}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/resend.rs | tsproto/src/resend.rs | use std::cmp::{min, Ord, Ordering};
use std::collections::{BTreeMap, BinaryHeap};
use std::convert::From;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::{Add, Sub};
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::prelude::*;
use num_traits::ToPrimitive;
use pin_project_lite::pin_project;
use tokio::time::{Duration, Instant, Sleep};
use tracing::{info, trace, warn};
use tsproto_packets::packets::*;
use crate::connection::{Connection, StreamItem};
use crate::{Error, Result, UDP_SINK_CAPACITY};
// TODO implement fast retransmit: 2 Acks received but earlier packet not acked -> retransmit
// TODO implement slow start and redo slow start when send window reaches 1, also reset all tries then
// Use cubic for congestion control: https://en.wikipedia.org/wiki/CUBIC_TCP
// But scaling with number of sent packets instead of time because we might not
// send packets that often.
/// The congestion window gets down to 0.3*w_max for BETA=0.7.
const BETA: f32 = 0.7;
/// Increase over w_max after roughly 5 packets (C=0.2 needs seven packets).
const C: f32 = 0.5;
/// Store that many pings, if all of them get lost, it is a timeout.
const PING_COUNT: usize = 30;
/// Duration in seconds between sending two ping packets.
const PING_SECONDS: u64 = 1;
/// Duration in seconds between resetting the statistic counters.
const STAT_SECONDS: u64 = 1;
/// Size of an UDP header in bytes.
const UDP_HEADER_SIZE: u32 = 8;
/// Size of an IPv4 header in bytes (minimum header, without options).
const IPV4_HEADER_SIZE: u32 = 20;
// TODO Use correct IPv6 header size to compute bandwidth
// Size of an IPv6 header in bytes.
//const IPV6_HEADER_SIZE: u32 = 40;
/// Events to inform a resender of the current state of a connection.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum ResenderState {
    /// The connection is starting, reduce the timeout time.
    Connecting,
    /// The handshake is completed, this is the normal operation mode.
    Connected,
    /// The connection is tearing down, reduce the timeout time.
    Disconnecting,
    /// The connection is gone, we only send ack packets.
    Disconnected,
}
/// A packet id together with its generation.
///
/// Packet ids are 16 bit and wrap around; the generation counts the wraps
/// (see the `Add`/`Sub` impls for this type).
#[derive(Clone, Copy, Default, Eq, Hash, PartialEq)]
pub struct PartialPacketId {
    pub generation_id: u32,
    pub packet_id: u16,
}
/// A full packet id: the packet type plus the generation/id pair.
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
pub struct PacketId {
    pub packet_type: PacketType,
    pub part: PartialPacketId,
}
/// Resend bookkeeping of a queued packet, without the payload.
#[derive(Clone, Debug)]
pub struct SendRecordId {
    /// The last time when the packet was sent.
    pub last: Instant,
    /// How often the packet was already resent.
    pub tries: usize,
    // Identity of the packet this record belongs to.
    id: PacketId,
}
/// A record of a packet that can be resent.
#[derive(Debug)]
struct SendRecord {
    /// When this packet was originally sent.
    pub sent: Instant,
    pub id: SendRecordId,
    pub packet: OutUdpPacket,
}
/// An outstanding ping that was not yet answered by a pong.
#[derive(Debug)]
struct Ping {
    id: PartialPacketId,
    /// When the ping packet was sent.
    sent: Instant,
}
/// Enum for different collected packet statistics.
///
/// Used (via `as usize`) as an index into the statistics arrays.
#[derive(Clone, Copy, Debug)]
pub enum PacketStat {
    /// Commands and acks
    InControl,
    /// Ping and Pong
    InKeepalive,
    /// Voice
    InSpeech,
    /// Commands and acks
    OutControl,
    /// Ping and Pong
    OutKeepalive,
    /// Voice
    OutSpeech,
}
/// Metrics collected to compute packet loss.
///
/// The count of incoming packets includes packets that were lost. It is computed by the difference
/// of packet ids.
///
/// Packet losses that are impossible to track: Outgoing voice, incoming commands, outgoing pongs.
///
/// Used (via `as usize`) as an index into the `packet_loss` arrays.
#[derive(Clone, Copy, Debug)]
enum PacketLossStat {
    /// Count of incoming voice packets.
    VoiceInCount,
    /// Count of packet ids that we should have received but did not get.
    VoiceInLost,
    /// Count of outgoing acks.
    AckOutCount,
    /// Count of already acked commands that were received.
    AckOutLost,
    /// Count of incoming ack packets.
    AckInCount,
    /// Count of packet ids that we should have received but did not get.
    AckInLost,
    /// Count of command packets that were resent.
    ///
    /// If a command packet needs to be resent, either the command or the ack was lost.
    AckInOrCommandOutLost,
    /// Count of outgoing command packets.
    CommandOutCount,
    /// Count of incoming pong packets.
    PongInCount,
    /// Count of packet ids that we should have received but did not get.
    PongInLost,
    // Count of ping packets for which we did not get an answer.
    //
    // If no pong is received for a ping, either the ping or the pong was lost.
    // We do not handle many pings, so just use PingOutCount - PongInCount.
    //PongInOrPingOutLost,
    /// Count of outgoing pings.
    PingOutCount,
    /// Count of incoming pings.
    PingInCount,
    /// Count of packet ids that we should have received but did not get.
    PingInLost,
}
/// Network statistics of a connection.
#[derive(Clone)]
pub struct ConnectionStats {
    /// Non-smoothed Round Trip Time.
    pub rtt: Duration,
    /// Deviation of the rtt.
    pub rtt_dev: Duration,
    /// Total count of packets since start of the connection. Indexed by `PacketStat`.
    pub total_packets: [u64; 6],
    /// Total count of bytes since start of the connection. Indexed by `PacketStat`.
    pub total_bytes: [u64; 6],
    /// Stats collected to compute packet loss. Indexed by `PacketLossStat`.
    packet_loss: [u32; 13],
    /// Bytes in the last second. Indexed by `PacketStat`.
    ///
    /// For the last 60 seconds, so the per minute stats can be computed.
    pub last_second_bytes: [[u32; 6]; 60],
    /// The index in `last_second_bytes` that will be written next.
    next_second_index: u8,
}
/// An intermediate struct to collect network statistics.
///
/// Collected for one second and reset after moving them to `ConnectionStats`.
#[derive(Clone, Debug, Default)]
struct ConnectionStatsCounter {
    /// Bytes in the current second. Indexed by `PacketStat`.
    bytes: [u32; 6],
    /// Stats collected to compute packet loss. Indexed by `PacketLossStat`.
    packet_loss: [u32; 13],
}
pin_project! {
    /// Timers are extra because they need to be pinned.
    ///
    /// All of these are tokio `Sleep` futures, which must be pinned to be
    /// polled; `pin_project` generates the projections used when resetting
    /// and polling them.
    #[derive(Debug)]
    struct Timers {
        // The future to wake us up when the next packet should be resent.
        #[pin]
        timeout: Sleep,
        // The timer used for sending ping packets.
        #[pin]
        ping_timeout: Sleep,
        // The timer used for disconnecting the connection.
        #[pin]
        state_timeout: Sleep,
        // The timer used to update network statistics.
        #[pin]
        stats_timeout: Sleep,
    }
}
/// Resend command and init packets until the other side acknowledges them.
#[derive(Debug)]
pub struct Resender {
    /// Send queue ordered by when a packet has to be sent.
    ///
    /// The maximum in this queue is the next packet that should be resent.
    /// This is a part of `full_send_queue`.
    send_queue: BinaryHeap<SendRecordId>,
    /// Send queue ordered by packet id.
    ///
    /// There is one queue per packet type: `Init`, `Command` and `CommandLow`.
    full_send_queue: [BTreeMap<PartialPacketId, SendRecord>; 3],
    /// All packets with an id less than this index id are currently in the
    /// `send_queue`. Packets with an id greater or equal to this index are not
    /// in the send queue.
    send_queue_indices: [PartialPacketId; 3],
    /// Timeouts and rtt estimates.
    config: ResendConfig,
    /// Current connection state; selects which timeout applies.
    state: ResenderState,
    /// A list of the last sent pings that were not yet acknowledged.
    last_pings: Vec<Ping>,
    /// In progress counters.
    stats_counter: ConnectionStatsCounter,
    /// Current up to date statistics.
    pub stats: ConnectionStats,
    // Congestion control
    /// The maximum send window before the last reduction.
    w_max: u16,
    /// The time when the last packet loss occurred.
    ///
    /// This is not necessarily the accurate time, but the duration until
    /// now/no_congestion_since is accurate.
    last_loss: Instant,
    /// The send queue was never full since this time. We use this to not
    /// increase the send window in this case.
    no_congestion_since: Option<Instant>,
    /// When the last packet was added to the send queue or received.
    ///
    /// This is used to decide when to send ping packets.
    last_receive: Instant,
    /// When the last packet was added to the send queue.
    ///
    /// This is used to handle timeouts when disconnecting.
    last_send: Instant,
    /// When the statistics were last reset.
    last_stat: Instant,
    /// Pinned tokio timers, see [`Timers`].
    timers: Pin<Box<Timers>>,
}
/// Tunable timeouts and the smoothed rtt estimate of a [`Resender`].
#[derive(Clone, Debug)]
pub struct ResendConfig {
    /// Close the connection after no packet is received for this duration
    /// while connecting.
    pub connecting_timeout: Duration,
    /// Timeout during normal operation.
    pub normal_timeout: Duration,
    /// Timeout while disconnecting (also used when disconnected).
    pub disconnect_timeout: Duration,
    /// Smoothed Round Trip Time.
    pub srtt: Duration,
    /// Deviation of the srtt.
    pub srtt_dev: Duration,
}
impl Ord for PartialPacketId {
    /// Order by generation first, then by packet id within the generation.
    fn cmp(&self, other: &Self) -> Ordering {
        (self.generation_id, self.packet_id).cmp(&(other.generation_id, other.packet_id))
    }
}
impl PartialOrd for PartialPacketId {
    // Delegates to the total order defined in `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl Add<u16> for PartialPacketId {
    type Output = Self;
    /// Advance the packet id by `rhs`, carrying into the generation when the
    /// 16 bit id wraps around.
    fn add(self, rhs: u16) -> Self::Output {
        let (packet_id, wrapped) = self.packet_id.overflowing_add(rhs);
        let generation_id = self.generation_id.wrapping_add(u32::from(wrapped));
        Self { generation_id, packet_id }
    }
}
impl Sub<u16> for PartialPacketId {
    type Output = Self;
    /// Step the packet id back by `rhs`, borrowing from the generation when
    /// the 16 bit id wraps around.
    fn sub(self, rhs: u16) -> Self::Output {
        let (packet_id, wrapped) = self.packet_id.overflowing_sub(rhs);
        let generation_id = self.generation_id.wrapping_sub(u32::from(wrapped));
        Self { generation_id, packet_id }
    }
}
impl Sub<PartialPacketId> for PartialPacketId {
    type Output = i64;
    /// Signed distance between two packet ids, in packets.
    ///
    /// Each generation spans the full `u16` range, i.e. 65536 ids
    /// (`u16::MAX + 1`). The previous code multiplied the generation
    /// difference by `u16::MAX` (65535), which is off by one packet per
    /// generation wrap — e.g. `(gen 1, id 0) - (gen 0, id 65535)` yielded 0
    /// instead of 1.
    fn sub(self, rhs: Self) -> Self::Output {
        let gen_diff = i64::from(self.generation_id) - i64::from(rhs.generation_id);
        let id_diff = i64::from(self.packet_id) - i64::from(rhs.packet_id);
        gen_diff * (i64::from(u16::MAX) + 1) + id_diff
    }
}
impl PartialOrd for PacketId {
    /// Ids of different packet types are incomparable; same-type ids compare
    /// by their `(generation, packet id)` part.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self.packet_type != other.packet_type {
            return None;
        }
        Some(self.part.cmp(&other.part))
    }
}
impl From<&OutUdpPacket> for PacketId {
    /// Extract the full id of an outgoing UDP packet.
    fn from(packet: &OutUdpPacket) -> Self {
        let part = PartialPacketId {
            generation_id: packet.generation_id(),
            packet_id: packet.packet_id(),
        };
        Self { packet_type: packet.packet_type(), part }
    }
}
impl fmt::Debug for PacketId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Packet type immediately followed by the partial id.
        write!(f, "{:?}{:?}", self.packet_type, self.part)
    }
}
impl fmt::Debug for PartialPacketId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Hexadecimal `(generation:packet_id)`.
        write!(f, "({:x}:{:x})", self.generation_id, self.packet_id)
    }
}
impl Ord for SendRecordId {
    /// Priority order for the resend `BinaryHeap`: the greatest element is
    /// the packet that should be sent next.
    fn cmp(&self, other: &Self) -> Ordering {
        // If the packet was not already sent, it is more important
        if self.tries == 0 {
            if other.tries != 0 {
                return Ordering::Greater;
            }
        } else if other.tries == 0 {
            return Ordering::Less;
        }
        // The smallest time is the most important time
        self.last.cmp(&other.last).reverse().then_with(||
            // Else, the lower packet id is more important
            self.id.part.cmp(&other.id.part).reverse())
    }
}
impl PartialOrd for SendRecordId {
    // Delegates to the total order defined in `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
// Equality and hashing look only at the packet id, not at the send times.
// NOTE(review): this makes `Eq` inconsistent with `Ord` above (which compares
// times first); fine for the heap usage here, but do not rely on
// `a == b` implying `a.cmp(&b) == Ordering::Equal`.
impl PartialEq for SendRecordId {
    fn eq(&self, other: &Self) -> bool { self.id.eq(&other.id) }
}
impl Eq for SendRecordId {}
impl Hash for SendRecordId {
    fn hash<H: Hasher>(&self, state: &mut H) { self.id.hash(state); }
}
impl Default for Resender {
    /// A fresh resender in the `Connecting` state with empty queues.
    ///
    /// NOTE(review): `tokio::time::sleep` needs a tokio runtime/timer
    /// context; constructing a `Resender` outside of one will presumably
    /// panic — confirm where this is instantiated.
    fn default() -> Self {
        let now = Instant::now();
        Self {
            send_queue: Default::default(),
            full_send_queue: Default::default(),
            send_queue_indices: Default::default(),
            config: Default::default(),
            state: ResenderState::Connecting,
            last_pings: Default::default(),
            stats: Default::default(),
            stats_counter: Default::default(),
            // Initial window before any congestion feedback.
            w_max: UDP_SINK_CAPACITY as u16,
            last_loss: now,
            no_congestion_since: Some(now),
            last_receive: now,
            last_send: now,
            last_stat: now,
            timers: Box::pin(Timers {
                timeout: tokio::time::sleep(std::time::Duration::from_secs(1)),
                ping_timeout: tokio::time::sleep(std::time::Duration::from_secs(PING_SECONDS)),
                state_timeout: tokio::time::sleep(std::time::Duration::from_secs(1)),
                stats_timeout: tokio::time::sleep(std::time::Duration::from_secs(STAT_SECONDS)),
            }),
        }
    }
}
impl Resender {
    /// Map a resendable packet type to its index in `full_send_queue`.
    ///
    /// # Panics
    /// Panics for packet types other than `Init`, `Command` and `CommandLow`;
    /// only these are resent.
    fn packet_type_to_index(t: PacketType) -> usize {
        match t {
            PacketType::Init => 0,
            PacketType::Command => 1,
            PacketType::CommandLow => 2,
            _ => panic!("Resender cannot handle packet type {:?}", t),
        }
    }
    /// Process an incoming acknowledgement for packet `p_id` of `p_type`.
    ///
    /// Pings are delegated to `ack_ping`. Otherwise the acked record is
    /// removed from the send queue; when the oldest outstanding packet is
    /// acked, an `AckPacket` stream item is emitted.
    pub fn ack_packet(con: &mut Connection, cx: &mut Context, p_type: PacketType, p_id: u16) {
        if p_type == PacketType::Ping {
            Self::ack_ping(con, p_id);
            return;
        }
        // Remove from ordered queue
        let queue = &mut con.resender.full_send_queue[Self::packet_type_to_index(p_type)];
        let mut queue_iter = queue.iter();
        if let Some((first, _)) = queue_iter.next() {
            let id = if first.packet_id == p_id {
                // The oldest outstanding packet was acked; compute the id up
                // to which everything counts as acknowledged.
                let p_id = if let Some((_, rec2)) = queue_iter.next() {
                    // Ack all until the next packet
                    rec2.id.id.part
                } else if p_type == PacketType::Init {
                    // Ack the current packet
                    // NOTE(review): `p_id + 1` can overflow (panic in debug
                    // builds) for `p_id == u16::MAX`; init ids stay tiny in
                    // practice — confirm.
                    PartialPacketId { generation_id: 0, packet_id: p_id + 1 }
                } else {
                    // Ack all until the next packet to send
                    con.codec.outgoing_p_ids[p_type.to_usize().unwrap()]
                };
                // `p_id - 1` uses the `Sub<u16>` impl, borrowing a generation
                // on wrap-around.
                let id = PacketId { packet_type: p_type, part: p_id - 1 };
                con.stream_items.push_back(StreamItem::AckPacket(id));
                *first
            } else {
                // Acked out of order: reconstruct the full id, assuming a
                // generation wrap when the id is below the oldest queued one.
                PartialPacketId {
                    generation_id: if p_id < first.packet_id {
                        first.generation_id.wrapping_add(1)
                    } else {
                        first.generation_id
                    },
                    packet_id: p_id,
                }
            };
            if let Some(rec) = queue.remove(&id) {
                // Update srtt if the packet was not resent
                if rec.id.tries == 1 {
                    let now = Instant::now();
                    con.resender.update_srtt(now - rec.sent);
                }
                // Notify the waker that we can send another packet from the
                // send queue.
                cx.waker().wake_by_ref();
            }
        }
    }
pub fn ack_ping(con: &mut Connection, p_id: u16) {
if let Ok(i) = con.resender.last_pings.binary_search_by_key(&p_id, |p| p.id.packet_id) {
let ping = con.resender.last_pings.remove(i);
let now = Instant::now();
con.resender.update_srtt(now - ping.sent);
}
}
pub fn received_packet(&mut self) { self.last_receive = Instant::now(); }
pub fn handle_loss_incoming(&mut self, packet: &InPacket, in_recv_win: bool, cur_next: u16) {
let p_type = packet.header().packet_type();
let id = packet.header().packet_id();
let id_diff = u32::from(id.wrapping_sub(cur_next));
let p_stat = PacketStat::from(p_type, true);
let len = (packet.header().data().len() + packet.content().len()) as u32
+ UDP_HEADER_SIZE
+ IPV4_HEADER_SIZE;
self.stats_counter.bytes[p_stat as usize] += len;
self.stats.total_packets[p_stat as usize] += 1;
self.stats.total_bytes[p_stat as usize] += u64::from(len);
let stats;
if p_type.is_voice() {
stats = Some((PacketLossStat::VoiceInCount, PacketLossStat::VoiceInLost));
} else if p_type.is_ack() {
stats = Some((PacketLossStat::AckInCount, PacketLossStat::AckInLost));
} else if p_type == PacketType::Pong {
stats = Some((PacketLossStat::PongInCount, PacketLossStat::PongInLost));
} else if p_type == PacketType::Ping {
stats = Some((PacketLossStat::PingInCount, PacketLossStat::PingInLost));
} else {
stats = None;
}
if let Some((count, lost)) = stats {
if in_recv_win {
self.stats_counter.packet_loss[count as usize] += id_diff + 1;
self.stats_counter.packet_loss[lost as usize] += id_diff;
} else {
self.stats_counter.packet_loss[lost as usize] =
self.stats_counter.packet_loss[lost as usize].saturating_sub(1);
}
}
}
pub fn handle_loss_outgoing(&mut self, packet: &OutUdpPacket) {
let p_type = packet.data().header().packet_type();
let p_stat = PacketStat::from(p_type, false);
// 8 byte UDP header
let len = packet.data().data().len() as u32 + UDP_HEADER_SIZE + IPV4_HEADER_SIZE;
self.stats_counter.handle_loss_outgoing(packet, p_stat, len);
self.stats.handle_loss_outgoing(p_stat, u64::from(len));
}
    /// Record that an ack had to be sent again, i.e. the original outgoing
    /// ack was presumably lost.
    pub fn handle_loss_resend_ack(&mut self) {
        self.stats_counter.packet_loss[PacketLossStat::AckOutLost as usize] += 1;
    }
fn get_timeout(&self) -> Duration {
match self.state {
ResenderState::Connecting => self.config.connecting_timeout,
ResenderState::Disconnecting | ResenderState::Disconnected => {
self.config.disconnect_timeout
}
ResenderState::Connected => self.config.normal_timeout,
}
}
    /// Inform the resender of state changes of the connection.
    ///
    /// Restarts the state timeout so the new state's timeout is measured
    /// from now.
    pub fn set_state(&mut self, state: ResenderState) {
        info!(from = ?self.state, to = ?state, "Resender: Changed state");
        self.state = state;
        self.last_send = Instant::now();
        // Re-arm the state timer with the timeout of the new state.
        let new_timeout = self.last_send + self.get_timeout();
        let timers = self.timers.as_mut().project();
        timers.state_timeout.reset(new_timeout);
    }
pub fn get_state(&self) -> ResenderState { self.state }
/// If the send queue is full if it reached the congestion window size or
/// it contains packets that were not yet sent once.
pub fn is_full(&self) -> bool { self.full_send_queue.len() >= self.get_window() as usize }
    /// If the send queue is empty, i.e. no packet of any type awaits an ack.
    pub fn is_empty(&self) -> bool { self.full_send_queue.iter().all(|q| q.is_empty()) }
    /// Take the first packets from `to_send_ordered` and put them into
    /// `to_send`.
    ///
    /// This is done on packet loss, when the send queue is rebuilt.
    fn rebuild_send_queue(&mut self) {
        // Drop the current heap and indices, then refill from scratch so the
        // (possibly shrunken) congestion window is respected.
        self.send_queue.clear();
        self.send_queue_indices = Default::default();
        self.fill_up_send_queue();
    }
    /// Fill up to the send window size.
    ///
    /// Pulls records from the three per-type `full_send_queue`s (starting at
    /// `send_queue_indices`) into the `send_queue` heap until the congestion
    /// window is reached or no candidates remain, and maintains the
    /// congestion bookkeeping (`no_congestion_since`/`last_loss`).
    fn fill_up_send_queue(&mut self) {
        let get_skip_closure = |i: usize| {
            let start = self.send_queue_indices[i];
            move |r: &&SendRecord| r.id.id.part < start
        };
        let mut iters = [
            self.full_send_queue[0].values().skip_while(get_skip_closure(0)).peekable(),
            self.full_send_queue[1].values().skip_while(get_skip_closure(1)).peekable(),
            self.full_send_queue[2].values().skip_while(get_skip_closure(2)).peekable(),
        ];
        for _ in self.send_queue.len()..(self.get_window() as usize) {
            let mut max_i = None;
            let mut min_time = None;
            for (i, iter) in iters.iter_mut().enumerate() {
                if let Some(rec) = iter.peek() {
                    // NOTE(review): this keeps the candidate with the LATEST
                    // `sent` time (the condition replaces `min_time` whenever
                    // it is smaller than `rec.sent`), although the variable
                    // name suggests the oldest should win — confirm intent.
                    if min_time.map(|t| t < rec.sent).unwrap_or(true) {
                        max_i = Some(i);
                        min_time = Some(rec.sent);
                    }
                }
            }
            if let Some(max_i) = max_i {
                let max = iters[max_i].next().unwrap().id.clone();
                self.send_queue_indices[max_i] = max.id.part + 1;
                self.send_queue.push(max);
            } else {
                // Nothing left to queue: remember that we are not congested.
                if self.no_congestion_since.is_none() {
                    self.no_congestion_since = Some(Instant::now());
                }
                return;
            }
        }
        // The window is filled again; shift `last_loss` forward so the
        // congestion-free period does not count into the CUBIC time.
        if let Some(until) = self.no_congestion_since.take() {
            self.last_loss = Instant::now() - (until - self.last_loss);
        }
    }
    /// The amount of packets that can be in-flight concurrently.
    ///
    /// The CUBIC congestion control window:
    /// `w(t) = C * (t - cbrt(w_max * BETA / C))^3 + w_max`,
    /// where `t` is the time since the last loss (frozen at
    /// `no_congestion_since` while the queue is not congested). The result
    /// is clamped to `1..=u16::MAX / 2`.
    fn get_window(&self) -> u16 {
        let time = self.no_congestion_since.unwrap_or_else(Instant::now) - self.last_loss;
        let res = C
            * (time.as_secs_f32() - (self.w_max as f32 * BETA / C).powf(1.0 / 3.0)).powf(3.0)
            + self.w_max as f32;
        let max = u16::MAX / 2;
        if res > max as f32 {
            max
        } else if res < 1.0 {
            1
        } else {
            res as u16
        }
    }
/// Add another duration to the stored smoothed rtt.
fn update_srtt(&mut self, rtt: Duration) {
let diff =
if rtt > self.config.srtt { rtt - self.config.srtt } else { self.config.srtt - rtt };
self.config.srtt_dev = self.config.srtt_dev * 3 / 4 + diff / 4;
self.config.srtt = self.config.srtt * 7 / 8 + rtt / 8;
// Rtt (non-smoothed)
let diff = if rtt > self.stats.rtt { rtt - self.stats.rtt } else { self.stats.rtt - rtt };
self.stats.rtt_dev = self.stats.rtt_dev * 3 / 4 + diff / 4;
self.stats.rtt = self.stats.rtt / 2 + rtt / 2;
}
    /// Queue an outgoing packet for (re)sending.
    ///
    /// Inserts a fresh send record (zero tries) into the per-type queue and
    /// tops up the in-flight heap.
    pub fn send_packet(con: &mut Connection, packet: OutUdpPacket) {
        con.resender.last_send = Instant::now();
        let rec = SendRecord {
            sent: Instant::now(),
            id: SendRecordId { last: Instant::now(), tries: 0, id: (&packet).into() },
            packet,
        };
        trace!(record = ?rec, window = con.resender.get_window(), "Adding send record");
        let i = Self::packet_type_to_index(rec.id.id.packet_type);
        // Reduce index if necessary (needed e.g. for resending lower init packets)
        if con.resender.send_queue_indices[i] > rec.id.id.part {
            con.resender.send_queue_indices[i] = rec.id.id.part;
        }
        con.resender.full_send_queue[i].insert(rec.id.id.part, rec);
        con.resender.fill_up_send_queue();
        trace!(state = ?con.resender.send_queue, indices = ?con.resender.send_queue_indices, "After adding send record");
    }
    /// Returns an error if the timeout is exceeded and the connection is
    /// considered dead or another unrecoverable error occurs.
    ///
    /// Drains the resend heap: sends every packet whose retransmission
    /// timeout expired, schedules a timer for the next due packet and
    /// applies congestion-control bookkeeping on retransmissions.
    pub fn poll_resend(con: &mut Connection, cx: &mut Context) -> Result<()> {
        trace!(state = ?con.resender.send_queue, "Poll resend");
        let timeout = con.resender.get_timeout();
        // Send a packet at least every second
        let max_send_rto = Duration::from_secs(1);
        // Check if there are packets to send.
        loop {
            let now = Instant::now();
            let window = con.resender.get_window();
            // Retransmission timeout
            let mut rto: Duration = con.resender.config.srtt + con.resender.config.srtt_dev * 4;
            if rto > max_send_rto {
                rto = max_send_rto;
            }
            let last_threshold = now - rto;
            // Peek the most urgent record; `PeekMut` lets us update it in
            // place later.
            let mut rec = if let Some(rec) = con.resender.send_queue.peek_mut() {
                rec
            } else {
                break;
            };
            trace!("Polling send record");
            // Skip if not contained in full_send_queue. This happens when the
            // packet was acknowledged.
            let full_queue =
                &mut con.resender.full_send_queue[Self::packet_type_to_index(rec.id.packet_type)];
            let full_rec = if let Some(r) = full_queue.get_mut(&rec.id.part) {
                r
            } else {
                drop(rec);
                con.resender.send_queue.pop();
                con.resender.fill_up_send_queue();
                continue;
            };
            // Check if we should resend this packet or not
            if rec.tries != 0 && rec.last > last_threshold {
                // Schedule next send
                let dur = rec.last - last_threshold;
                let timers = con.resender.timers.as_mut().project();
                timers.timeout.reset(now + dur);
                // Poll the freshly reset timer so the waker is registered.
                let timers = con.resender.timers.as_mut().project();
                if let Poll::Ready(()) = timers.timeout.poll(cx) {
                    continue;
                }
                break;
            }
            // A packet unacknowledged for longer than the state timeout
            // kills the connection.
            if now - full_rec.sent > timeout {
                return Err(Error::Timeout("Packet was not acked"));
            }
            // Try to send this packet
            trace!("Try sending packet");
            match Connection::static_poll_send_udp_packet(
                &*con.udp_socket,
                con.address,
                &con.event_listeners,
                cx,
                &full_rec.packet,
            ) {
                Poll::Pending => break,
                Poll::Ready(r) => {
                    r?;
                    if rec.tries != 0 {
                        // This is a retransmission: log it with full context.
                        let to_s = if con.is_client { "S" } else { "C" };
                        warn!(id = ?rec.id,
                            tries = rec.tries,
                            last = %format!("{:?} ago", now - rec.last),
                            to = to_s,
                            srtt = ?con.resender.config.srtt,
                            srtt_dev = ?con.resender.config.srtt_dev,
                            ?rto,
                            send_window = window,
                            "Resend"
                        );
                        con.resender.stats_counter.handle_loss_resend_command();
                    }
                    // Successfully started sending the packet, update record
                    rec.last = now;
                    rec.tries += 1;
                    full_rec.id = rec.clone();
                    // Account outgoing bytes (incl. UDP + IPv4 headers).
                    let p_type = full_rec.packet.data().header().packet_type();
                    let p_stat = PacketStat::from(p_type, false);
                    let len = full_rec.packet.data().data().len() as u32
                        + UDP_HEADER_SIZE + IPV4_HEADER_SIZE;
                    con.resender.stats_counter.handle_loss_outgoing(&full_rec.packet, p_stat, len);
                    con.resender.stats.handle_loss_outgoing(p_stat, u64::from(len));
                    if rec.tries != 1 {
                        drop(rec);
                        // Double srtt on packet loss
                        con.resender.config.srtt *= 2;
                        if con.resender.config.srtt > timeout {
                            con.resender.config.srtt = timeout;
                        }
                        // Handle congestion window
                        con.resender.w_max = con.resender.get_window();
                        con.resender.last_loss = Instant::now();
                        con.resender.no_congestion_since = None;
                        con.resender.rebuild_send_queue();
                    } else {
                        drop(rec);
                    }
                }
            }
        }
        Ok(())
    }
/// Returns an error if the timeout is exceeded and the connection is
/// considered dead or another unrecoverable error occurs.
///
/// Three responsibilities:
/// 1. While disconnecting, fail if the disconnect ack does not arrive in time.
/// 2. Roll the per-second statistics ring buffer every `STAT_SECONDS`.
/// 3. When connected and idle, send periodic pings and detect missing pongs.
pub fn poll_ping(con: &mut Connection, cx: &mut Context) -> Result<()> {
	let now = Instant::now();
	let timeout = con.resender.get_timeout();

	if con.resender.state == ResenderState::Disconnecting {
		if now - con.resender.last_send >= timeout {
			return Err(Error::Timeout("No disconnect ack received"));
		}
		let timers = con.resender.timers.as_mut().project();
		timers.state_timeout.reset(con.resender.last_send + timeout);
		// Re-project so the timer can be polled after the reset borrow ends.
		let timers = con.resender.timers.as_mut().project();
		if let Poll::Ready(()) = timers.state_timeout.poll(cx) {
			return Err(Error::Timeout("No disconnect ack received"));
		}
	}

	// Update stats
	loop {
		let next_reset = con.resender.last_stat + Duration::from_secs(STAT_SECONDS);
		let timers = con.resender.timers.as_mut().project();
		timers.stats_timeout.reset(next_reset);
		let timers = con.resender.timers.as_mut().project();
		if let Poll::Ready(()) = timers.stats_timeout.poll(cx) {
			// Reset stats
			con.resender.last_stat = Instant::now();
			// Move the current counter values into the ring buffer slot and
			// start counting the next second from zero.
			let stats = &mut con.resender.stats;
			let counter = &mut con.resender.stats_counter;
			let second_index = stats.next_second_index;
			stats.last_second_bytes[second_index as usize] = counter.bytes;
			stats.next_second_index = (second_index + 1) % stats.last_second_bytes.len() as u8;
			stats.packet_loss = counter.packet_loss;
			counter.reset();
			con.stream_items.push_back(StreamItem::NetworkStatsUpdated);
		} else {
			break;
		}
	}

	if con.resender.get_state() == ResenderState::Connected
		&& !con.resender.full_send_queue.iter().any(|q| !q.is_empty())
	{
		// Send pings if we are connected and there are no packets in the queue
		loop {
			let now = Instant::now();
			let mut next_ping = con.resender.last_receive + Duration::from_secs(PING_SECONDS);
			if let Some(p) = con.resender.last_pings.last() {
				if p.sent > con.resender.last_receive {
					next_ping = p.sent + Duration::from_secs(PING_SECONDS);
				} else {
					// We received a packet, clear the ping queue
					con.resender.last_pings.clear();
				}
			}
			let timers = con.resender.timers.as_mut().project();
			timers.ping_timeout.reset(next_ping);
			let timers = con.resender.timers.as_mut().project();
			if let Poll::Ready(()) = timers.ping_timeout.poll(cx) {
				// Check for timeouts
				if con.resender.last_pings.len() >= PING_COUNT {
					let diff = con.resender.last_pings.last().unwrap().id
						- con.resender.last_pings.first().unwrap().id;
					// We did not receive a pong in between the last pings
					if diff as usize == con.resender.last_pings.len() - 1 {
						return Err(Error::Timeout("Server did not respond to pings"));
					}
				}

				// Send ping
				let dir = if con.is_client { Direction::C2S } else { Direction::S2C };
				let packet = OutPacket::new_with_dir(dir, Flags::empty(), PacketType::Ping);
				let p_id = con.send_packet(packet)?;
				con.resender.last_pings.push(Ping { id: p_id.part, sent: now });
				// Keep only the newest PING_COUNT entries.
				if con.resender.last_pings.len() > PING_COUNT {
					con.resender.last_pings.remove(0);
				}
			} else {
				break;
			}
		}
	}
	Ok(())
}
}
impl ConnectionStatsCounter {
	/// Account an outgoing packet: add its size to the per-category byte
	/// counters and bump the matching packet-loss counter, if any.
	fn handle_loss_outgoing(&mut self, packet: &OutUdpPacket, p_stat: PacketStat, len: u32) {
		self.bytes[p_stat as usize] += len;
		let p_type = packet.data().header().packet_type();
		let stat = if p_type.is_ack() {
			Some(PacketLossStat::AckOutCount)
		} else if p_type == PacketType::Ping {
			Some(PacketLossStat::PingOutCount)
		} else if p_type.is_command() {
			Some(PacketLossStat::CommandOutCount)
		} else {
			None
		};
		if let Some(stat) = stat {
			self.packet_loss[stat as usize] += 1;
		}
	}

	/// Count a resent command (either the command or its ack was lost).
	pub(crate) fn handle_loss_resend_command(&mut self) {
		self.packet_loss[PacketLossStat::AckInOrCommandOutLost as usize] += 1;
	}

	/// Clear all counters for the next measurement interval.
	fn reset(&mut self) { *self = Default::default(); }
}
impl ConnectionStats {
fn handle_loss_outgoing(&mut self, p_stat: PacketStat, len: u64) {
self.total_packets[p_stat as usize] += 1;
self.total_bytes[p_stat as usize] += len;
}
pub fn get_last_second_bytes(&self) -> &[u32; 6] {
&self.last_second_bytes[((self.next_second_index + 60 - 1) % 60) as usize]
}
pub fn get_last_minute_bytes(&self) -> [u64; 6] {
let mut bytes = [0; 6];
for bs in &self.last_second_bytes[..] {
for (i, b) in bs.iter().enumerate() {
bytes[i] += u64::from(*b);
}
}
bytes
}
// Packetloss stats for TeamSpeak
pub fn get_packetloss_s2c_speech(&self) -> f32 {
let count = self.packet_loss[PacketLossStat::VoiceInCount as usize];
if count == 0 {
0.0
} else {
self.packet_loss[PacketLossStat::VoiceInLost as usize] as f32 / count as f32
}
}
pub fn get_packetloss_s2c_keepalive(&self) -> f32 {
let count = self.packet_loss[PacketLossStat::PingInCount as usize]
+ self.packet_loss[PacketLossStat::PongInCount as usize];
if count == 0 {
0.0
} else {
(self.packet_loss[PacketLossStat::PingInLost as usize] as f32
+ self.packet_loss[PacketLossStat::PongInLost as usize] as f32)
/ count as f32
}
}
pub fn get_packetloss_s2c_control(&self) -> f32 {
let count = self.packet_loss[PacketLossStat::AckInCount as usize];
if count == 0 {
0.0
} else {
self.packet_loss[PacketLossStat::AckInLost as usize] as f32 / count as f32
}
}
pub fn get_packetloss_s2c_total(&self) -> f32 {
let count = self.packet_loss[PacketLossStat::VoiceInCount as usize]
+ self.packet_loss[PacketLossStat::PingInCount as usize]
+ self.packet_loss[PacketLossStat::PongInCount as usize]
+ self.packet_loss[PacketLossStat::AckInCount as usize];
if count == 0 {
0.0
} else {
(self.packet_loss[PacketLossStat::VoiceInLost as usize] as f32
+ self.packet_loss[PacketLossStat::PingInLost as usize] as f32
+ self.packet_loss[PacketLossStat::PongInLost as usize] as f32
+ self.packet_loss[PacketLossStat::AckInLost as usize] as f32)
/ count as f32
}
}
/// Compute the average incoming and outgoing packet loss.
pub fn get_packetloss(&self) -> f32 {
let count = self.packet_loss[PacketLossStat::VoiceInCount as usize]
+ self.packet_loss[PacketLossStat::PingInCount as usize]
+ self.packet_loss[PacketLossStat::PongInCount as usize]
+ self.packet_loss[PacketLossStat::AckInCount as usize]
+ self.packet_loss[PacketLossStat::AckOutCount as usize]
+ self.packet_loss[PacketLossStat::CommandOutCount as usize];
// Do not use ping count, it is unreliable
if count == 0 {
0.0
} else {
let command_out_lost = min(
self.packet_loss[PacketLossStat::AckInOrCommandOutLost as usize]
.saturating_sub(self.packet_loss[PacketLossStat::AckInLost as usize]),
self.packet_loss[PacketLossStat::CommandOutCount as usize],
);
(self.packet_loss[PacketLossStat::VoiceInLost as usize] as f32
+ self.packet_loss[PacketLossStat::PingInLost as usize] as f32
+ self.packet_loss[PacketLossStat::PongInLost as usize] as f32
+ self.packet_loss[PacketLossStat::AckInLost as usize] as f32
+ self.packet_loss[PacketLossStat::AckOutLost as usize] as f32
+ command_out_lost as f32)
/ count as f32
}
}
}
impl Default for ResendConfig {
fn default() -> Self {
Self {
connecting_timeout: Duration::from_secs(5),
normal_timeout: Duration::from_secs(30),
disconnect_timeout: Duration::from_secs(5),
srtt: Duration::from_millis(500),
srtt_dev: Duration::from_millis(0),
}
}
}
impl Default for ConnectionStats {
fn default() -> Self {
Self {
rtt: Duration::from_millis(0),
rtt_dev: Duration::from_millis(0),
total_packets: Default::default(),
total_bytes: Default::default(),
packet_loss: Default::default(),
last_second_bytes: [Default::default(); 60],
next_second_index: Default::default(),
}
}
}
impl fmt::Debug for ConnectionStats {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		// Show only the most recent per-second slot instead of dumping the
		// whole 60 entry ring buffer.
		let mut s = f.debug_struct("ConnectionStats");
		s.field("rtt", &self.rtt);
		s.field("rtt_dev", &self.rtt_dev);
		s.field("total_packets", &self.total_packets);
		s.field("total_bytes", &self.total_bytes);
		s.field("packet_loss", &self.packet_loss);
		s.field("last_second_bytes", self.get_last_second_bytes());
		s.field("next_second_index", &self.next_second_index);
		s.finish()
	}
}
impl PacketStat {
fn from(t: PacketType, incoming: bool) -> Self {
match t {
PacketType::Voice | PacketType::VoiceWhisper => {
if incoming {
PacketStat::InSpeech
} else {
PacketStat::OutSpeech
}
}
PacketType::Command
| PacketType::CommandLow
| PacketType::Ack
| PacketType::AckLow
| PacketType::Init => {
if incoming {
PacketStat::InControl
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | true |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/src/packet_codec.rs | tsproto/src/packet_codec.rs | use std::io::Cursor;
use std::task::Context;
use num_traits::ToPrimitive;
use omnom::WriteExt;
use tracing::warn;
use tsproto_packets::packets::*;
use crate::algorithms as algs;
use crate::connection::{Connection, Event, StreamItem};
use crate::resend::{PartialPacketId, Resender};
use crate::{Error, Result, MAX_FRAGMENTS_LENGTH, MAX_QUEUE_LEN};
/// Encodes outgoing packets.
///
/// This part does the compression, encryption and fragmentation.
// NOTE(review): despite the summary, this struct also holds the state for
// decoding incoming packets (receive queue, fragment queue, expected ids).
#[derive(Clone, Debug, Default)]
pub struct PacketCodec {
	/// The next packet id that should be sent.
	///
	/// This list is indexed by the `PacketType`, `PacketType::Init` is an
	/// invalid index.
	pub outgoing_p_ids: [PartialPacketId; 8],
	/// Used for incoming out-of-order packets.
	///
	/// Only used for `Command` and `CommandLow` packets.
	// Index 0 holds `Command`, index 1 holds `CommandLow` packets.
	pub receive_queue: [Vec<Vec<u8>>; 2],
	/// Used for incoming fragmented packets.
	///
	/// Contains the accumulated data from fragmented packets, the header of the
	/// first packet is at the beginning, all other headers are stripped away.
	/// Only used for `Command` and `CommandLow` packets.
	pub fragmented_queue: [Option<Vec<u8>>; 2],
	/// The next packet id that is expected.
	///
	/// Works like the `outgoing_p_ids`.
	pub incoming_p_ids: [PartialPacketId; 8],
}
impl PacketCodec {
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
///
/// Decoded results (commands, audio, init packets, errors) are pushed into
/// `con.stream_items`; most failures are reported as `StreamItem::Error`
/// instead of returning `Err`.
///
/// # Errors
/// Returns an error when a received command cannot be parsed or when sending
/// the resulting ack packet fails.
pub fn handle_udp_packet(
	con: &mut Connection, cx: &mut Context, mut packet_data: Vec<u8>,
) -> Result<()> {
	// Set when the handled packet type must be acknowledged at the end.
	let mut ack = false;
	let dir = if con.is_client { Direction::S2C } else { Direction::C2S };
	let packet = InPacket::new(dir, &packet_data);
	let p_type = packet.header().packet_type();
	let type_i = p_type.to_usize().unwrap();
	let id = packet.header().packet_id();
	let (in_recv_win, gen_id, cur_next, limit) = con.in_receive_window(p_type, id);
	con.resender.handle_loss_incoming(&packet, in_recv_win, cur_next);

	if let Some(params) = &con.params {
		if p_type == PacketType::Init {
			con.stream_items.push_back(StreamItem::Error(Error::UnexpectedInitPacket));
			return Ok(());
		}
		if !con.is_client {
			let c_id = packet.header().client_id().unwrap();
			// Accept any client id for the first few acks
			let is_first_ack = gen_id == 0 && id <= 3 && p_type == PacketType::Ack;
			if c_id != params.c_id && !is_first_ack {
				con.stream_items.push_back(StreamItem::Error(Error::WrongClientId(c_id)));
				return Ok(());
			}
		}
	}

	// Ignore range for acks and audio packets
	if [PacketType::Ack, PacketType::AckLow, PacketType::Voice, PacketType::VoiceWhisper]
		.contains(&p_type)
		|| in_recv_win
	{
		if !packet.header().flags().contains(Flags::UNENCRYPTED) {
			if p_type == PacketType::Ack && id == 1 && con.is_client {
				// This is the ack packet for the clientinit, we take the
				// initserver as ack instead.
				return Ok(());
			}

			// If it is the first ack packet of a client, try to fake
			// decrypt it.
			let new_content = if (p_type == PacketType::Ack && id <= 1 && con.is_client)
				|| con.params.is_none()
			{
				// Fake decryption first; fall back to real decryption when
				// crypto parameters already exist.
				match algs::decrypt_fake(&packet).or_else(|_| {
					if let Some(params) = &mut con.params {
						// Decrypt the packet
						algs::decrypt(&packet, gen_id, &params.shared_iv, &mut params.key_cache)
					} else {
						// Failed to fake decrypt the packet
						Err(Error::WrongMac { p_type, generation_id: gen_id, packet_id: id })
					}
				}) {
					Ok(r) => r,
					Err(e) => {
						con.stream_items.push_back(StreamItem::Error(e));
						return Ok(());
					}
				}
			} else if let Some(params) = &mut con.params {
				// Decrypt the packet
				match algs::decrypt(&packet, gen_id, &params.shared_iv, &mut params.key_cache) {
					Ok(r) => r,
					Err(e) => {
						con.stream_items.push_back(StreamItem::Error(e));
						return Ok(());
					}
				}
			} else {
				// Failed to fake decrypt the packet
				con.stream_items.push_back(StreamItem::Error(Error::WrongMac {
					p_type,
					generation_id: gen_id,
					packet_id: id,
				}));
				return Ok(());
			};
			// Overwrite the encrypted payload in place with the plaintext.
			let start = packet.header().data().len();
			packet_data[start..].copy_from_slice(&new_content);
		} else if algs::must_encrypt(p_type) {
			// Check if it is ok for the packet to be unencrypted
			con.stream_items.push_back(StreamItem::Error(Error::UnallowedUnencryptedPacket));
			return Ok(());
		}
		// Re-parse now that the payload is decrypted.
		let packet = InPacket::new(dir, &packet_data);

		match p_type {
			PacketType::Command | PacketType::CommandLow => {
				ack = true;
				let commands = Self::handle_command_packet(con, packet_data)?;
				// Be careful with command packets, they are guaranteed to
				// be in the right order now.
				for c in commands {
					// Send again
					let packet = InPacket::new(dir, &c);
					let event = Event::ReceivePacket(&packet);
					con.send_event(&event);

					let item = match InCommandBuf::try_new(dir, c) {
						Ok(c) => {
							// initivexpand2 is the ack for the last init packet
							if con.is_client
								&& c.data().packet().content().starts_with(b"initivexpand2 ")
							{
								Resender::ack_packet(con, cx, PacketType::Init, 4);
							} else if con.is_client
								&& c.data().packet().content().starts_with(b"initserver ")
							{
								// initserver acks clientinit
								Resender::ack_packet(con, cx, PacketType::Command, 2);
							} else if !con.is_client
								&& c.data().packet().content().starts_with(b"clientek ")
							{
								// clientek acks initivexpand2
								Resender::ack_packet(con, cx, PacketType::Command, 0);
							}
							StreamItem::Command(c)
						}
						Err(e) => return Err(Error::PacketParse("command", e)),
					};
					con.stream_items.push_back(item);
				}
			}
			_ => {
				if p_type == PacketType::Ping {
					ack = true;
				}
				// Update packet ids
				let in_ids = &mut con.codec.incoming_p_ids;
				if p_type != PacketType::Init {
					// Advance the expected id; bump the generation on wrap.
					let (id, next_gen) = id.overflowing_add(1);
					let generation_id = if next_gen { gen_id + 1 } else { gen_id };
					in_ids[type_i] = PartialPacketId { generation_id, packet_id: id };
				}

				match packet.ack_packet() {
					Ok(Some(ack_id)) => {
						// Remove command packet from send queue if the fitting ack is received.
						let p_type = if p_type == PacketType::Ack {
							PacketType::Command
						} else if p_type == PacketType::AckLow {
							PacketType::CommandLow
						} else if p_type == PacketType::Pong {
							PacketType::Ping
						} else {
							p_type
						};
						Resender::ack_packet(con, cx, p_type, ack_id);
					}
					Ok(None) => {}
					Err(e) => {
						con.stream_items.push_back(StreamItem::Error(Error::CreateAck(e)));
						return Ok(());
					}
				}

				// Send event after handling acks
				let event = Event::ReceivePacket(&packet);
				con.send_event(&event);

				if p_type.is_voice() {
					con.stream_items.push_back(match InAudioBuf::try_new(dir, packet_data) {
						Ok(r) => StreamItem::Audio(r),
						Err(e) => StreamItem::Error(Error::PacketParse("audio", e)),
					});
				} else if p_type == PacketType::Init {
					con.stream_items.push_back(if con.is_client {
						match InS2CInitBuf::try_new(dir, packet_data) {
							Ok(r) => StreamItem::S2CInit(r),
							Err(e) => StreamItem::Error(Error::PacketParse("s2cinit", e)),
						}
					} else {
						match InC2SInitBuf::try_new(dir, packet_data) {
							Ok(r) => StreamItem::C2SInit(r),
							Err(e) => StreamItem::Error(Error::PacketParse("c2sinit", e)),
						}
					});
				}
			}
		}
	} else {
		// Send an ack for the case when it was lost
		if p_type == PacketType::Command || p_type == PacketType::CommandLow {
			ack = true;
			con.resender.handle_loss_resend_ack();
		}
		con.stream_items.push_back(StreamItem::Error(Error::NotInReceiveWindow {
			id,
			next: cur_next,
			limit,
			p_type,
		}));
	}

	// Send ack
	if ack {
		con.send_ack_packet(cx, OutAck::new(dir.reverse(), p_type, id))?;
	}
	Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
///
/// Returns every complete command that can be processed now, in order.
/// Out-of-order packets are buffered in the receive queue; fragments are
/// accumulated in the fragmented queue until the final fragment arrives.
///
/// # Errors
/// Returns an error when decompression fails or when a fragment/receive
/// queue would grow beyond its limit.
fn handle_command_packet(
	con: &mut Connection, mut packet_data: Vec<u8>,
) -> Result<Vec<Vec<u8>>> {
	let dir = if con.is_client { Direction::S2C } else { Direction::C2S };
	let mut packet = InPacket::new(dir, &packet_data);
	let header = packet.header();
	let p_type = header.packet_type();
	let mut id = header.packet_id();
	let type_i = p_type.to_usize().unwrap();
	// Separate queues for Command (0) and CommandLow (1).
	let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
	let r_queue = &mut con.codec.receive_queue[cmd_i];
	let frag_queue = &mut con.codec.fragmented_queue[cmd_i];
	let in_ids = &mut con.codec.incoming_p_ids[type_i];
	let cur_next = in_ids.packet_id;
	if cur_next == id {
		// In order
		let mut packets = Vec::new();
		loop {
			// Update next packet id
			*in_ids = *in_ids + 1;

			let flags = packet.header().flags();
			let res_packet = if flags.contains(Flags::FRAGMENTED) {
				if let Some(mut frag_queue) = frag_queue.take() {
					// Last fragmented packet
					frag_queue.extend_from_slice(packet.content());
					let header = InPacket::new(dir, &frag_queue);
					// Decompress
					if header.header().flags().contains(Flags::COMPRESSED) {
						let decompressed = quicklz::decompress(
							&mut Cursor::new(header.content()),
							crate::MAX_DECOMPRESSED_SIZE,
						)
						.map_err(Error::DecompressPacket)?;
						// Keep the first packet's header, swap in the
						// decompressed payload.
						let start = header.header().data().len();
						frag_queue.truncate(start);
						frag_queue.extend_from_slice(&decompressed);
					}
					Some(frag_queue)
				} else {
					// Enqueue
					*frag_queue = Some(packet_data);
					None
				}
			} else if let Some(frag_queue) = frag_queue {
				// The packet is fragmented
				if frag_queue.len() < MAX_FRAGMENTS_LENGTH {
					frag_queue.extend_from_slice(packet.content());
					None
				} else {
					return Err(Error::MaxLengthExceeded("fragment queue"));
				}
			} else {
				// Decompress
				if flags.contains(Flags::COMPRESSED) {
					let decompressed = quicklz::decompress(
						&mut Cursor::new(packet.content()),
						crate::MAX_DECOMPRESSED_SIZE,
					)
					.map_err(Error::DecompressPacket)?;
					let start = packet.header().data().len();
					packet_data.truncate(start);
					packet_data.extend_from_slice(&decompressed);
				};
				Some(packet_data)
			};
			if let Some(p) = res_packet {
				packets.push(p);
			}

			// Check if there are following packets in the receive queue.
			id = id.wrapping_add(1);
			if let Some(pos) =
				r_queue.iter().position(|p| InHeader::new(dir, p).packet_id() == id)
			{
				packet_data = r_queue.remove(pos);
				packet = InPacket::new(dir, &packet_data);
			} else {
				break;
			}
		}
		Ok(packets)
	} else {
		// Out of order
		warn!(got = id, expected = cur_next, "Out of order command packet");
		// Buffer it only if it lies within the next MAX_QUEUE_LEN ids
		// (taking id wrap-around into account).
		let (limit, next_gen) = cur_next.overflowing_add(MAX_QUEUE_LEN);
		if (!next_gen && id >= cur_next && id < limit)
			|| (next_gen && (id >= cur_next || id < limit))
		{
			r_queue.push(packet_data);
			Ok(vec![])
		} else {
			Err(Error::MaxLengthExceeded("command queue"))
		}
	}
}
/// Encode `packet` into one or more UDP packets ready to be sent.
///
/// Assigns packet ids, sets the client id and flags, compresses/splits
/// command packets and encrypts (or fake encrypts) the result.
///
/// # Errors
/// Returns an error when encryption fails.
pub fn encode_packet(con: &mut Connection, mut packet: OutPacket) -> Result<Vec<OutUdpPacket>> {
	let p_type = packet.header().packet_type();
	let type_i = p_type.to_usize().unwrap();

	// TODO Needed, commands should set their own flag?
	if (p_type == PacketType::Command || p_type == PacketType::CommandLow) && con.is_client {
		// Set newprotocol flag
		packet.flags(packet.header().flags() | Flags::NEWPROTOCOL);
	}

	let p_id = if p_type == PacketType::Init {
		// Init packets always use id 0 in generation 0.
		PartialPacketId { generation_id: 0, packet_id: 0 }
	} else {
		con.codec.outgoing_p_ids[type_i]
	};
	// We fake encrypt the first command packet of the server (id 0) and the
	// first command packet of the client (id 1, clientek).
	let mut fake_encrypt = p_type == PacketType::Command
		&& p_id.generation_id == 0
		&& ((!con.is_client && p_id.packet_id == 0)
			|| (con.is_client && p_id.packet_id == 1 && {
				// Test if it is a clientek packet
				packet.content().starts_with(b"clientek")
			}));
	// Also fake encrypt the first ack of the client, which is the response
	// for the initivexpand2 packet.
	fake_encrypt |= con.is_client
		&& p_type == PacketType::Ack
		&& p_id.generation_id == 0
		&& p_id.packet_id == 0;

	// Get values from parameters
	let should_encrypt;
	let c_id;
	if let Some(params) = con.params.as_mut() {
		should_encrypt = algs::should_encrypt(p_type, params.voice_encryption);
		c_id = params.c_id;
	} else {
		should_encrypt = algs::should_encrypt(p_type, false);
		// No shared key yet: anything that ought to be encrypted can only be
		// fake encrypted at this point.
		if should_encrypt {
			fake_encrypt = true;
		}
		c_id = 0;
	}

	// Client id for clients
	if con.is_client {
		packet.client_id(c_id);
	}

	if !should_encrypt && !fake_encrypt {
		packet.flags(packet.header().flags() | Flags::UNENCRYPTED);
		if let Some(params) = con.params.as_mut() {
			packet.mac().copy_from_slice(&params.shared_mac);
		}
	}

	// Compress and split packet
	let packets = if p_type == PacketType::Command || p_type == PacketType::CommandLow {
		algs::compress_and_split(con.is_client, packet)
	} else {
		// Set the inner packet id for voice packets
		if con.is_client && (p_type == PacketType::Voice || p_type == PacketType::VoiceWhisper)
		{
			(&mut packet.content_mut()[..2])
				.write_be(con.codec.outgoing_p_ids[type_i].packet_id)
				.unwrap();
		}
		vec![packet]
	};

	let packets = packets
		.into_iter()
		.map(|mut packet| -> Result<_> {
			// Get packet id
			let p_id = if p_type == PacketType::Init {
				PartialPacketId { generation_id: 0, packet_id: 0 }
			} else {
				packet.packet_id(con.codec.outgoing_p_ids[type_i].packet_id);
				con.codec.outgoing_p_ids[type_i]
			};

			// Encrypt if necessary
			if fake_encrypt {
				algs::encrypt_fake(&mut packet)?;
			} else if should_encrypt {
				// The params are set
				let params = con.params.as_mut().unwrap();
				algs::encrypt(
					&mut packet,
					p_id.generation_id,
					&params.shared_iv,
					&mut params.key_cache,
				)?;
			}

			// Increment outgoing_p_ids
			let p_id = p_id + 1;
			if p_type != PacketType::Init {
				con.codec.outgoing_p_ids[type_i] = p_id;
			}
			Ok(OutUdpPacket::new(p_id.generation_id, packet))
		})
		.collect::<Result<Vec<_>>>()?;
	Ok(packets)
}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/benches/connect.rs | tsproto/benches/connect.rs | use anyhow::{Error, Result};
use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use once_cell::sync::Lazy;
use tracing::{info, warn};
use tsproto::client::Client;
use tsproto::connection::StreamItem;
use tsproto_packets::commands::CommandParser;
mod utils;
use crate::utils::*;
// Initialize the tracing subscriber exactly once across all bench iterations.
static TRACING: Lazy<()> = Lazy::new(|| tracing_subscriber::fmt().with_test_writer().init());
/// Consume stream items until the server sends `channellistfinished`.
///
/// Init and unexpected items are discarded (buffers handed back where
/// possible); connection errors are logged and ignored.
async fn wait_channellistfinished(con: &mut Client) -> Result<()> {
	con.filter_items(|con, item| {
		let res = match item {
			StreamItem::S2CInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::C2SInit(packet) => {
				con.hand_back_buffer(packet.into_buffer());
				None
			}
			StreamItem::Command(packet) => {
				let (name, _) = CommandParser::new(packet.data().packet().content());
				if name == b"channellistfinished" { Some(()) } else { None }
			}
			StreamItem::Error(error) => {
				warn!(%error, "Got connection error");
				None
			}
			item => {
				warn!(got = ?item, "Unexpected packet, waiting for channellistfinished");
				None
			}
		};
		Ok(res)
	})
	.await?;
	Ok(())
}
/// Benchmark one full connect/disconnect cycle.
///
/// Requires a server listening on 127.0.0.1:9987; each iteration performs a
/// complete handshake, waits for the channel list and disconnects again.
fn one_connect(b: &mut Bencher) {
	Lazy::force(&TRACING);
	let local_address = "127.0.0.1:0".parse().unwrap();
	let address = "127.0.0.1:9987".parse().unwrap();

	let rt = tokio::runtime::Runtime::new().unwrap();

	b.iter(|| {
		rt.block_on(async move {
			// The TS server does not accept the 3rd reconnect from the same port
			// so we create a new client for every connection.
			let mut con = create_client(local_address, address, 0).await?;
			connect(&mut con).await?;
			info!("Connected");
			// Wait until channellistfinished
			wait_channellistfinished(&mut con).await?;
			info!("Disconnecting");
			disconnect(&mut con).await?;
			Ok::<_, Error>(())
		})
		.unwrap();
	});
}
// Register the connect benchmark with criterion.
fn bench_connect(c: &mut Criterion) { c.bench_function("connect", one_connect); }

criterion_group! {
	name = benches;
	// Connecting is slow, so use a reduced sample size.
	config = Criterion::default().sample_size(20);
	targets = bench_connect
}
criterion_main!(benches);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/benches/license.rs | tsproto/benches/license.rs | use base64::prelude::*;
use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use tsproto::license::Licenses;
use tsproto_types::crypto::EccKeyPubEd25519;
/// Benchmark parsing a raw license chain, ignoring expiration dates.
fn license_parse(b: &mut Bencher, license: Vec<u8>) {
	// The clone is part of the measured work; parsing consumes the buffer.
	b.iter(|| Licenses::parse_ignore_expired(license.clone()).unwrap());
}
/// Benchmark deriving the public key from an already parsed license chain.
fn license_derive_key(b: &mut Bencher, license: Vec<u8>) {
	// Parse once outside the measured closure.
	let licenses = Licenses::parse_ignore_expired(license).unwrap();
	b.iter(|| {
		let root = EccKeyPubEd25519::from_bytes(tsproto::ROOT_KEY);
		let derived_key = licenses.derive_public_key(root).unwrap();
		derived_key.compress().0
	});
}
// Base64-encoded license chains used as benchmark fixtures.
// NOTE(review): the first appears to be an anonymous default license, the
// second a longer multi-entry chain — confirm against tsproto::license.
const STANDARD_LICENSE: &str = "AQA1hUFJiiSs0wFXkYuPUJVcDa6XCrZTcsvkB0\
Ffzz4CmwIITRXgCqeTYAcAAAAgQW5vbnltb3VzAACiIBip9hQaK6P3QhwOJs/BkPn0i\
oyIDPaNgzJ6M8x0kiAJf4hxCYAxMQ==";

const AAL_LICENSE: &str = "AQCvbHFTQDY/terPeilrp/ECU9xCH5U3xC92lY\
TNaY/0KQAJFueAazbsgAAAACVUZWFtU3BlYWsgU3lzdGVtcyBHbWJIAABhl9gwla/UJ\
p2Eszst9TRVXO/PeE6a6d+CTI6Pg7OEVgAJc5CrL4Nh8gAAACRUZWFtU3BlYWsgc3lz\
dGVtcyBHbWJIAACvTQIgpv6zmLZq3znh7ygmOSokGFkFjz4bTigrOnetrgIJdIIACdS\
/gAYAAAAAU29zc2VuU3lzdGVtcy5iaWQAADY7+uV1CQ1niOvYSdGzsu83kPTNWijovr\
3B78eHGeePIAm98vQJvpu0";
// Thin wrappers decoding the base64 fixtures before delegating to the
// generic benchmark helpers.
fn standard_license_parse(b: &mut Bencher) {
	let license = BASE64_STANDARD.decode(STANDARD_LICENSE).unwrap();
	license_parse(b, license);
}

fn aal_license_parse(b: &mut Bencher) {
	let license = BASE64_STANDARD.decode(AAL_LICENSE).unwrap();
	license_parse(b, license);
}

fn standard_license_derive_key(b: &mut Bencher) {
	let license = BASE64_STANDARD.decode(STANDARD_LICENSE).unwrap();
	license_derive_key(b, license);
}

fn aal_license_derive_key(b: &mut Bencher) {
	let license = BASE64_STANDARD.decode(AAL_LICENSE).unwrap();
	license_derive_key(b, license);
}
// Register all license parsing/derivation benchmarks.
fn bench_license(c: &mut Criterion) {
	c.bench_function("parse standard license", standard_license_parse);
	c.bench_function("parse aal license", aal_license_parse);
	c.bench_function("derive key standard license", standard_license_derive_key);
	c.bench_function("derive key aal license", aal_license_derive_key);
}

criterion_group!(benches, bench_license);
criterion_main!(benches);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/benches/modpow.rs | tsproto/benches/modpow.rs | use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use num_bigint::BigUint;
use num_traits::One;
#[cfg(feature = "rug")]
use rug::Integer;
/// Benchmark `BigUint::modpow` computing x^(2^level) mod n.
fn num_modpow(b: &mut Bencher) {
	let n = "9387019355706217197639129234358945126657617361248696932841794255538327365072557602175160199263073329488914880215590036563068284078359088114486271428098753";
	let x = "2148617454765635492758175407769288127281667975788420713054995716016550287184632946544163990319181591625774561067011999700977775946073267145316355582522577";
	let level = 10_000;

	let n: BigUint = n.parse().unwrap();
	let x: BigUint = x.parse().unwrap();
	// e = 2^level
	let e = BigUint::one() << (level as usize);

	b.iter(|| x.modpow(&e, &n));
}
#[cfg(feature = "rug")]
fn gmp_modpow(b: &mut Bencher) {
let n = "9387019355706217197639129234358945126657617361248696932841794255538327365072557602175160199263073329488914880215590036563068284078359088114486271428098753";
let x = "2148617454765635492758175407769288127281667975788420713054995716016550287184632946544163990319181591625774561067011999700977775946073267145316355582522577";
let level = 10_000;
let n: Integer = n.parse().unwrap();
let x: Integer = x.parse().unwrap();
let mut e = Integer::new();
e.set_bit(level, true);
b.iter(|| x.pow_mod_ref(&e, &n).unwrap());
}
// Register the modpow benchmarks; the GMP variant only with the `rug` feature.
fn bench_modpow(c: &mut Criterion) {
	c.bench_function("num bigint", num_modpow);
	#[cfg(feature = "rug")]
	{
		c.bench_function("gmp", gmp_modpow);
	}
}

criterion_group!(benches, bench_modpow);
criterion_main!(benches);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/benches/message.rs | tsproto/benches/message.rs | use anyhow::Error;
use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use tracing::info;
use tsproto_packets::packets::*;
mod utils;
use crate::utils::*;
/// Benchmark sending `sendtextmessage` commands over one connection.
///
/// Connects once, then measures queuing/sending a message per iteration;
/// requires a server listening on 127.0.0.1:9987. After the loop, waits for
/// the last ack and disconnects cleanly.
fn send_messages(b: &mut Bencher) {
	create_logger(false);
	let local_address = "127.0.0.1:0".parse().unwrap();
	let address = "127.0.0.1:9987".parse().unwrap();

	let rt = tokio::runtime::Runtime::new().unwrap();
	let mut con = rt
		.block_on(async move {
			let mut con = create_client(local_address, address, 0).await?;
			info!("Connecting");
			connect(&mut con).await?;
			Ok::<_, Error>(con)
		})
		.unwrap();

	let mut i = 0;
	let mut last_id = None;
	b.iter(|| {
		// Unique text per iteration so messages are distinguishable.
		let text = format!("Hello {}", i);
		let mut packet =
			OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "sendtextmessage");
		// targetmode 3 — presumably "server chat"; confirm against protocol docs.
		packet.write_arg("targetmode", &"3");
		packet.write_arg("msg", &text);
		i += 1;
		rt.block_on(async {
			con.wait_until_can_send().await.unwrap();
			last_id = Some(con.send_packet(packet.into_packet()).unwrap());
		});
	});

	rt.block_on(async move {
		if let Some(id) = last_id {
			info!("Waiting for {:?}", id);
			con.wait_for_ack(id).await?;
		}
		// Give the connection a short grace period; bail if it dies meanwhile.
		tokio::select! {
			_ = tokio::time::sleep(tokio::time::Duration::from_millis(50)) => {
			}
			_ = con.wait_disconnect() => {
				anyhow::bail!("Disconnected");
			}
		};
		info!("Disconnecting");
		disconnect(&mut con).await
	})
	.unwrap();
}
// Register the message benchmark.
fn bench_message(c: &mut Criterion) { c.bench_function("message", send_messages); }

criterion_group! {
	name = benches;
	// Sending a single message is fast, so a larger sample size is fine.
	config = Criterion::default().sample_size(200);
	targets = bench_message
}
criterion_main!(benches);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/benches/utils/mod.rs | tsproto/benches/utils/mod.rs | use std::net::SocketAddr;
use anyhow::Result;
use tokio::net::UdpSocket;
use tsproto::algorithms as algs;
use tsproto::client::Client;
use tsproto_packets::packets::*;
use tsproto_types::crypto::EccKeyPrivP256;
/// Install the global tracing subscriber, either appending to `bench.log`
/// or writing to the default output.
#[allow(dead_code)]
pub fn create_logger(to_file: bool) {
	if !to_file {
		tracing_subscriber::fmt::init();
		return;
	}
	tracing_subscriber::fmt()
		.with_writer(|| std::fs::OpenOptions::new().append(true).open("bench.log").unwrap())
		.init();
}
/// Create a new [`Client`] bound to `local_address` and pointed at
/// `remote_address`.
///
/// `verbose` controls how much packet logging is attached (0 = none).
pub async fn create_client(
	local_address: SocketAddr, remote_address: SocketAddr, verbose: u8,
) -> Result<Client> {
	// Hard-coded P-256 ECDH identity key shared by all benchmarks.
	let private_key = EccKeyPrivP256::import_str(
		"MG0DAgeAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+\
		k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50CIA8M5nm\
		DBnmDM/gZ//4AAAAAAAAAAAAAAAAAAAAZRzOI",
	)
	.unwrap();

	let socket = UdpSocket::bind(local_address).await?;
	let mut client = Client::new(remote_address, Box::new(socket), private_key);

	if verbose >= 1 {
		tsproto::log::add_logger(true, verbose > 1, verbose > 2, &mut client);
	}

	Ok(client)
}
/// Returns the `initserver` command.
///
/// Performs the low-level handshake via `Client::connect`, solves the hash
/// cash puzzle for the identity and sends `clientinit`, then waits for the
/// server's `initserver` answer.
pub async fn connect(con: &mut Client) -> Result<InCommandBuf> {
	con.connect().await?;

	// Send clientinit
	let private_key = EccKeyPrivP256::import_str(
		"MG0DAgeAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+\
		k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50CIA8M5nm\
		DBnmDM/gZ//4AAAAAAAAAAAAAAAAAAAAZRzOI").unwrap();
	let private_key_as_pub = private_key.to_pub();
	// Hash cash proof-of-work; 8 is presumably the required difficulty —
	// confirm against algs::hash_cash.
	let offset = algs::hash_cash(&private_key_as_pub, 8);

	// Create clientinit packet
	let offset = offset.to_string();
	let mut packet =
		OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "clientinit");
	for (arg, val) in [
		("client_nickname", "Bot"),
		("client_version", "3.?.? [Build: 5680278000]"),
		("client_platform", "Linux"),
		("client_input_hardware", "1"),
		("client_output_hardware", "1"),
		("client_default_channel", ""),
		("client_default_channel_password", ""),
		("client_server_password", ""),
		("client_meta_data", ""),
		(
			"client_version_sign",
			"Hjd+N58Gv3ENhoKmGYy2bNRBsNNgm5kpiaQWxOj5HN2DXttG6REjymSwJtpJ8muC2gSwRuZi0R+8Laan5ts5CQ==",
		),
		("client_nickname_phonetic", ""),
		("client_key_offset", &offset),
		("client_default_token", ""),
		("client_badges", "Overwolf=0"),
		(
			"hwid",
			"923f136fb1e22ae6ce95e60255529c00,\
			d13231b1bc33edfecfb9169cc7a63bcc",
		),
	] {
		packet.write_arg(arg, &val);
	}
	con.send_packet(packet.into_packet())?;

	// Discard everything until the initserver command arrives; buffers of
	// skipped commands are handed back for reuse.
	Ok(con
		.filter_commands(|con, cmd| {
			Ok(if cmd.data().packet().content().starts_with(b"initserver ") {
				Some(cmd)
			} else {
				con.hand_back_buffer(cmd.into_buffer());
				None
			})
		})
		.await?)
}
pub async fn disconnect(con: &mut Client) -> Result<()> {
let mut packet =
OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "clientdisconnect");
for (arg, val) in [
// Reason: Disconnect
("reasonid", "8"),
("reasonmsg", "Bye"),
] {
packet.write_arg(arg, &val);
}
con.send_packet(packet.into_packet())?;
con.wait_disconnect().await?;
Ok(())
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/test-ed25519.rs | tsproto/examples/test-ed25519.rs | use base64::prelude::*;
use clap::Parser;
use curve25519_dalek_ng::edwards::CompressedEdwardsY;
use curve25519_dalek_ng::scalar::Scalar;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// Public key
#[arg(short, long = "pub")]
pub_key: String,
/// Private key
#[arg(short, long = "priv")]
priv_key: String,
}
fn main() {
// Parse command line options
let args = Args::parse();
let pub_key = BASE64_STANDARD.decode(&args.pub_key).unwrap();
let priv_key = BASE64_STANDARD.decode(&args.priv_key).unwrap();
let mut pubk = [0; 32];
pubk.copy_from_slice(&pub_key);
let mut privk = [0; 32];
privk.copy_from_slice(&priv_key);
/*privk[0] &= 248;
privk[31] &= 63;
privk[31] |= 64;*/
let priv_scal = Scalar::from_bytes_mod_order(privk);
//let priv_scal = Scalar::from_bytes_mod_order(privk);
let pub_point_compr = CompressedEdwardsY(pubk);
let pub_point = -pub_point_compr.decompress().unwrap();
//let pub_point = curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
let res = (pub_point * priv_scal).compress();
println!("Result: {}", BASE64_STANDARD.encode(&res.0));
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/client.rs | tsproto/examples/client.rs | use std::net::SocketAddr;
use anyhow::{bail, Result};
use clap::Parser;
use tokio::time::{self, Duration};
use tracing::info;
use tsproto_packets::packets::*;
mod utils;
use crate::utils::*;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// The address of the server to connect to
#[arg(short, long, default_value = "127.0.0.1:9987")]
address: SocketAddr,
/// The listening address of the client
#[arg(long, default_value = "0.0.0.0:0")]
local_address: SocketAddr,
/// Print the content of all packets
///
/// 0. Print nothing
/// 1. Print command string
/// 2. Print packets
/// 3. Print udp packets
#[arg(short, long, action = clap::ArgAction::Count)]
verbose: u8,
}
#[tokio::main]
async fn main() -> Result<()> { real_main().await }
async fn real_main() -> Result<()> {
// Parse command line options
let args = Args::parse();
create_logger();
let mut con = create_client(args.local_address, args.address, args.verbose).await?;
// Connect
connect(&mut con).await?;
info!("Connected");
// Wait some time
tokio::select! {
_ = time::sleep(Duration::from_secs(2)) => {}
_ = con.wait_disconnect() => {
bail!("Disconnected");
}
};
info!("Waited");
// Send packet
let mut cmd =
OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "sendtextmessage");
cmd.write_arg("targetmode", &3);
cmd.write_arg("msg", &"Hello");
let id = con.send_packet(cmd.into_packet())?;
con.wait_for_ack(id).await?;
// Disconnect
disconnect(&mut con).await?;
info!("Disconnected");
Ok(())
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/test-decrypt.rs | tsproto/examples/test-decrypt.rs | use std::io::Write;
use clap::Parser;
use tsproto::algorithms as algs;
use tsproto::utils;
use tsproto_packets::packets::*;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// Print backtrace
#[arg(short, long = "debug")]
debug: bool,
/// Server to client
#[arg(short, long = "client")]
c2s: bool,
/// Data (hex)
#[arg()]
data: String,
}
fn main() {
// Parse command line options
let args = Args::parse();
let dir = if args.c2s { Direction::C2S } else { Direction::S2C };
let data = utils::read_hex(&args.data).unwrap();
let packet = match InPacket::try_new(dir, &data) {
Ok(p) => p,
Err(e) => {
if args.debug {
println!("Failed to decode: {:?}", e);
} else {
println!("Failed to decode: {}", e);
}
return;
}
};
let decrypted = match algs::decrypt_fake(&packet) {
Ok(d) => d,
Err(e) => {
if args.debug {
println!("Failed to decrypt: {:?}", e);
} else {
println!("Failed to decrypt: {}", e);
}
return;
}
};
std::io::stdout().write_all(&decrypted).unwrap();
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/decompress.rs | tsproto/examples/decompress.rs | use std::io::{Cursor, Write};
use clap::Parser;
use tsproto::utils;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// Data (hex)
#[arg()]
data: String,
}
fn main() {
// Parse command line options
let args = Args::parse();
let data = utils::read_hex(&args.data).unwrap();
let data = quicklz::decompress(&mut Cursor::new(data), std::u32::MAX).unwrap();
std::io::stdout().write_all(&data).unwrap();
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/test-proofs.rs | tsproto/examples/test-proofs.rs | use base64::prelude::*;
use clap::Parser;
use tsproto_types::crypto::EccKeyPubP256;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// Public key
#[arg(short, long)]
key: String,
/// Data (base64)
#[arg(short, long)]
data: String,
/// Signature (base64)
#[arg(short, long)]
signature: String,
}
fn main() {
// Parse command line options
let args = Args::parse();
// l → proof
// ek || beta → proof
let data = BASE64_STANDARD.decode(&args.data).unwrap();
let signature = BASE64_STANDARD.decode(&args.signature).unwrap();
let key = EccKeyPubP256::from_ts(&args.key).unwrap();
/*let keyts = tomcrypt::P256EccKey::import(&BASE64_STANDARD.decode(&args.key).unwrap())
.unwrap();
let res = keyts.verify_hash(&data, &signature).unwrap();
println!("Res: {:?}", res);*/
key.verify(&data, &signature).unwrap();
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/parse-license.rs | tsproto/examples/parse-license.rs | use base64::prelude::*;
use clap::Parser;
use tsproto::license::*;
#[derive(Parser, Debug)]
#[command(author, about)]
struct Args {
/// The license data (base64)
#[arg()]
license: String,
}
fn main() {
// Parse command line options
let args = Args::parse();
let l = Licenses::parse(BASE64_STANDARD.decode(&args.license).unwrap()).unwrap();
println!("{:#?}", l);
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/many-tsproto.rs | tsproto/examples/many-tsproto.rs | use std::net::SocketAddr;
use anyhow::Result;
use clap::Parser;
use futures::prelude::*;
use tokio::time::{self, Duration};
use tracing::info;
mod utils;
use crate::utils::*;
#[derive(Parser, Clone, Debug)]
#[command(author, about)]
struct Args {
/// The address of the server to connect to
#[arg(short, long, default_value = "127.0.0.1:9987")]
address: SocketAddr,
/// The listening address of the client
#[arg(long, default_value = "0.0.0.0:0")]
local_address: SocketAddr,
/// Print the content of all packets
///
/// 0. Print nothing
/// 1. Print command string
/// 2. Print packets
/// 3. Print udp packets
#[arg(short, long, action = clap::ArgAction::Count)]
verbose: u8,
/// How many connections
#[arg()]
count: usize,
}
#[tokio::main]
async fn main() -> Result<()> { real_main().await }
async fn real_main() -> Result<()> {
// Parse command line options
let args = Args::parse();
create_logger();
stream::iter(0..args.count)
.for_each_concurrent(None, |_| {
let args = args.clone();
tokio::spawn(async move {
let mut con =
create_client(args.local_address, args.address, args.verbose).await.unwrap();
// Connect
connect(&mut con).await.unwrap();
info!("Connected");
// Wait some time
tokio::select! {
_ = time::sleep(Duration::from_secs(2)) => {}
_ = con.wait_disconnect() => {
panic!("Disconnected");
}
};
info!("Waited");
// Disconnect
let _ = disconnect(&mut con).await;
info!("Disconnected");
})
.map(|_| ())
})
.await;
Ok(())
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/tsproto/examples/utils/mod.rs | tsproto/examples/utils/mod.rs | use std::net::SocketAddr;
use anyhow::Result;
use tokio::net::UdpSocket;
use tracing::{info, info_span};
use tsproto::algorithms as algs;
use tsproto::client::Client;
use tsproto_packets::packets::*;
use tsproto_types::crypto::EccKeyPrivP256;
pub fn create_logger() { tracing_subscriber::fmt::init(); }
pub async fn create_client(
local_address: SocketAddr, remote_address: SocketAddr, verbose: u8,
) -> Result<Client> {
// Get P-256 ECDH key
let private_key = EccKeyPrivP256::import_str(
"MG0DAgeAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+\
k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50CIA8M5nm\
DBnmDM/gZ//4AAAAAAAAAAAAAAAAAAAAZRzOI").unwrap();
let udp_socket = UdpSocket::bind(local_address).await?;
let mut con = Client::new(remote_address, Box::new(udp_socket), private_key);
if verbose >= 1 {
tsproto::log::add_logger_with_verbosity(verbose, &mut con)
}
Ok(con)
}
/// Returns the `initserver` command.
pub async fn connect(con: &mut Client) -> Result<InCommandBuf> {
con.connect().await?;
// Send clientinit
let private_key = EccKeyPrivP256::import_str(
"MG0DAgeAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+\
k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50CIA8M5nm\
DBnmDM/gZ//4AAAAAAAAAAAAAAAAAAAAZRzOI").unwrap();
// Compute hash cash
let offset;
{
let _span = info_span!("Compute public key hash cash level").entered();
let private_key_as_pub = private_key.to_pub();
offset = algs::hash_cash(&private_key_as_pub, 8);
let omega = private_key_as_pub.to_ts();
info!(
level = algs::get_hash_cash_level(&omega, offset),
offset, "Computed hash cash level"
);
}
// Create clientinit packet
let offset = offset.to_string();
let mut cmd =
OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "clientinit");
cmd.write_arg("client_nickname", &"Bot");
cmd.write_arg("client_version", &"3.?.? [Build: 5680278000]");
cmd.write_arg("client_platform", &"Linux");
cmd.write_arg("client_input_hardware", &"1");
cmd.write_arg("client_output_hardware", &"1");
cmd.write_arg("client_default_channel", &"");
cmd.write_arg("client_default_channel_password", &"");
cmd.write_arg("client_server_password", &"");
cmd.write_arg("client_meta_data", &"");
cmd.write_arg(
"client_version_sign",
&"Hjd+N58Gv3ENhoKmGYy2bNRBsNNgm5kpiaQWxOj5HN2DXttG6REjymSwJtpJ8muC2gSwRuZi0R+8Laan5ts5CQ==",
);
cmd.write_arg("client_nickname_phonetic", &"");
cmd.write_arg("client_key_offset", &offset);
cmd.write_arg("client_default_token", &"");
cmd.write_arg("client_badges", &"Overwolf=0");
cmd.write_arg("hwid", &"923f136fb1e22ae6ce95e60255529c00,d13231b1bc33edfecfb9169cc7a63bcc");
con.send_packet(cmd.into_packet())?;
Ok(con
.filter_commands(|con, cmd| {
Ok(if cmd.data().packet().content().starts_with(b"initserver ") {
Some(cmd)
} else {
con.hand_back_buffer(cmd.into_buffer());
None
})
})
.await?)
}
pub async fn disconnect(con: &mut Client) -> Result<()> {
let mut cmd =
OutCommand::new(Direction::C2S, Flags::empty(), PacketType::Command, "clientdisconnect");
cmd.write_arg("reasonid", &8);
cmd.write_arg("reasonmsg", &"Bye");
con.send_packet(cmd.into_packet())?;
con.wait_disconnect().await?;
Ok(())
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-packets/src/lib.rs | utils/tsproto-packets/src/lib.rs | //! `tsproto-packets` parses and serializes TeamSpeak packets and commands.
use std::fmt;
use thiserror::Error;
pub mod commands;
pub mod packets;
type Result<T, E = Error> = std::result::Result<T, E>;
pub const S2C_HEADER_LEN: usize = 11;
pub const C2S_HEADER_LEN: usize = 13;
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
#[error(transparent)]
Base64(#[from] base64::DecodeError),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error(transparent)]
ParseInt(#[from] std::num::ParseIntError),
#[error(transparent)]
Utf8(#[from] std::str::Utf8Error),
#[error(transparent)]
StringUtf8(#[from] std::string::FromUtf8Error),
#[error("Invalid init step {0}")]
InvalidInitStep(u8),
#[error("Invalid audio codec {0}")]
InvalidCodec(u8),
#[error("Packet content is too short (length {0})")]
PacketContentTooShort(usize),
#[error("Packet is too short (length {0})")]
PacketTooShort(usize),
#[error("Cannot parse command ({0})")]
ParseCommand(String),
#[error("Got a packet with unknown type ({0})")]
UnknownPacketType(u8),
#[error("Tried to parse a packet from the wrong direction")]
WrongDirection,
#[error("Wrong mac, expected TS3INIT1 but got {0:?}")]
WrongInitMac(Vec<u8>),
#[error("Wrong packet type ({0:?})")]
WrongPacketType(packets::PacketType),
}
pub struct HexSlice<'a, T: fmt::LowerHex + 'a>(pub &'a [T]);
impl<'a> fmt::Display for HexSlice<'a, u8> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Hex[")?;
if let Some((l, m)) = self.0.split_last() {
for b in m {
write!(f, "{:02x} ", b)?;
}
write!(f, "{:02x}", l)?;
}
write!(f, "]")
}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-packets/src/packets.rs | utils/tsproto-packets/src/packets.rs | #![allow(clippy::new_ret_no_self)]
use std::convert::TryInto;
use std::io::prelude::*;
use std::{fmt, io, str};
use base64::prelude::*;
use bitflags::bitflags;
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::{FromPrimitive as _, ToPrimitive as _};
use omnom::{ReadExt, WriteExt};
use serde::{Deserialize, Serialize};
use crate::{Error, HexSlice, Result};
#[derive(
Clone, Copy, Debug, Deserialize, Eq, FromPrimitive, Hash, PartialEq, ToPrimitive, Serialize,
)]
#[repr(u8)]
pub enum PacketType {
Voice,
VoiceWhisper,
Command,
CommandLow,
Ping,
Pong,
Ack,
AckLow,
Init,
}
#[derive(Clone, Copy, Deserialize, Debug, Eq, PartialEq, Hash, Serialize)]
pub enum Direction {
/// Going from the server to the client.
S2C,
/// Going from the client to the server.
C2S,
}
bitflags! {
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Flags: u8 {
const UNENCRYPTED = 0x80;
const COMPRESSED = 0x40;
const NEWPROTOCOL = 0x20;
const FRAGMENTED = 0x10;
}
}
impl PacketType {
pub fn is_command(self) -> bool {
self == PacketType::Command || self == PacketType::CommandLow
}
pub fn is_ack(self) -> bool {
self == PacketType::Ack || self == PacketType::AckLow || self == PacketType::Pong
}
pub fn is_voice(self) -> bool { self == PacketType::Voice || self == PacketType::VoiceWhisper }
}
#[repr(u8)]
#[derive(Debug, PartialEq, Eq, Clone, Copy, FromPrimitive, ToPrimitive)]
pub enum CodecType {
/// Mono, 16 bit, 8 kHz, bitrate dependent on the quality setting
SpeexNarrowband,
/// Mono, 16 bit, 16 kHz, bitrate dependent on the quality setting
SpeexWideband,
/// Mono, 16 bit, 32 kHz, bitrate dependent on the quality setting
SpeexUltrawideband,
/// Mono, 16 bit, 48 kHz, bitrate dependent on the quality setting
CeltMono,
/// Mono, 16 bit, 48 kHz, bitrate dependent on the quality setting, optimized for voice
OpusVoice,
/// Stereo, 16 bit, 48 kHz, bitrate dependent on the quality setting, optimized for music
OpusMusic,
}
macro_rules! create_buf {
($($name:ident, $borrow_name:ident, $convert:ident);*) => {
mod rentals {
use super::*;
$(self_cell::self_cell!(
pub struct $name {
owner: Vec<u8>,
#[covariant]
dependent: $borrow_name,
}
impl {Debug}
);)*
}
$(
#[derive(Debug)]
pub struct $name(rentals::$name);
impl $name {
/// `InPacket::try_new` is not checked and must succeed. Otherwise, it
/// panics.
#[inline]
pub fn try_new(direction: Direction, data: Vec<u8>) -> Result<Self> {
Ok(Self(rentals::$name::try_new(data, |data| InPacket::new(direction, data).$convert())?))
}
#[inline]
pub fn raw_data(&self) -> &[u8] { self.0.borrow_owner().as_slice() }
#[inline]
pub fn data(&self) -> &$borrow_name { self.0.borrow_dependent() }
#[inline]
pub fn into_buffer(self) -> Vec<u8> { self.0.into_owner() }
}
)*
}
}
create_buf!(InAudioBuf, InAudio, into_audio;
InCommandBuf, InCommand, into_command;
InC2SInitBuf, InC2SInit, into_c2sinit;
InS2CInitBuf, InS2CInit, into_s2cinit);
/// Used for debugging.
#[derive(Clone)]
pub struct InUdpPacket<'a>(pub InPacket<'a>);
impl<'a> InUdpPacket<'a> {
pub fn new(packet: InPacket<'a>) -> Self { Self(packet) }
}
#[derive(Clone)]
pub struct InHeader<'a> {
direction: Direction,
data: &'a [u8],
}
#[derive(Clone)]
pub struct InPacket<'a> {
header: InHeader<'a>,
content: &'a [u8],
}
#[derive(Clone, Debug)]
pub struct InCommand<'a> {
packet: InPacket<'a>,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)]
pub struct OutUdpPacket {
generation_id: u32,
data: OutPacket,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)]
pub struct OutPacket {
dir: Direction,
data: Vec<u8>,
}
/// The mac has to be `b"TS3INIT1"`.
///
/// `version` always contains the Teamspeak version as timestamp.
///
/// `timestamp` contains a current timestamp.
#[derive(Clone)]
pub enum C2SInitData<'a> {
Init0 {
version: u32,
timestamp: u32,
random0: &'a [u8; 4],
},
Init2 {
version: u32,
random1: &'a [u8; 16],
random0_r: &'a [u8; 4],
},
Init4 {
version: u32,
x: &'a [u8; 64],
n: &'a [u8; 64],
level: u32,
random2: &'a [u8; 100],
/// y = x ^ (2 ^ level) % n
y: &'a [u8; 64],
/// Has to be a `clientinitiv alpha=… omega=…` command.
command: &'a [u8],
},
}
#[derive(Clone)]
pub enum S2CInitData<'a> {
Init1 { random1: &'a [u8; 16], random0_r: &'a [u8; 4] },
Init3 { x: &'a [u8; 64], n: &'a [u8; 64], level: u32, random2: &'a [u8; 100] },
Init127 {},
}
#[derive(Clone, Debug)]
pub struct InS2CInit<'a> {
packet: InPacket<'a>,
data: S2CInitData<'a>,
}
#[derive(Clone, Debug)]
pub struct InC2SInit<'a> {
packet: InPacket<'a>,
data: C2SInitData<'a>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum AudioData<'a> {
C2S {
id: u16,
codec: CodecType,
data: &'a [u8],
},
C2SWhisper {
id: u16,
codec: CodecType,
channels: Vec<u64>,
clients: Vec<u16>,
data: &'a [u8],
},
/// When the `Flags::NEWPROTOCOL` is set.
C2SWhisperNew {
id: u16,
codec: CodecType,
whisper_type: u8,
target: u8,
target_id: u64,
data: &'a [u8],
},
S2C {
id: u16,
from: u16,
codec: CodecType,
data: &'a [u8],
},
S2CWhisper {
id: u16,
from: u16,
codec: CodecType,
data: &'a [u8],
},
}
#[derive(Clone)]
pub struct InAudio<'a> {
packet: InPacket<'a>,
data: AudioData<'a>,
}
#[must_use]
pub struct OutCommand(pub OutPacket);
pub struct OutC2SInit0;
pub struct OutC2SInit2;
pub struct OutC2SInit4;
pub struct OutS2CInit1;
pub struct OutS2CInit3;
pub struct OutAck;
pub struct OutAudio;
/// A helper to escape data while writing into a buffer.
struct EscapedWriter<'a>(&'a mut Vec<u8>);
impl Direction {
#[inline]
pub fn reverse(self) -> Self {
match self {
Direction::S2C => Direction::C2S,
Direction::C2S => Direction::S2C,
}
}
}
impl<'a> InPacket<'a> {
/// Do some sanity checks before creating the object.
#[inline]
pub fn try_new(direction: Direction, data: &'a [u8]) -> Result<Self> {
let header_len =
if direction == Direction::S2C { crate::S2C_HEADER_LEN } else { crate::C2S_HEADER_LEN };
if data.len() < header_len {
return Err(Error::PacketTooShort(data.len()));
}
// Check packet type
let p_type = data[header_len - 1] & 0xf;
if p_type > 8 {
return Err(Error::UnknownPacketType(p_type));
}
Ok(Self::new(direction, data))
}
/// This method expects that `data` holds a valid packet.
///
/// If not, further function calls may panic.
#[inline]
pub fn new(direction: Direction, data: &'a [u8]) -> Self {
let header = InHeader::new(direction, data);
let header_len = header.data.len();
Self { header, content: &data[header_len..] }
}
#[inline]
pub fn header(&self) -> &InHeader<'a> { &self.header }
#[inline]
pub fn content(&self) -> &[u8] { self.content }
/// Get the acknowledged packet id if this is an ack packet.
#[inline]
pub fn ack_packet(&self) -> Result<Option<u16>> {
let p_type = self.header().packet_type();
if p_type.is_ack() {
Ok(Some(
self.content()
.read_be()
.map_err(|_| Error::PacketContentTooShort(self.content().len()))?,
))
} else if p_type == PacketType::Init {
if self.header.direction == Direction::S2C {
Ok(Some(
self.content
.first()
.ok_or_else(|| Error::PacketContentTooShort(self.content().len()))
.and_then(|i| match u16::from(*i) {
1 => Ok(0),
3 => Ok(2),
127 => Ok(2), // Have to restart sending Init0, remove Init2 anyway
_ => Err(Error::InvalidInitStep(*i)),
})?,
))
} else {
Ok(self
.content
.get(4)
.ok_or_else(|| Error::PacketContentTooShort(self.content().len()))
.and_then(|i| match u16::from(*i) {
0 => Ok(None),
2 => Ok(Some(1)),
4 => Ok(Some(3)),
_ => Err(Error::InvalidInitStep(*i)),
})?)
}
} else {
Ok(None)
}
}
/// Parse this packet into a voice packet.
pub fn into_audio(self) -> Result<InAudio<'a>> {
let p_type = self.header().packet_type();
let newprotocol = self.header().flags().contains(Flags::NEWPROTOCOL);
let data = AudioData::parse(p_type, newprotocol, self.header.direction, self.content)?;
Ok(InAudio { packet: self, data })
}
/// Put this packet into a command packet.
pub fn into_command(self) -> Result<InCommand<'a>> { Ok(InCommand { packet: self }) }
pub fn into_s2cinit(self) -> Result<InS2CInit<'a>> {
if self.header.direction != Direction::S2C {
return Err(Error::WrongDirection);
}
let p_type = self.header().packet_type();
if p_type != PacketType::Init {
return Err(Error::WrongPacketType(p_type));
}
let mac = self.header().mac();
if mac != b"TS3INIT1" {
return Err(Error::WrongInitMac(mac.to_vec()));
}
if self.content.is_empty() {
return Err(Error::PacketContentTooShort(self.content.len()));
}
let data;
if self.content[0] == 1 {
if self.content.len() < 21 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
data = S2CInitData::Init1 {
random1: (&self.content[1..17]).try_into().unwrap(),
random0_r: (&self.content[17..21]).try_into().unwrap(),
};
} else if self.content[0] == 3 {
if self.content.len() < 233 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
data = S2CInitData::Init3 {
x: (&self.content[1..65]).try_into().unwrap(),
n: (&self.content[65..129]).try_into().unwrap(),
level: (&self.content[129..]).read_be()?,
random2: (&self.content[133..233]).try_into().unwrap(),
};
} else if self.content[0] == 127 {
data = S2CInitData::Init127 {};
} else {
return Err(Error::InvalidInitStep(self.content[0]));
}
Ok(InS2CInit { packet: self, data })
}
pub fn into_c2sinit(self) -> Result<InC2SInit<'a>> {
if self.header.direction != Direction::C2S {
return Err(Error::WrongDirection);
}
let p_type = self.header().packet_type();
if p_type != PacketType::Init {
return Err(Error::WrongPacketType(p_type));
}
let mac = self.header().mac();
if mac != b"TS3INIT1" {
return Err(Error::WrongInitMac(mac.to_vec()));
}
if self.content.len() < 5 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
let data;
let version = (&self.content[0..]).read_be()?;
if self.content[4] == 0 {
if self.content.len() < 13 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
data = C2SInitData::Init0 {
version,
timestamp: (&self.content[5..]).read_be()?,
random0: (&self.content[9..13]).try_into().unwrap(),
};
} else if self.content[4] == 2 {
if self.content.len() < 25 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
data = C2SInitData::Init2 {
version,
random1: (&self.content[5..21]).try_into().unwrap(),
random0_r: (&self.content[21..25]).try_into().unwrap(),
};
} else if self.content[4] == 4 {
let len = 5 + 128 + 4 + 100 + 64;
if self.content.len() < len + 20 {
return Err(Error::PacketContentTooShort(self.content.len()));
}
data = C2SInitData::Init4 {
version,
x: (&self.content[5..69]).try_into().unwrap(),
n: (&self.content[69..133]).try_into().unwrap(),
level: (&self.content[133..]).read_be()?,
random2: (&self.content[137..237]).try_into().unwrap(),
y: (&self.content[237..301]).try_into().unwrap(),
command: &self.content[len..],
};
} else {
return Err(Error::InvalidInitStep(self.content[0]));
}
Ok(InC2SInit { packet: self, data })
}
}
impl fmt::Debug for InPacket<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Packet({:?}", self.header())?;
let mut success = false;
match self.header.packet_type() {
PacketType::Voice | PacketType::VoiceWhisper => {
if let Ok(packet) = self.clone().into_audio() {
success = true;
write!(f, ", {:?}", packet)?;
}
}
PacketType::Command | PacketType::CommandLow => {
if let Ok(packet) = str::from_utf8(self.content()) {
success = true;
write!(f, ", {:?}", packet)?;
}
}
PacketType::Ping | PacketType::Pong | PacketType::Ack | PacketType::AckLow => {
success = true;
if !self.content().is_empty() {
write!(f, ", 0x")?;
}
for b in self.content() {
write!(f, "{:02x}", b)?;
}
}
PacketType::Init => {
if self.header.direction == Direction::C2S {
if let Ok(packet) = self.clone().into_c2sinit() {
success = true;
write!(f, ", {:?}", packet.data)?;
}
} else if let Ok(packet) = self.clone().into_s2cinit() {
success = true;
write!(f, ", {:?}", packet.data)?;
}
}
}
if !success {
write!(f, ", failed to parse, content: {})", HexSlice(self.content()))?;
}
write!(f, ")")?;
Ok(())
}
}
impl fmt::Debug for InUdpPacket<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Packet({:?}, content: {})", self.0.header(), HexSlice(self.0.content()))?;
Ok(())
}
}
impl fmt::Debug for InHeader<'_> {
#[rustfmt::skip]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Header(")?;
if self.mac() != &[0; 8] {
write!(f, "mac: {}, ", HexSlice(self.mac()))?;
}
write!(f, "id: {:#x}, ", self.packet_id())?;
if let Some(c_id) = self.client_id() {
write!(f, "c_id: {:#x}, ", c_id)?;
}
write!(f, "{:?}, ", self.packet_type())?;
write!(f, "")?;
let flags = self.flags();
write!(f, "{}", if flags.contains(Flags::UNENCRYPTED) { "u" } else { "-" })?;
write!(f, "{}", if flags.contains(Flags::COMPRESSED) { "c" } else { "-" })?;
write!(f, "{}", if flags.contains(Flags::NEWPROTOCOL) { "n" } else { "-" })?;
write!(f, "{}", if flags.contains(Flags::FRAGMENTED) { "f" } else { "-" })?;
write!(f, ")")?;
Ok(())
}
}
impl<'a> InHeader<'a> {
#[inline]
pub fn new(direction: Direction, data: &'a [u8]) -> Self {
let header_len =
if direction == Direction::S2C { crate::S2C_HEADER_LEN } else { crate::C2S_HEADER_LEN };
Self { direction, data: &data[..header_len] }
}
/// The offset to the packet type.
#[inline]
fn get_off(&self) -> usize { if self.direction == Direction::S2C { 10 } else { 12 } }
#[inline]
pub fn direction(&self) -> Direction { self.direction }
#[inline]
pub fn data(&self) -> &'a [u8] { self.data }
#[inline]
pub fn mac(&self) -> &'a [u8; 8] { (&self.data[..8]).try_into().unwrap() }
#[inline]
pub fn packet_id(&self) -> u16 { (&self.data[8..10]).read_be().unwrap() }
#[inline]
pub fn client_id(&self) -> Option<u16> {
if self.direction == Direction::S2C {
None
} else {
Some((&self.data[10..12]).read_be().unwrap())
}
}
#[inline]
pub fn flags(&self) -> Flags { Flags::from_bits(self.data[self.get_off()] & 0xf0).unwrap() }
#[inline]
pub fn packet_type(&self) -> PacketType {
PacketType::from_u8(self.data[self.get_off()] & 0xf).unwrap()
}
pub fn get_meta(&self) -> &'a [u8] { &self.data[8..] }
}
impl C2SInitData<'_> {
pub fn get_step(&self) -> u8 {
match self {
C2SInitData::Init0 { .. } => 0,
C2SInitData::Init2 { .. } => 2,
C2SInitData::Init4 { .. } => 4,
}
}
}
impl<'a> fmt::Debug for C2SInitData<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
C2SInitData::Init0 { .. } => write!(f, "Init0"),
C2SInitData::Init2 { .. } => write!(f, "Init2"),
C2SInitData::Init4 { level, command, .. } => {
write!(f, "Init4(level: {}, ", level)?;
if let Ok(s) = str::from_utf8(command) {
write!(f, "{:?}", s)?;
} else {
write!(f, "{}", HexSlice(command))?;
}
write!(f, ")")?;
Ok(())
}
}
}
}
impl S2CInitData<'_> {
pub fn get_step(&self) -> u8 {
match self {
S2CInitData::Init1 { .. } => 1,
S2CInitData::Init3 { .. } => 3,
S2CInitData::Init127 { .. } => 127,
}
}
}
impl<'a> fmt::Debug for S2CInitData<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
S2CInitData::Init1 { .. } => write!(f, "Init1"),
S2CInitData::Init3 { level, .. } => write!(f, "Init3(level: {})", level),
S2CInitData::Init127 { .. } => write!(f, "Init127"),
}
}
}
impl<'a> InCommand<'a> {
#[inline]
pub fn packet(&self) -> &InPacket<'a> { &self.packet }
}
impl<'a> AudioData<'a> {
pub fn parse(
p_type: PacketType, newprotocol: bool, dir: Direction, content: &'a [u8],
) -> Result<Self> {
let id = (&content[..]).read_be()?;
if p_type == PacketType::Voice {
if dir == Direction::S2C {
if content.len() < 5 {
return Err(Error::PacketContentTooShort(content.len()));
}
Ok(AudioData::S2C {
id,
from: (&content[2..]).read_be()?,
codec: CodecType::from_u8(content[4])
.ok_or_else(|| Error::InvalidCodec(content[4]))?,
data: &content[5..],
})
} else {
if content.len() < 3 {
return Err(Error::PacketContentTooShort(content.len()));
}
Ok(AudioData::C2S {
id,
codec: CodecType::from_u8(content[2])
.ok_or_else(|| Error::InvalidCodec(content[4]))?,
data: &content[3..],
})
}
} else if dir == Direction::S2C {
if content.len() < 5 {
return Err(Error::PacketContentTooShort(content.len()));
}
Ok(AudioData::S2CWhisper {
id,
from: (&content[2..]).read_be()?,
codec: CodecType::from_u8(content[4])
.ok_or_else(|| Error::InvalidCodec(content[4]))?,
data: &content[5..],
})
} else {
if content.len() < 3 {
return Err(Error::PacketContentTooShort(content.len()));
}
let codec =
CodecType::from_u8(content[2]).ok_or_else(|| Error::InvalidCodec(content[4]))?;
if newprotocol {
if content.len() < 14 {
return Err(Error::PacketContentTooShort(content.len()));
}
Ok(AudioData::C2SWhisperNew {
id,
codec,
whisper_type: content[3],
target: content[4],
target_id: (&content[5..]).read_be()?,
data: &content[13..],
})
} else {
if content.len() < 5 {
return Err(Error::PacketContentTooShort(content.len()));
}
let channel_count = content[3] as usize;
let client_count = content[4] as usize;
let channel_off = 5;
let client_off = channel_off + channel_count * 8;
let off = client_off + client_count * 2;
if content.len() < off {
return Err(Error::PacketContentTooShort(content.len()));
}
Ok(AudioData::C2SWhisper {
id,
codec,
channels: (0..channel_count)
.map(|i| (&content[channel_off + i * 8..]).read_be())
.collect::<::std::result::Result<Vec<_>, _>>()?,
clients: (0..client_count)
.map(|i| (&content[client_off + i * 2..]).read_be())
.collect::<::std::result::Result<Vec<_>, _>>()?,
data: &content[off..],
})
}
}
}
#[inline]
pub fn direction(&self) -> Direction {
match self {
AudioData::C2S { .. } => Direction::C2S,
AudioData::C2SWhisper { .. } => Direction::C2S,
AudioData::C2SWhisperNew { .. } => Direction::C2S,
AudioData::S2C { .. } => Direction::S2C,
AudioData::S2CWhisper { .. } => Direction::S2C,
}
}
#[inline]
pub fn packet_type(&self) -> PacketType {
match self {
AudioData::C2S { .. } => PacketType::Voice,
AudioData::C2SWhisper { .. } => PacketType::VoiceWhisper,
AudioData::C2SWhisperNew { .. } => PacketType::VoiceWhisper,
AudioData::S2C { .. } => PacketType::Voice,
AudioData::S2CWhisper { .. } => PacketType::VoiceWhisper,
}
}
#[inline]
pub fn codec(&self) -> CodecType {
match self {
AudioData::C2S { codec, .. } => *codec,
AudioData::C2SWhisper { codec, .. } => *codec,
AudioData::C2SWhisperNew { codec, .. } => *codec,
AudioData::S2C { codec, .. } => *codec,
AudioData::S2CWhisper { codec, .. } => *codec,
}
}
#[inline]
pub fn id(&self) -> u16 {
match self {
AudioData::C2S { id, .. } => *id,
AudioData::C2SWhisper { id, .. } => *id,
AudioData::C2SWhisperNew { id, .. } => *id,
AudioData::S2C { id, .. } => *id,
AudioData::S2CWhisper { id, .. } => *id,
}
}
#[inline]
pub fn flags(&self) -> Flags {
match self {
AudioData::C2S { .. } => Flags::empty(),
AudioData::C2SWhisper { .. } => Flags::empty(),
AudioData::C2SWhisperNew { .. } => Flags::NEWPROTOCOL,
AudioData::S2C { .. } => Flags::empty(),
AudioData::S2CWhisper { .. } => Flags::empty(),
}
}
#[inline]
pub fn data(&self) -> &[u8] {
match self {
AudioData::C2S { data, .. } => data,
AudioData::C2SWhisper { data, .. } => data,
AudioData::C2SWhisperNew { data, .. } => data,
AudioData::S2C { data, .. } => data,
AudioData::S2CWhisper { data, .. } => data,
}
}
}
impl<'a> InS2CInit<'a> {
    /// The raw incoming packet.
    #[inline]
    pub fn packet(&self) -> &InPacket<'a> { &self.packet }
    /// The parsed init payload.
    #[inline]
    pub fn data(&self) -> &S2CInitData { &self.data }
}
impl<'a> InC2SInit<'a> {
    /// The raw incoming packet.
    #[inline]
    pub fn packet(&self) -> &InPacket<'a> { &self.packet }
    /// The parsed init payload.
    #[inline]
    pub fn data(&self) -> &C2SInitData { &self.data }
}
impl<'a> InAudio<'a> {
    /// The raw incoming packet.
    #[inline]
    pub fn packet(&self) -> &InPacket<'a> { &self.packet }
    /// The parsed audio payload.
    #[inline]
    pub fn data(&self) -> &AudioData { &self.data }
}
impl fmt::Debug for InAudio<'_> {
    /// Compact rendering: variant name, scalar fields, and the audio payload
    /// as hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match &self.data {
            AudioData::C2S { id, codec, data } => {
                write!(f, "Audio(id: {}, {:?}, {})", id, codec, HexSlice(data))?;
            }
            AudioData::C2SWhisper { id, codec, channels, clients, data } => {
                write!(
                    f,
                    "Whisper(id: {}, {:?}, channels: {:?}, clients: {:?}, {})",
                    id,
                    codec,
                    channels,
                    clients,
                    HexSlice(data)
                )?;
            }
            AudioData::C2SWhisperNew { id, codec, whisper_type, target, target_id, data } => {
                write!(
                    f,
                    "WhisperNew(id: {}, {:?}, type: {}, target: {}, target_id: {}, {})",
                    id,
                    codec,
                    whisper_type,
                    target,
                    target_id,
                    HexSlice(data)
                )?;
            }
            AudioData::S2C { id, from, codec, data } => {
                write!(f, "Audio(id: {}, from: {}, {:?}, {})", id, from, codec, HexSlice(data))?;
            }
            AudioData::S2CWhisper { id, from, codec, data } => {
                write!(f, "Whisper(id: {}, from: {}, {:?}, {})", id, from, codec, HexSlice(data))?;
            }
        }
        Ok(())
    }
}
impl OutPacket {
    /// Create a packet from all header fields.
    ///
    /// The direction is inferred from `client_id`: only client-to-server
    /// packets carry a client id.
    #[inline]
    pub fn new(
        mac: [u8; 8], packet_id: u16, client_id: Option<u16>, flags: Flags, packet_type: PacketType,
    ) -> Self {
        let dir = if client_id.is_some() { Direction::C2S } else { Direction::S2C };
        let mut res = Self::new_with_dir(dir, flags, packet_type);
        res.data[..8].copy_from_slice(&mac);
        res.packet_id(packet_id);
        if let Some(cid) = client_id {
            res.client_id(cid);
        }
        res
    }
    /// Fill packet with known data. The rest gets filled by `packet_codec`.
    #[inline]
    pub fn new_with_dir(dir: Direction, flags: Flags, packet_type: PacketType) -> Self {
        // Allocate a zeroed header; S2C and C2S headers have different lengths.
        let data =
            vec![
                0;
                if dir == Direction::S2C { crate::S2C_HEADER_LEN } else { crate::C2S_HEADER_LEN }
            ];
        let mut res = Self { dir, data };
        res.flags(flags);
        res.packet_type(packet_type);
        res
    }
    #[inline]
    pub fn new_from_data(dir: Direction, data: Vec<u8>) -> Self { Self { dir, data } }
    #[inline]
    pub fn into_vec(self) -> Vec<u8> { self.data }
    // Offset where the payload starts, i.e. the header length for this
    // direction.
    #[inline]
    fn content_offset(&self) -> usize {
        if self.dir == Direction::S2C { crate::S2C_HEADER_LEN } else { crate::C2S_HEADER_LEN }
    }
    #[inline]
    pub fn data(&self) -> &[u8] { &self.data }
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<u8> { &mut self.data }
    /// The payload (everything after the header).
    #[inline]
    pub fn content(&self) -> &[u8] { &self.data[self.content_offset()..] }
    #[inline]
    pub fn content_mut(&mut self) -> &mut [u8] {
        let off = self.content_offset();
        &mut self.data[off..]
    }
    #[inline]
    pub fn direction(&self) -> Direction { self.dir }
    /// Borrow the header as a parsed view.
    #[inline]
    pub fn header(&self) -> InHeader { InHeader { direction: self.dir, data: self.header_bytes() } }
    #[inline]
    pub fn header_bytes(&self) -> &[u8] { &self.data[..self.content_offset()] }
    #[inline]
    pub fn packet(&self) -> InPacket { InPacket::new(self.dir, &self.data) }
    /// The mac occupies the first 8 bytes of every packet.
    #[inline]
    pub fn mac(&mut self) -> &mut [u8; 8] { (&mut self.data[0..8]).try_into().unwrap() }
    /// Write the big-endian packet id at bytes 8..10.
    #[inline]
    pub fn packet_id(&mut self, packet_id: u16) {
        (&mut self.data[8..10]).write_be(packet_id).unwrap();
    }
    /// Write the big-endian client id at bytes 10..12.
    ///
    /// # Panics
    /// Panics for server-to-client packets, which have no client id field.
    #[inline]
    pub fn client_id(&mut self, client_id: u16) {
        assert_eq!(
            self.dir,
            Direction::C2S,
            "Client id is only valid for client to server packets"
        );
        (&mut self.data[10..12]).write_be(client_id).unwrap();
    }
    /// Set the flag bits (upper part of the type byte) while keeping the
    /// packet type in the low nibble.
    #[inline]
    pub fn flags(&mut self, flags: Flags) {
        let off = self.header().get_off();
        self.data[off] = (self.data[off] & 0xf) | flags.bits();
    }
    /// Set the packet type (low nibble of the type byte) while keeping the
    /// flag bits.
    #[inline]
    pub fn packet_type(&mut self, packet_type: PacketType) {
        let off = self.header().get_off();
        self.data[off] = (self.data[off] & 0xf0) | packet_type.to_u8().unwrap();
    }
}
impl OutUdpPacket {
    #[inline]
    pub fn new(generation_id: u32, data: OutPacket) -> Self { Self { generation_id, data } }
    #[inline]
    pub fn generation_id(&self) -> u32 { self.generation_id }
    #[inline]
    pub fn data(&self) -> &OutPacket { &self.data }
    /// The packet id of the contained packet.
    ///
    /// Init packets carry no id in the header; their step counter is used
    /// instead (first content byte for S2C, fifth byte — after the version —
    /// for C2S).
    #[inline]
    pub fn packet_id(&self) -> u16 {
        if self.packet_type() == PacketType::Init {
            if self.data.dir == Direction::S2C {
                u16::from(self.data.content()[0])
            } else {
                u16::from(self.data.content()[4])
            }
        } else {
            self.data.header().packet_id()
        }
    }
    #[inline]
    pub fn packet_type(&self) -> PacketType { self.data.header().packet_type() }
}
impl io::Write for EscapedWriter<'_> {
    /// Appends `buf` to the underlying buffer with TeamSpeak command
    /// escaping applied: control characters, backslash, pipe, space and
    /// slash become two-byte `\x` sequences, everything else is copied
    /// verbatim.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Reserve at least the unescaped length; escapes may grow beyond it.
        self.0.reserve(buf.len());
        for c in buf {
            match c {
                b'\x0b' => self.0.extend_from_slice(b"\\v"),
                b'\x0c' => self.0.extend_from_slice(b"\\f"),
                b'\\' => self.0.extend_from_slice(b"\\\\"),
                b'\t' => self.0.extend_from_slice(b"\\t"),
                b'\r' => self.0.extend_from_slice(b"\\r"),
                b'\n' => self.0.extend_from_slice(b"\\n"),
                b'|' => self.0.extend_from_slice(b"\\p"),
                b' ' => self.0.extend_from_slice(b"\\s"),
                b'/' => self.0.extend_from_slice(b"\\/"),
                c => self.0.push(*c),
            }
        }
        // Report the consumed input length, not the (possibly longer)
        // escaped output length.
        Ok(buf.len())
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
impl OutCommand {
    /// Start a new command packet whose content begins with `name`.
    #[inline]
    pub fn new(dir: Direction, flags: Flags, p_type: PacketType, name: &str) -> Self {
        let mut res = Self(OutPacket::new_with_dir(dir, Flags::empty(), p_type));
        res.0.flags(flags);
        res.0.data.extend_from_slice(name.as_bytes());
        res
    }
    /// For binary arguments. The value will still be escaped.
    #[inline]
    pub fn write_bin_arg(&mut self, name: &str, value: &[u8]) {
        // Separate from the previous token with a space, except at the very
        // start of the content or right after a `|` part separator.
        if self.0.content().last() != Some(&b'|') && !self.0.content().is_empty() {
            self.0.data.push(b' ');
        }
        self.0.data.extend_from_slice(name.as_bytes());
        // An empty value is serialized as a bare name without `=`.
        if !value.is_empty() {
            self.0.data.push(b'=');
            EscapedWriter(&mut self.0.data).write_all(value).unwrap();
        }
    }
    /// The value will be formatted and escaped.
    #[inline]
    pub fn write_arg(&mut self, name: &str, value: &dyn fmt::Display) {
        if self.0.content().last() != Some(&b'|') && !self.0.content().is_empty() {
            self.0.data.push(b' ');
        }
        self.0.data.extend_from_slice(name.as_bytes());
        self.0.data.push(b'=');
        // Remember the length so we can detect an empty formatted value.
        let len = self.0.data.len();
        write!(EscapedWriter(&mut self.0.data), "{}", value).unwrap();
        if self.0.data.len() == len {
            // Nothing was written, remove =
            self.0.data.pop();
        }
    }
    /// Adds a pipe symbol `|` to the command.
    #[inline]
    pub fn start_new_part(&mut self) { self.0.data.push(b'|'); }
    #[inline]
    pub fn into_packet(self) -> OutPacket { self.0 }
}
impl OutC2SInit0 {
    /// Build the first client init packet (step 0 of the low-level
    /// handshake).
    ///
    /// Layout: version (4), step `0` (1), timestamp (4), `random0` (4),
    /// 8 reserved zero bytes. The mac is the fixed string `TS3INIT1` and
    /// the packet id is always `0x65`.
    pub fn new(version: u32, timestamp: u32, random0: [u8; 4]) -> OutPacket {
        let mut res = OutPacket::new_with_dir(Direction::C2S, Flags::empty(), PacketType::Init);
        res.mac().copy_from_slice(b"TS3INIT1");
        res.packet_id(0x65);
        let content = res.data_mut();
        content.write_be(version).unwrap();
        content.write_be(0u8).unwrap();
        content.write_be(timestamp).unwrap();
        content.write_all(&random0).unwrap();
        // Reserved
        content.write_all(&[0u8; 8]).unwrap();
        res
    }
}
impl OutC2SInit2 {
    /// Build the second client init packet (step 2).
    ///
    /// Layout: version (4), step `2` (1), the server's `random1` (16) and
    /// the reversed `random0` echoed back.
    pub fn new(version: u32, random1: &[u8; 16], random0_r: [u8; 4]) -> OutPacket {
        let mut res = OutPacket::new_with_dir(Direction::C2S, Flags::empty(), PacketType::Init);
        res.mac().copy_from_slice(b"TS3INIT1");
        res.packet_id(0x65);
        let content = res.data_mut();
        content.write_be(version).unwrap();
        content.write_be(2u8).unwrap();
        content.write_all(random1).unwrap();
        content.write_all(&random0_r).unwrap();
        res
    }
}
impl OutC2SInit4 {
    /// Build the last client init packet (step 4) containing the solved
    /// puzzle and the `clientinitiv` command.
    ///
    /// Layout: version (4), step `4` (1), puzzle parameters `x` and `n`
    /// (64 each), `level` (4), `random2` (100), solution `y` (64), followed
    /// by the ASCII `clientinitiv` command with base64-encoded
    /// `alpha`/`omega`.
    pub fn new(
        version: u32, x: &[u8; 64], n: &[u8; 64], level: u32, random2: &[u8; 100], y: &[u8; 64],
        alpha: &[u8], omega: &[u8], ip: &str,
    ) -> OutPacket {
        let mut res = OutPacket::new_with_dir(Direction::C2S, Flags::empty(), PacketType::Init);
        res.mac().copy_from_slice(b"TS3INIT1");
        res.packet_id(0x65);
        let content = res.data_mut();
        content.write_be(version).unwrap();
        content.write_be(4u8).unwrap();
        content.write_all(x).unwrap();
        content.write_all(n).unwrap();
        content.write_be(level).unwrap();
        content.write_all(random2).unwrap();
        content.write_all(y).unwrap();
        // An empty ip is serialized as a bare `ip` argument without `=`.
        let ip = if ip.is_empty() { String::new() } else { format!("={}", ip) };
        content
            .write_all(
                format!(
                    "clientinitiv alpha={} omega={} ot=1 ip{}",
                    BASE64_STANDARD.encode(alpha),
                    BASE64_STANDARD.encode(omega),
                    ip
                )
                .as_bytes(),
            )
            .unwrap();
        res
    }
}
impl OutS2CInit1 {
    /// Build the first server init packet (step 1).
    ///
    /// Layout: step `1` (1), `random1` (16), reversed `random0` (4). S2C
    /// init packets carry no version field.
    pub fn new(random1: &[u8; 16], random0_r: [u8; 4]) -> OutPacket {
        let mut res = OutPacket::new_with_dir(Direction::S2C, Flags::empty(), PacketType::Init);
        res.mac().copy_from_slice(b"TS3INIT1");
        res.packet_id(0x65);
        let content = res.data_mut();
        content.write_be(1u8).unwrap();
        content.write_all(random1).unwrap();
        content.write_all(&random0_r).unwrap();
        res
    }
}
impl OutS2CInit3 {
    /// Build the second server init packet (step 3) containing the puzzle.
    ///
    /// Layout: step `3` (1), `x` (64), `n` (64), `level` (4), `random2`
    /// (100).
    pub fn new(x: &[u8; 64], n: &[u8; 64], level: u32, random2: &[u8; 100]) -> OutPacket {
        let mut res = OutPacket::new_with_dir(Direction::S2C, Flags::empty(), PacketType::Init);
        res.mac().copy_from_slice(b"TS3INIT1");
        res.packet_id(0x65);
        let content = res.data_mut();
        content.write_be(3u8).unwrap();
        content.write_all(x).unwrap();
        content.write_all(n).unwrap();
        content.write_be(level).unwrap();
        content.write_all(random2).unwrap();
        res
    }
}
impl OutAck {
    /// Create an acknowledgement packet.
    ///
    /// `for_type` is the packet type which gets acknowledged, so e.g. `Command`.
    ///
    /// # Panics
    /// Panics when `for_type` is not `Command`, `CommandLow` or `Ping`.
    pub fn new(dir: Direction, for_type: PacketType, packet_id: u16) -> OutPacket {
        let p_type = match for_type {
            PacketType::Command => PacketType::Ack,
            PacketType::CommandLow => PacketType::AckLow,
            PacketType::Ping => PacketType::Pong,
            _ => panic!("Invalid packet type to create ack {:?}", for_type),
        };
        let mut packet = OutPacket::new_with_dir(dir, Flags::empty(), p_type);
        // The payload is just the acknowledged packet id.
        packet.data_mut().write_be(packet_id).unwrap();
        packet
    }
}
impl OutAudio {
    /// Serialize an [`AudioData`] into a packet; the inverse of
    /// `AudioData::parse`.
    pub fn new(data: &AudioData) -> OutPacket {
        let mut res = OutPacket::new_with_dir(data.direction(), data.flags(), data.packet_type());
        let content = res.data_mut();
        // The packet id always comes first.
        content.write_be(data.id()).unwrap();
        match data {
            AudioData::C2S { codec, data, .. } => {
                content.write_be(codec.to_u8().unwrap()).unwrap();
                content.extend_from_slice(data);
            }
            AudioData::C2SWhisper { codec, channels, clients, data, .. } => {
                content.write_be(codec.to_u8().unwrap()).unwrap();
                // Counts first, then the channel and client id lists.
                content.write_be(channels.len() as u8).unwrap();
                content.write_be(clients.len() as u8).unwrap();
                for c in channels {
                    content.write_be(*c).unwrap();
                }
                for c in clients {
                    content.write_be(*c).unwrap();
                }
                content.extend_from_slice(data);
            }
            AudioData::C2SWhisperNew { codec, whisper_type, target, target_id, data, .. } => {
                content.write_be(codec.to_u8().unwrap()).unwrap();
                content.write_be(whisper_type.to_u8().unwrap()).unwrap();
                content.write_be(*target).unwrap();
                content.write_be(*target_id).unwrap();
                content.extend_from_slice(data);
            }
            // Both S2C variants share the same wire layout.
            AudioData::S2C { from, codec, data, .. }
            | AudioData::S2CWhisper { from, codec, data, .. } => {
                content.write_be(*from).unwrap();
                content.write_be(codec.to_u8().unwrap()).unwrap();
                content.extend_from_slice(data);
            }
        }
        res
    }
}
#[cfg(test)]
mod tests {
use super::*;
fn test_audio_roundtrip(dir: Direction, data: &AudioData) {
let out_p = OutAudio::new(data);
let in_p = InPacket::try_new(dir, out_p.data()).unwrap();
let audio = in_p.into_audio().unwrap();
assert_eq!(audio.data(), data);
}
#[test]
fn test_audio_c2s() {
let data = AudioData::C2S { codec: CodecType::OpusVoice, id: 0x1234, data: &[1, 2, 3] };
test_audio_roundtrip(Direction::C2S, &data);
}
#[test]
fn test_audio_c2s_whisper() {
let data = AudioData::C2SWhisper {
codec: CodecType::OpusVoice,
channels: vec![4, 5, 6],
clients: vec![7, 8],
id: 0x1234,
data: &[1, 2, 3],
};
test_audio_roundtrip(Direction::C2S, &data);
}
#[test]
fn test_audio_c2s_whisper_new() {
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | true |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-packets/src/commands.rs | utils/tsproto-packets/src/commands.rs | use std::borrow::Cow;
use std::str::{self, FromStr};
use crate::{Error, Result};
/// Parses arguments of a command.
#[derive(Clone, Debug)]
pub struct CommandParser<'a> {
    data: &'a [u8],
    // Cursor into `data`.
    index: usize,
}
/// One item produced while iterating over a command's body.
#[derive(Clone, Debug)]
pub enum CommandItem<'a> {
    Argument(CommandArgument<'a>),
    /// Pipe symbol marking the start of the next command.
    NextCommand,
}
/// A single `name=value` (or bare `name`) argument.
#[derive(Clone, Debug)]
pub struct CommandArgument<'a> {
    name: &'a [u8],
    value: CommandArgumentValue<'a>,
}
/// An argument value that is unescaped lazily on access.
#[derive(Clone, Debug)]
pub struct CommandArgumentValue<'a> {
    raw: &'a [u8],
    /// The number of escape sequences in this value.
    escapes: usize,
}
impl<'a> CommandParser<'a> {
    /// Returns the name and arguments of the given command.
    ///
    /// The name is the leading run of ASCII alphanumeric bytes up to the
    /// first space. If any other byte appears inside that run, the data does
    /// not start with a command name: an empty name is returned and the
    /// whole input is parsed as arguments.
    #[inline]
    pub fn new(data: &'a [u8]) -> (&'a [u8], Self) {
        let mut name_end = 0;
        while name_end < data.len() {
            if !data[name_end].is_ascii_alphanumeric() {
                if data[name_end] == b' ' {
                    break;
                }
                // Not a command name
                name_end = 0;
                break;
            }
            name_end += 1;
        }
        (&data[..name_end], Self { data, index: name_end })
    }
    // Current byte; callers must check `at_end` first.
    fn cur(&self) -> u8 { self.data[self.index] }
    // Whether the current byte is one of `cs`.
    fn cur_in(&self, cs: &[u8]) -> bool { cs.contains(&self.cur()) }
    /// Advance
    fn adv(&mut self) { self.index += 1; }
    fn at_end(&self) -> bool { self.index >= self.data.len() }
    // Skip whitespace between arguments.
    fn skip_space(&mut self) {
        while !self.at_end() && self.cur_in(b"\x0b\x0c\t\r\n ") {
            self.adv();
        }
    }
}
impl<'a> Iterator for CommandParser<'a> {
    type Item = CommandItem<'a>;
    /// Yields the next argument or part separator; `None` at end of input.
    ///
    /// Escape sequences (`\x`) inside values are only counted here;
    /// unescaping happens lazily in `CommandArgumentValue`.
    fn next(&mut self) -> Option<Self::Item> {
        self.skip_space();
        if self.at_end() {
            return None;
        }
        // A pipe starts the next command of a multi-part command.
        if self.cur() == b'|' {
            self.adv();
            return Some(CommandItem::NextCommand);
        }
        // The argument name runs until a space, `=` or `|`.
        let name_start = self.index;
        while !self.at_end() && !self.cur_in(b" =|") {
            self.adv();
        }
        let name_end = self.index;
        // No `=` means an argument without a value.
        if self.at_end() || self.cur() != b'=' {
            return Some(CommandItem::Argument(CommandArgument {
                name: &self.data[name_start..name_end],
                value: CommandArgumentValue { raw: &[], escapes: 0 },
            }));
        }
        self.adv();
        let value_start = self.index;
        let mut escapes = 0;
        while !self.at_end() {
            // Unescaped whitespace or `|` terminates the value.
            if self.cur_in(b"\x0b\x0c\t\r\n| ") {
                break;
            }
            if self.cur() == b'\\' {
                escapes += 1;
                self.adv();
                // A backslash before a terminator or end of input also ends
                // the value.
                if self.at_end() || self.cur_in(b"\x0b\x0c\t\r\n| ") {
                    break;
                }
            }
            self.adv();
        }
        let value_end = self.index;
        Some(CommandItem::Argument(CommandArgument {
            name: &self.data[name_start..name_end],
            value: CommandArgumentValue { raw: &self.data[value_start..value_end], escapes },
        }))
    }
}
impl<'a> CommandArgument<'a> {
    /// The argument name (raw bytes; names are never escaped).
    #[inline]
    pub fn name(&self) -> &'a [u8] { self.name }
    /// The (still escaped) argument value.
    #[inline]
    pub fn value(&self) -> &CommandArgumentValue<'a> { &self.value }
}
impl<'a> CommandArgumentValue<'a> {
    // Resolve all `\x` escape sequences. Each escape shrinks the output by
    // exactly one byte, hence the capacity below.
    fn unescape(&self) -> Vec<u8> {
        let mut res = Vec::with_capacity(self.raw.len() - self.escapes);
        let mut i = 0;
        while i < self.raw.len() {
            if self.raw[i] == b'\\' {
                i += 1;
                // A trailing backslash is dropped.
                if i == self.raw.len() {
                    return res;
                }
                res.push(match self.raw[i] {
                    b'v' => b'\x0b',
                    b'f' => b'\x0c',
                    b't' => b'\t',
                    b'r' => b'\r',
                    b'n' => b'\n',
                    b'p' => b'|',
                    b's' => b' ',
                    // Unknown escapes (including `\\` and `\/`) map to the
                    // escaped byte itself.
                    c => c,
                });
            } else {
                res.push(self.raw[i]);
            }
            i += 1;
        }
        res
    }
    /// The raw value as it appears on the wire, escapes included.
    #[inline]
    pub fn get_raw(&self) -> &'a [u8] { self.raw }
    /// The unescaped value; borrows when no escapes are present.
    #[inline]
    pub fn get(&self) -> Cow<'a, [u8]> {
        if self.escapes == 0 { Cow::Borrowed(self.raw) } else { Cow::Owned(self.unescape()) }
    }
    /// The unescaped value as UTF-8.
    ///
    /// # Errors
    /// Fails if the (unescaped) value is not valid UTF-8.
    #[inline]
    pub fn get_str(&self) -> Result<Cow<'a, str>> {
        if self.escapes == 0 {
            Ok(Cow::Borrowed(str::from_utf8(self.raw)?))
        } else {
            Ok(Cow::Owned(String::from_utf8(self.unescape())?))
        }
    }
    /// Unescape the value and parse it with `FromStr`.
    #[inline]
    pub fn get_parse<E, T: FromStr>(&self) -> std::result::Result<T, E>
    where
        E: From<<T as FromStr>::Err>,
        E: From<Error>,
    {
        Ok(self.get_str()?.as_ref().parse()?)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::packets::{Direction, Flags, OutCommand, PacketType};
use std::str;
/// Parse and write again.
fn test_loop_with_result(data: &[u8], result: &[u8]) {
let (name, parser) = CommandParser::new(data);
let name = str::from_utf8(name).unwrap();
let mut out_command =
OutCommand::new(Direction::S2C, Flags::empty(), PacketType::Command, name);
println!("\nParsing {}", str::from_utf8(data).unwrap());
for item in parser {
println!("Item: {:?}", item);
match item {
CommandItem::NextCommand => out_command.start_new_part(),
CommandItem::Argument(arg) => {
out_command.write_arg(
str::from_utf8(arg.name()).unwrap(),
&arg.value().get_str().unwrap(),
);
}
}
}
let packet = out_command.into_packet();
let in_str = str::from_utf8(result).unwrap();
let out_str = str::from_utf8(packet.content()).unwrap();
assert_eq!(in_str, out_str);
}
/// Parse and write again.
fn test_loop(data: &[u8]) { test_loop_with_result(data, data); }
const TEST_COMMANDS: &[&str] = &[
"cmd a=1 b=2 c=3",
"cmd a=\\s\\\\ b=\\p c=abc\\tdef",
"cmd a=1 c=3 b=2|b=4|b=5",
"initivexpand2 l=AQCVXTlKF+UQc0yga99dOQ9FJCwLaJqtDb1G7xYPMvHFMwIKVfKADF6zAAcAAAAgQW5vbnltb3VzAAAKQo71lhtEMbqAmtuMLlY8Snr0k2Wmymv4hnHNU6tjQCALKHewCykgcA== beta=\\/8kL8lcAYyMJovVOP6MIUC1oZASyuL\\/Y\\/qjVG06R4byuucl9oPAvR7eqZI7z8jGm9jkGmtJ6 omega=MEsDAgcAAgEgAiBxu2eCLQf8zLnuJJ6FtbVjfaOa1210xFgedoXuGzDbTgIgcGk35eqFavKxS4dROi5uKNSNsmzIL4+fyh5Z\\/+FWGxU= ot=1 proof=MEUCIQDRCP4J9e+8IxMJfCLWWI1oIbNPGcChl+3Jr2vIuyDxzAIgOrzRAFPOuJZF4CBw\\/xgbzEsgKMtEtgNobF6WXVNhfUw= tvd time=1544221457",
"clientinitiv alpha=41Te9Ar7hMPx+A== omega=MEwDAgcAAgEgAiEAq2iCMfcijKDZ5tn2tuZcH+\\/GF+dmdxlXjDSFXLPGadACIHzUnbsPQ0FDt34Su4UXF46VFI0+4wjMDNszdoDYocu0 ip",
// Well, that's more corrupted packet, but the parser should be robust
"initserver virtualserver_name=Server\\sder\\sVerplanten \
virtualserver_welcomemessage=This\\sis\\sSplamys\\sWorld \
virtualserver_platform=Linux \
virtualserver_version=3.0.13.8\\s[Build:\\s1500452811] \
virtualserver_maxclients=32 virtualserver_created=0 \
virtualserver_nodec_encryption_mode=1 \
virtualserver_hostmessage=Lé\\sServer\\sde\\sSplamy \
virtualserver_name=Server_mode=0 virtualserver_default_server \
group=8 virtualserver_default_channel_group=8 \
virtualserver_hostbanner_url virtualserver_hostmessagegfx_url \
virtualserver_hostmessagegfx_interval=2000 \
virtualserver_priority_speaker_dimm_modificat",
"channellist cid=2 cpid=0 channel_name=Trusted\\sChannel \
channel_topic channel_codec=0 channel_codec_quality=0 \
channel_maxclients=0 channel_maxfamilyclients=-1 channel_order=1 \
channel_flag_permanent=1 channel_flag_semi_permanent=0 \
channel_flag_default=0 channel_flag_password=0 \
channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 \
channel_delete_delay=0 channel_flag_maxclients_unlimited=0 \
channel_flag_maxfamilyclients_unlimited=0 \
channel_flag_maxfamilyclients_inherited=1 \
channel_needed_talk_power=0 channel_forced_silence=0 \
channel_name_phonetic channel_icon_id=0 \
channel_flag_private=0|cid=4 cpid=2 \
channel_name=Ding\\s•\\s1\\s\\p\\sSplamy´s\\sBett channel_topic \
channel_codec=4 channel_codec_quality=7 channel_maxclients=-1 \
channel_maxfamilyclients=-1 channel_order=0 \
channel_flag_permanent=1 channel_flag_semi_permanent=0 \
channel_flag_default=0 channel_flag_password=0 \
channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 \
channel_delete_delay=0 channel_flag_maxclients_unlimited=1 \
channel_flag_maxfamilyclients_unlimited=0 \
channel_flag_maxfamilyclients_inherited=1 \
channel_needed_talk_power=0 channel_forced_silence=0 \
channel_name_phonetic=Neo\\sSeebi\\sEvangelion channel_icon_id=0 \
channel_flag_private=0", //|cid=6 cpid=2 channel_name=Ding\\s\xe2\x80\xa2\\s2\\s\\p\\sThe\\sBook\\sof\\sHeavy\\sMetal channel_topic channel_codec=2 channel_codec_quality=7 channel_maxclients=-1 channel_maxfamilyclients=-1 channel_order=4 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=1 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic=Not\\senought\\sChannels channel_icon_id=0 channel_flag_private=0|cid=30 cpid=2 channel_name=Ding\\s\xe2\x80\xa2\\s3\\s\\p\\sSenpai\\sGef\xc3\xa4hrlich channel_topic channel_codec=2 channel_codec_quality=7 channel_maxclients=-1 channel_maxfamilyclients=-1 channel_order=6 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=1 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic=The\\strashcan\\shas\\sthe\\strash channel_icon_id=0 channel_flag_private=0",
"notifychannelsubscribed cid=2|cid=4 es=3867|cid=5 es=18694|cid=6 es=18694|cid=7 es=18694|cid=11 es=18694|cid=13 es=18694|cid=14 es=18694|cid=16 es=18694|cid=22 es=18694|cid=23 es=18694|cid=24 es=18694|cid=25 es=18694|cid=30 es=18694|cid=163 es=18694",
"notifypermissionlist group_id_end=0|group_id_end=7|group_id_end=13|group_id_end=18|group_id_end=21|group_id_end=21|group_id_end=33|group_id_end=47|group_id_end=77|group_id_end=82|group_id_end=83|group_id_end=106|group_id_end=126|group_id_end=132|group_id_end=143|group_id_end=151|group_id_end=160|group_id_end=162|group_id_end=170|group_id_end=172|group_id_end=190|group_id_end=197|group_id_end=215|group_id_end=227|group_id_end=232|group_id_end=248|permname=b_serverinstance_help_view permdesc=Retrieve\\sinformation\\sabout\\sServerQuery\\scommands|permname=b_serverinstance_version_view permdesc=Retrieve\\sglobal\\sserver\\sversion\\s(including\\splatform\\sand\\sbuild\\snumber)|permname=b_serverinstance_info_view permdesc=Retrieve\\sglobal\\sserver\\sinformation|permname=b_serverinstance_virtualserver_list permdesc=List\\svirtual\\sservers\\sstored\\sin\\sthe\\sdatabase",
// Server query
"cmd=1 cid=2",
"channellistfinished",
// With newlines
"sendtextmessage text=\\nmess\\nage\\n return_code=11",
];
#[test]
fn loop_test() {
for cmd in TEST_COMMANDS {
test_loop(cmd.as_bytes());
}
}
#[test]
fn optional_arg() {
test_loop(b"cmd a");
test_loop(b"cmd a b=1");
test_loop_with_result(b"cmd a=", b"cmd a");
test_loop_with_result(b"cmd a= b=1", b"cmd a b=1");
}
#[test]
fn no_slash_escape() {
let in_cmd = "clientinitiv alpha=giGMvmfHzbY3ig== omega=MEsDAgcAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50= ot=1 ip";
let out_cmd = "clientinitiv alpha=giGMvmfHzbY3ig== omega=MEsDAgcAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTAO2+k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC\\/50= ot=1 ip";
test_loop_with_result(in_cmd.as_bytes(), out_cmd.as_bytes());
}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/src/errors.rs | utils/tsproto-types/src/errors.rs | use std::fmt;
use num_derive::{FromPrimitive, ToPrimitive};
include!(concat!(env!("OUT_DIR"), "/errors.rs"));
impl fmt::Display for Error {
    // Display falls back to the derived Debug representation; the enum
    // itself is generated at build time (see the include! above).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self, f) }
}
impl std::error::Error for Error {
    // NOTE(review): `description` has been deprecated since Rust 1.42 in
    // favor of `Display`; kept here for backwards compatibility.
    fn description(&self) -> &str { "TeamSpeak error" }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/src/lib.rs | utils/tsproto-types/src/lib.rs | //! `tsproto-types` contains basic types and enums that are used within the TeamSpeak protocol.
use std::borrow::{Borrow, Cow};
use std::fmt;
use base64::prelude::*;
use bitflags::bitflags;
use num_derive::{FromPrimitive, ToPrimitive};
use ref_cast::RefCast;
use serde::{Deserialize, Deserializer, Serialize};
use time::OffsetDateTime;
pub mod crypto;
pub mod errors;
pub mod versions;
include!(concat!(env!("OUT_DIR"), "/enums.rs"));
/// A `ClientId` identifies a client which is connected to a server.
///
/// Every client that we see on a server has a `ClientId`, even our own
/// connection.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ClientId(pub u16);
/// Describes a client or server uid which is a base64
/// encoded hash or a special reserved name.
///
/// This is saved raw, so the base64-decoded TeamSpeak uid.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct UidBuf(pub Vec<u8>);
#[derive(Debug, Eq, PartialEq, RefCast, Serialize)]
#[repr(transparent)]
pub struct Uid(pub [u8]);
// Conversions making `UidBuf`/`Uid` mirror the `String`/`str`
// owned-borrowed pair.
impl ToOwned for Uid {
    type Owned = UidBuf;
    fn to_owned(&self) -> Self::Owned { UidBuf(self.0.to_owned()) }
}
impl Borrow<Uid> for UidBuf {
    fn borrow(&self) -> &Uid { Uid::ref_cast(self.0.borrow()) }
}
impl AsRef<Uid> for UidBuf {
    fn as_ref(&self) -> &Uid { self.borrow() }
}
impl core::ops::Deref for UidBuf {
    type Target = Uid;
    fn deref(&self) -> &Self::Target { self.borrow() }
}
impl<'a> From<&'a Uid> for Cow<'a, Uid> {
    fn from(u: &'a Uid) -> Self { Cow::Borrowed(u) }
}
impl Uid {
    /// Borrow a raw (base64-decoded) uid from a byte slice.
    pub fn from_bytes(data: &'_ [u8]) -> &Self { Uid::ref_cast(data) }
    /// TeamSpeak uses a different encoding of the uid for fetching avatars.
    ///
    /// The raw data (base64-decoded) is encoded in hex, but instead of using
    /// [0-9a-f] with [a-p].
    pub fn as_avatar(&self) -> String {
        let mut res = String::with_capacity(self.0.len() * 2);
        for b in &self.0 {
            // High nibble first; each nibble is mapped onto 'a'..='p'.
            res.push((b'a' + (b >> 4)) as char);
            res.push((b'a' + (b & 0xf)) as char);
        }
        res
    }
    /// `ServerAdmin` is a reserved name rather than a base64 hash.
    pub fn is_server_admin(&self) -> bool { &self.0 == b"ServerAdmin" }
}
impl<'a, 'de: 'a> Deserialize<'de> for &'a Uid {
    // Zero-copy deserialization: borrows the raw bytes from the
    // deserializer's input.
    fn deserialize<D>(d: D) -> Result<&'a Uid, D::Error>
    where D: Deserializer<'de> {
        let data = <&[u8]>::deserialize(d)?;
        Ok(Uid::from_bytes(data))
    }
}
/// The database id of a client.
///
/// This is the id which is saved for a client in the database of one specific
/// server.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ClientDbId(pub u64);
/// Identifies a channel on a server.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ChannelId(pub u64);
/// Identifies a server group on a server.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ServerGroupId(pub u64);
/// Identifies a channel group on a server.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ChannelGroupId(pub u64);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct IconId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct Permission(pub u32);
impl Permission {
    // These mirror the `FromPrimitive`/`ToPrimitive` signatures used by
    // generated code, hence the `Option` return despite being infallible.
    /// Never fails
    pub fn from_u32(i: u32) -> Option<Self> { Some(Permission(i)) }
    /// Never fails
    pub fn to_u32(self) -> Option<u32> { Some(self.0) }
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum ClientType {
Normal,
/// Server query client
Query {
admin: bool,
},
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum MaxClients {
Unlimited,
Inherited,
Limited(u16),
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct TalkPowerRequest {
pub time: OffsetDateTime,
pub message: String,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct Invoker {
pub name: String,
pub id: ClientId,
pub uid: Option<UidBuf>,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct InvokerRef<'a> {
pub name: &'a str,
pub id: ClientId,
pub uid: Option<&'a Uid>,
}
impl Invoker {
    /// Borrowing view of this invoker (no clones of name or uid).
    pub fn as_ref(&self) -> InvokerRef {
        InvokerRef { name: &self.name, id: self.id, uid: self.uid.as_ref().map(|u| u.as_ref()) }
    }
}
// Display impls: numeric ids print as their inner value, uids as base64.
impl fmt::Display for ClientId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) }
}
impl fmt::Display for Uid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", BASE64_STANDARD.encode(&self.0))
    }
}
impl fmt::Display for ClientDbId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) }
}
impl fmt::Display for ChannelId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) }
}
impl fmt::Display for ServerGroupId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) }
}
impl fmt::Display for ChannelGroupId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/src/crypto.rs | utils/tsproto-types/src/crypto.rs | //! This module contains cryptography related code.
use std::convert::TryInto;
use std::{cmp, fmt, str};
use base64::prelude::*;
use curve25519_dalek_ng::constants;
use curve25519_dalek_ng::edwards::{CompressedEdwardsY, EdwardsPoint};
use curve25519_dalek_ng::scalar::Scalar;
use elliptic_curve::sec1::{FromEncodedPoint, ToEncodedPoint};
use generic_array::typenum::Unsigned;
use generic_array::GenericArray;
use num_bigint::{BigInt, Sign};
use p256::ecdsa::signature::{Signer, Verifier};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sha1::{Digest, Sha1};
use simple_asn1::ASN1Block;
use thiserror::Error;
type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug, Clone, Error)]
#[non_exhaustive]
pub enum Error {
#[error("More than one ASN.1 block")]
TooManyAsn1Blocks,
#[error("Invalid ASN.1: Expected a public key, not a private key")]
UnexpectedPrivateKey,
#[error("Invalid ASN.1: Does not contain a private key")]
NoPrivateKey,
#[error("Invalid ASN.1: Public key not found")]
PublicKeyNotFound,
#[error("Invalid ASN.1: Expected a bitstring")]
ExpectedBitString,
#[error("Invalid ASN.1: Expected a sequence")]
ExpectedSequence,
#[error("Key data is empty")]
EmptyKeyData,
#[error("Any known methods to decode the key failed")]
KeyDecodeError,
#[error("Failed to parse short private key")]
NoShortKey,
#[error("Not a obfuscated TeamSpeak key")]
NoObfuscatedKey,
#[error("Found no initial 'V' with a valid number before")]
NoCounterBlock,
#[error("Failed to parse public key")]
ParsePublicKeyFailed,
#[error("Wrong key length")]
WrongKeyLength,
#[error("Failed to parse public key, expected length {expected} but got {got}")]
WrongPublicKeyLength { expected: usize, got: usize },
#[error("Wrong signature")]
WrongSignature { key: EccKeyPubP256, data: Vec<u8>, signature: Vec<u8> },
#[error(transparent)]
Asn1Decode(#[from] simple_asn1::ASN1DecodeErr),
#[error(transparent)]
Asn1Encode(#[from] simple_asn1::ASN1EncodeErr),
#[error(transparent)]
Base64(#[from] base64::DecodeError),
#[error(transparent)]
Utf8(#[from] std::str::Utf8Error),
}
/// Xored onto saved identities in the TeamSpeak client settings file.
///
/// 128 ASCII bytes; the (de)obfuscation loops below only ever xor the first
/// `min(data.len(), 100)` bytes of a key with this table.
const IDENTITY_OBFUSCATION: [u8; 128] = *b"b9dfaa7bee6ac57ac7b65f1094a1c155\
e747327bc2fe5d51c512023fe54a280201004e90ad1daaae1075d53b7d571c30e063b5a\
62a4a017bb394833aa0983e6e";
/// Whether a key contains only the public part or also the private part.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum KeyType {
    Public,
    Private,
}
/// A public ecc key.
///
/// The curve of this key is P-256, or PRIME256v1 as it is called by openssl.
#[derive(Clone, Deserialize, Eq, PartialEq, Serialize)]
pub struct EccKeyPubP256(
    // (De)serialized through the short SEC1 byte format, see the
    // `deserialize_ecc_key_pub_p256`/`serialize_ecc_key_pub_p256` helpers.
    #[serde(
        deserialize_with = "deserialize_ecc_key_pub_p256",
        serialize_with = "serialize_ecc_key_pub_p256"
    )]
    p256::PublicKey,
);
/// A private ecc key.
///
/// The curve of this key is P-256, or PRIME256v1 as it is called by openssl.
#[derive(Clone)]
pub struct EccKeyPrivP256(p256::SecretKey);
/// A public ecc key.
///
/// The curve of this key is Ed25519.
#[derive(Clone)]
pub struct EccKeyPubEd25519(pub CompressedEdwardsY);
/// A private ecc key.
///
/// The curve of this key is Ed25519.
#[derive(Clone)]
pub struct EccKeyPrivEd25519(pub Scalar);
/// Passwords are encoded as base64(sha1(password)).
pub fn encode_password(password: &[u8]) -> String {
    let digest = Sha1::digest(password);
    BASE64_STANDARD.encode(digest.as_slice())
}
fn deserialize_ecc_key_pub_p256<'de, D: Deserializer<'de>>(
de: D,
) -> Result<p256::PublicKey, D::Error> {
let data: Vec<u8> = Deserialize::deserialize(de)?;
Ok(EccKeyPubP256::from_short(&data).map_err(serde::de::Error::custom)?.0)
}
/// Serde helper: serializes a P-256 public key as its short (SEC1) byte
/// encoding, the counterpart of [`deserialize_ecc_key_pub_p256`].
fn serialize_ecc_key_pub_p256<S: Serializer>(
    key: &p256::PublicKey, ser: S,
) -> Result<S::Ok, S::Error> {
    let short = EccKeyPubP256(*key).to_short();
    Serialize::serialize(&short.as_slice(), ser)
}
impl fmt::Debug for EccKeyPubP256 {
    /// Shows the base64 tomcrypt encoding instead of the raw point data.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let encoded = self.to_ts();
        write!(f, "EccKeyPubP256({})", encoded)
    }
}
impl fmt::Debug for EccKeyPrivP256 {
    // NOTE(review): this exposes private key material in debug output/logs.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let encoded = BASE64_STANDARD.encode(self.to_short());
        write!(f, "EccKeyPrivP256({})", encoded)
    }
}
impl fmt::Debug for EccKeyPubEd25519 {
    /// Shows the base64 encoding of the compressed point.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let encoded = self.to_base64();
        write!(f, "EccKeyPubEd25519({})", encoded)
    }
}
impl fmt::Debug for EccKeyPrivEd25519 {
    // NOTE(review): this exposes private key material in debug output/logs.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let encoded = self.to_base64();
        write!(f, "EccKeyPrivEd25519({})", encoded)
    }
}
impl EccKeyPubP256 {
/// The shortest format of a public key.
///
/// This is just the `BigNum` of the x and y coordinates concatenated in
/// this order. Each of the coordinates takes half of the size.
pub fn from_short(data: &[u8]) -> Result<Self> {
p256::PublicKey::from_sec1_bytes(data).map_err(|_| Error::ParsePublicKeyFailed).map(Self)
}
/// From base64 encoded tomcrypt key.
pub fn from_ts(data: &str) -> Result<Self> {
Self::from_tomcrypt(&BASE64_STANDARD.decode(data)?)
}
/// Decodes the public key from an ASN.1 DER object how tomcrypt stores it.
///
/// The format is:
/// - `BitString` where the first bit is 1 if the private key is contained
/// - `Integer`: The key size (32)
/// - `Integer`: X coordinate of the public key
/// - `Integer`: Y coordinate of the public key
pub fn from_tomcrypt(data: &[u8]) -> Result<Self> {
let blocks = simple_asn1::from_der(data)?;
if blocks.len() != 1 {
return Err(Error::TooManyAsn1Blocks);
}
if let ASN1Block::Sequence(_, blocks) = &blocks[0] {
if let Some(ASN1Block::BitString(_, len, content)) = blocks.first() {
if *len != 1 || content[0] & 0x80 != 0 {
return Err(Error::UnexpectedPrivateKey);
}
if let (Some(ASN1Block::Integer(_, x)), Some(ASN1Block::Integer(_, y))) =
(blocks.get(2), blocks.get(3))
{
let x_bytes = x.to_bytes_be().1;
let y_bytes = y.to_bytes_be().1;
let field_size = elliptic_curve::FieldBytesSize::<p256::NistP256>::to_usize();
if x_bytes.len() != field_size {
return Err(Error::WrongPublicKeyLength {
expected: field_size,
got: x_bytes.len(),
});
}
if y_bytes.len() != field_size {
return Err(Error::WrongPublicKeyLength {
expected: field_size,
got: y_bytes.len(),
});
}
let enc_point = p256::EncodedPoint::from_affine_coordinates(
GenericArray::from_slice(x_bytes.as_slice()),
GenericArray::from_slice(y_bytes.as_slice()),
false,
);
let enc_point = p256::PublicKey::from_encoded_point(&enc_point);
if enc_point.is_some().into() {
Ok(Self(enc_point.unwrap()))
} else {
Err(Error::ParsePublicKeyFailed)
}
} else {
Err(Error::PublicKeyNotFound)
}
} else {
Err(Error::ExpectedBitString)
}
} else {
Err(Error::ExpectedSequence)
}
}
/// Convert to base64 encoded public tomcrypt key.
pub fn to_ts(&self) -> String { BASE64_STANDARD.encode(self.to_tomcrypt()) }
pub fn to_tomcrypt(&self) -> Vec<u8> {
let enc_point = self.0.as_affine().to_encoded_point(false);
// We can unwrap here, creating the public key ensures that it is not the identity point,
// which is the only time this returns `None`.
let pubkey_x = BigInt::from_bytes_be(Sign::Plus, enc_point.x().unwrap());
let pubkey_y = BigInt::from_bytes_be(Sign::Plus, enc_point.y().unwrap());
// Only returns an error when encoding wrong objects, so fine to unwrap.
simple_asn1::to_der(&ASN1Block::Sequence(0, vec![
ASN1Block::BitString(0, 1, vec![0]),
ASN1Block::Integer(0, 32.into()),
ASN1Block::Integer(0, pubkey_x),
ASN1Block::Integer(0, pubkey_y),
]))
.unwrap()
}
/// Get the SEC1 encoding of the public key point on the curve.
pub fn to_short(&self) -> Vec<u8> {
// TODO Maybe compress?
self.0.as_affine().to_encoded_point(false).as_bytes().to_vec()
}
/// Compute the uid of this key without encoding it in base64.
///
/// returns sha1(ts encoded key)
pub fn get_uid_no_base64(&self) -> Vec<u8> {
Sha1::digest(self.to_ts().as_bytes()).as_slice().to_vec()
}
/// Compute the uid of this key.
///
/// Uid = base64(sha1(ts encoded key))
pub fn get_uid(&self) -> String { BASE64_STANDARD.encode(self.get_uid_no_base64()) }
pub fn verify(&self, data: &[u8], signature: &[u8]) -> Result<()> {
let sig =
p256::ecdsa::Signature::from_der(signature).map_err(|_| Error::WrongSignature {
key: self.clone(),
data: data.to_vec(),
signature: signature.to_vec(),
})?;
let key = p256::ecdsa::VerifyingKey::from(&self.0);
key.verify(data, &sig).map_err(|_| Error::WrongSignature {
key: self.clone(),
data: data.to_vec(),
signature: signature.to_vec(),
})
}
// For the bookkeeping
#[allow(clippy::should_implement_trait)]
pub fn as_ref(&self) -> &Self { self }
}
impl EccKeyPrivP256 {
/// Create a new key key pair.
pub fn create() -> Self { Self(p256::SecretKey::random(&mut rand::thread_rng())) }
/// Try to import the key from any of the known formats.
pub fn import(data: &[u8]) -> Result<Self> {
if data.is_empty() {
return Err(Error::EmptyKeyData);
}
if let Ok(s) = str::from_utf8(data) {
if let Ok(r) = Self::import_str(s) {
return Ok(r);
}
}
if let Ok(r) = Self::from_tomcrypt(data) {
return Ok(r);
}
if let Ok(r) = Self::from_short(data) {
return Ok(r);
}
Err(Error::KeyDecodeError)
}
/// Try to import the key from any of the known formats.
pub fn import_str(s: &str) -> Result<Self> {
if let Ok(r) = BASE64_STANDARD.decode(s) {
if let Ok(r) = Self::import(&r) {
return Ok(r);
}
}
if let Ok(r) = Self::from_ts_obfuscated(s) {
return Ok(r);
}
Err(Error::KeyDecodeError)
}
/// The shortest format of a private key.
///
/// This is just the `BigNum` of the private key.
pub fn from_short(data: &[u8]) -> Result<Self> {
// TODO !! p256::SecretKey::from_bytes panics when the data is not 32 long !!
// maybe create a pull request for that because das not good?!
if data.len() != 32 {
Err(Error::NoShortKey)
} else {
Ok(Self(
p256::SecretKey::from_bytes(p256::FieldBytes::from_slice(data))
.map_err(|_| Error::NoShortKey)?,
))
}
}
/// The shortest format of a private key.
///
/// This is just the `BigNum` of the private key.
pub fn to_short(&self) -> elliptic_curve::FieldBytes<p256::NistP256> { self.0.to_bytes() }
/// From base64 encoded tomcrypt key.
pub fn from_ts(data: &str) -> Result<Self> {
Self::from_tomcrypt(&BASE64_STANDARD.decode(data)?)
}
/// From the key representation which is used to store identities in the
/// TeamSpeak configuration file.
///
/// Format: Offset for identity level || 'V' || obfuscated key
///
/// This function takes only the obfuscated key without the level.
///
/// Thanks to landave, who put
/// [his deobfuscation code](https://github.com/landave/TSIdentityTool)
/// under the MIT license.
pub fn from_ts_obfuscated(data: &str) -> Result<Self> {
let mut data = BASE64_STANDARD.decode(data)?;
if data.len() < 20 {
return Err(Error::NoObfuscatedKey);
}
// Hash everything until the first 0 byte, starting after the first 20
// bytes.
let pos = data[20..].iter().position(|b| *b == b'\0').unwrap_or(data.len() - 20);
let hash = Sha1::digest(&data[20..20 + pos]);
let hash = hash.as_slice();
// Xor first 20 bytes of data with the hash
for i in 0..20 {
data[i] ^= hash[i];
}
// Xor first 100 bytes with a static value
#[allow(clippy::needless_range_loop)]
for i in 0..cmp::min(data.len(), 100) {
data[i] ^= IDENTITY_OBFUSCATION[i];
}
Self::from_ts(str::from_utf8(&data)?)
}
/// Decodes the private key from an ASN.1 DER object how tomcrypt stores it.
///
/// The format is:
/// - `BitString` where the first bit is 1 if the private key is contained
/// - `Integer`: The key size (32)
/// - `Integer`: X coordinate of the public key
/// - `Integer`: Y coordinate of the public key
/// - `Integer`: Private key
///
/// The TS3AudioBot stores two 1 bits in the first `BitString` and omits the
/// public key.
pub fn from_tomcrypt(data: &[u8]) -> Result<Self> {
let blocks = simple_asn1::from_der(data)?;
if blocks.len() != 1 {
return Err(Error::TooManyAsn1Blocks);
}
if let ASN1Block::Sequence(_, blocks) = &blocks[0] {
if let Some(ASN1Block::BitString(_, len, content)) = blocks.first() {
if (*len != 1 && *len != 2) || content[0] & 0x80 == 0 {
return Err(Error::NoPrivateKey);
}
if *len == 1 {
if let Some(ASN1Block::Integer(_, i)) = blocks.get(4) {
Self::from_short(&i.to_bytes_be().1)
} else {
Err(Error::NoPrivateKey)
}
} else if let Some(ASN1Block::Integer(_, i)) = blocks.get(2) {
Self::from_short(&i.to_bytes_be().1)
} else {
Err(Error::NoPrivateKey)
}
} else {
Err(Error::ExpectedBitString)
}
} else {
Err(Error::ExpectedSequence)
}
}
/// Convert to base64 encoded private tomcrypt key.
pub fn to_ts(&self) -> String { BASE64_STANDARD.encode(self.to_tomcrypt()) }
/// Store as obfuscated TeamSpeak identity.
pub fn to_ts_obfuscated(&self) -> String {
let mut data = self.to_ts().into_bytes();
// Xor first 100 bytes with a static value
#[allow(clippy::needless_range_loop)]
for i in 0..cmp::min(data.len(), 100) {
data[i] ^= IDENTITY_OBFUSCATION[i];
}
// Hash everything until the first 0 byte, starting after the first 20
// bytes.
let pos = data[20..].iter().position(|b| *b == b'\0').unwrap_or(data.len() - 20);
let hash = Sha1::digest(&data[20..20 + pos]);
let hash = hash.as_slice();
// Xor first 20 bytes of data with the hash
for i in 0..20 {
data[i] ^= hash[i];
}
BASE64_STANDARD.encode(data)
}
pub fn to_tomcrypt(&self) -> Vec<u8> {
let enc_point = self.0.public_key().as_affine().to_encoded_point(false);
// We can unwrap here, creating the public key ensures that it is not the identity point,
// which is the only time this returns `None`.
let pubkey_x = BigInt::from_bytes_be(Sign::Plus, enc_point.x().unwrap());
let pubkey_y = BigInt::from_bytes_be(Sign::Plus, enc_point.y().unwrap());
let privkey = BigInt::from_bytes_be(Sign::Plus, &self.0.to_bytes());
// Only returns an error when encoding wrong objects, so fine to unwrap.
simple_asn1::to_der(&ASN1Block::Sequence(0, vec![
ASN1Block::BitString(0, 1, vec![0x80]),
ASN1Block::Integer(0, 32.into()),
ASN1Block::Integer(0, pubkey_x),
ASN1Block::Integer(0, pubkey_y),
ASN1Block::Integer(0, privkey),
]))
.unwrap()
}
/// This has to be the private key, the other one has to be the public key.
pub fn create_shared_secret(
self, other: EccKeyPubP256,
) -> elliptic_curve::ecdh::SharedSecret<p256::NistP256> {
elliptic_curve::ecdh::diffie_hellman(self.0.to_nonzero_scalar(), other.0.as_affine())
}
pub fn sign(self, data: &[u8]) -> Vec<u8> {
let key = p256::ecdsa::SigningKey::from(self.0);
let sig: p256::ecdsa::DerSignature = key.sign(data);
sig.as_bytes().to_vec()
}
pub fn to_pub(&self) -> EccKeyPubP256 { self.into() }
}
impl<'a> From<&'a EccKeyPrivP256> for EccKeyPubP256 {
    /// Derive the public key belonging to this private key.
    fn from(priv_key: &'a EccKeyPrivP256) -> Self {
        let public = priv_key.0.public_key();
        Self(public)
    }
}
impl EccKeyPubEd25519 {
pub fn from_bytes(data: [u8; 32]) -> Self { EccKeyPubEd25519(CompressedEdwardsY(data)) }
pub fn from_base64(data: &str) -> Result<Self> {
let decoded = BASE64_STANDARD.decode(data)?;
if decoded.len() != 32 {
return Err(Error::WrongKeyLength);
}
Ok(Self::from_bytes(decoded[..32].try_into().unwrap()))
}
pub fn to_base64(&self) -> String {
let EccKeyPubEd25519(CompressedEdwardsY(ref data)) = *self;
BASE64_STANDARD.encode(data)
}
}
impl EccKeyPrivEd25519 {
/// This is not used to create TeamSpeak keys, as they are not canonical.
pub fn create() -> Self { EccKeyPrivEd25519(Scalar::random(&mut rand::thread_rng())) }
pub fn from_base64(data: &str) -> Result<Self> {
let decoded = BASE64_STANDARD.decode(data)?;
if decoded.len() != 32 {
return Err(Error::WrongKeyLength);
}
Ok(Self::from_bytes(decoded[..32].try_into().unwrap()))
}
pub fn from_bytes(data: [u8; 32]) -> Self {
EccKeyPrivEd25519(Scalar::from_bytes_mod_order(data))
}
pub fn to_base64(&self) -> String { BASE64_STANDARD.encode(self.0.as_bytes()) }
/// This has to be the private key, the other one has to be the public key.
pub fn create_shared_secret(&self, pub_key: &EdwardsPoint) -> [u8; 32] {
let res = pub_key * self.0;
res.compress().0
}
pub fn to_pub(&self) -> EccKeyPubEd25519 { self.into() }
}
impl<'a> From<&'a EccKeyPrivEd25519> for EccKeyPubEd25519 {
    /// Derive the public key: basepoint multiplied by the private scalar.
    fn from(priv_key: &'a EccKeyPrivEd25519) -> Self {
        let point = &constants::ED25519_BASEPOINT_TABLE * &priv_key.0;
        Self(point.compress())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A base64 tomcrypt encoded P-256 private key shared by several tests.
    const TEST_PRIV_KEY: &str = "MG0DAgeAAgEgAiAIXJBlj1hQbaH0Eq0DuLlCmH8bl+veTA\
O2+k9EQjEYSgIgNnImcmKo7ls5mExb6skfK2Tw+u54aeDr0OP1ITsC/50CIA8M5nmDB\
nmDM/gZ//4AAAAAAAAAAAAAAAAAAAAZRzOI";
    #[test]
    fn parse_p256_priv_key() { EccKeyPrivP256::from_ts(TEST_PRIV_KEY).unwrap(); }
    /// ECDH with two fresh key pairs must yield the same secret on both sides.
    #[test]
    fn p256_ecdh() {
        let priv_key1 = EccKeyPrivP256::create();
        let pub_key1 = priv_key1.to_pub();
        let priv_key2 = EccKeyPrivP256::create();
        let pub_key2 = priv_key2.to_pub();
        let res1 = priv_key1.create_shared_secret(pub_key2);
        let res2 = priv_key2.create_shared_secret(pub_key1);
        assert_eq!(res1.raw_secret_bytes(), res2.raw_secret_bytes());
    }
    /// Verifies a recorded license signature against the matching server key.
    #[test]
    fn p256_signature() {
        let license =
            "AQBM0LZCVmZ7CX/\
miewqdjOyuKa6kI78Fk43LoypifqOkAIOkvUAEn46gAcAAAAgQW5vbnltb3VzAABoruUa34pO9zy1Z5zIOmrkIO06lKg/\
+mBrg6Mw1Rg4OyAPa7A3D2xY9w==";
        let server_key = "MEwDAgcAAgEgAiEA96WgYeYU8zoPqXJqicita+rR92FvnTlxYcUUyIDkQ6cCIE/\
KPo+ms3BEzN/HBR71BJ/Z1Fv8918mdDKLetbOGKWt";
        let signature = "MEUCIQC+ececxC0NCcuCtrXHAO5h7qbh1s/TGP/\
AaHa6+wV38wIgV9wwSppEdGjwuH3ETAME9tDj3aNkNvL25i0ikF9vs8M=";
        let license = BASE64_STANDARD.decode(license).unwrap();
        let signature = BASE64_STANDARD.decode(signature).unwrap();
        let server_key = EccKeyPubP256::from_ts(server_key).unwrap();
        server_key.verify(&license, &signature).unwrap();
    }
    /// Obfuscate/deobfuscate round trip must restore the same key.
    #[test]
    fn obfuscated_priv_key() {
        let key = EccKeyPrivP256::from_ts(TEST_PRIV_KEY).unwrap();
        let obf = key.to_ts_obfuscated();
        let key2 = EccKeyPrivP256::from_ts_obfuscated(&obf).unwrap();
        assert_eq!(key.to_short(), key2.to_short());
    }
    /// The uid of the test key must match its known value.
    #[test]
    fn obfuscated_identity() {
        let key = EccKeyPrivP256::from_ts(TEST_PRIV_KEY).unwrap();
        let uid = key.to_pub().get_uid();
        let expected_uid = "lks7QL5OVMKo4pZ79cEOI5r5oEA=";
        assert_eq!(expected_uid, &uid);
    }
    /// Import a TS3AudioBot style key (public key omitted in the ASN.1).
    #[test]
    fn tsaudiobot_identity() {
        let key = EccKeyPrivP256::import_str(
            "MCkDAgbAAgEgAiBhPImh+bO1xMGOrcplwN3G74bhE9XATm+DxVo3aNtBqg==",
        )
        .unwrap();
        let uid = key.to_pub().get_uid();
        let expected_uid = "test/9PZ9vww/Bpf5vJxtJhpz80=";
        assert_eq!(expected_uid, &uid);
    }
    /// Short form round trip must be lossless.
    #[test]
    fn test_p256_priv_key_short() {
        let key = EccKeyPrivP256::from_ts(TEST_PRIV_KEY).unwrap();
        let short = key.to_short();
        let key = EccKeyPrivP256::from_short(short.as_slice()).unwrap();
        let short2 = key.to_short();
        assert_eq!(short, short2);
    }
    #[test]
    fn parse_ed25519_pub_key() {
        EccKeyPubEd25519::from_base64("zQ3irtRjRVCafjz9j2iz3HVVsp3M7HPNGHUPmTgSQIo=").unwrap();
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/src/versions.rs | utils/tsproto-types/src/versions.rs | use std::fmt;
include!(concat!(env!("OUT_DIR"), "/versions.rs"));
impl fmt::Display for Version {
    // Formats the version as "<platform> <version string>".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} {}", self.get_platform(), self.get_version_string())
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/build/errors.rs | utils/tsproto-types/build/errors.rs | use std::ops::Deref;
use heck::*;
use t4rust_derive::Template;
use tsproto_structs::errors::*;
use tsproto_structs::EnumValue;
use tsproto_structs::{doc_comment, indent};
// Template wrapper that renders the error declarations from
// `tsproto_structs::errors` through the `Errors.tt` t4 template.
#[derive(Template)]
#[TemplatePath = "build/Errors.tt"]
#[derive(Default, Debug)]
pub struct Errors;
// Deref to the underlying declaration list so the template can iterate it.
impl Deref for Errors {
    type Target = Vec<EnumValue>;
    fn deref(&self) -> &Self::Target { &DATA.0 }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/build/enums.rs | utils/tsproto-types/build/enums.rs | use std::ops::Deref;
use heck::*;
use t4rust_derive::Template;
use tsproto_structs::enums;
use tsproto_structs::enums::*;
use tsproto_structs::{doc_comment, indent};
// Template wrapper that renders the enum declarations from
// `tsproto_structs::enums` through the `Enums.tt` t4 template.
#[derive(Template)]
#[TemplatePath = "build/Enums.tt"]
#[derive(Default, Debug)]
pub struct Enums;
// Deref to the underlying declarations so the template can iterate them.
impl Deref for Enums {
    type Target = enums::Enums;
    fn deref(&self) -> &Self::Target { &DATA }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/build/build.rs | utils/tsproto-types/build/build.rs | use std::env;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
mod enums;
mod errors;
mod versions;
use crate::enums::Enums;
use crate::errors::Errors;
use crate::versions::Versions;
/// Build script entry point: renders the generated `enums.rs`, `errors.rs`
/// and `versions.rs` sources into Cargo's `OUT_DIR`.
///
/// Fix: the original re-created the identical `Path` binding twice.
fn main() {
    let out_dir = env::var("OUT_DIR").unwrap();
    let path = Path::new(&out_dir);

    // Enums
    let mut structs = File::create(path.join("enums.rs")).unwrap();
    write!(&mut structs, "{}", Enums).unwrap();

    // Errors
    let mut structs = File::create(path.join("errors.rs")).unwrap();
    write!(&mut structs, "{}", Errors).unwrap();

    // Versions
    let mut structs = File::create(path.join("versions.rs")).unwrap();
    write!(&mut structs, "{}", Versions).unwrap();
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/tsproto-types/build/versions.rs | utils/tsproto-types/build/versions.rs | use std::ops::Deref;
use t4rust_derive::Template;
use tsproto_structs::versions::*;
// Template wrapper that renders the version declarations from
// `tsproto_structs::versions` through the `Versions.tt` t4 template.
#[derive(Template)]
#[TemplatePath = "build/Versions.tt"]
#[derive(Default, Debug)]
pub struct Versions;
// Deref to the underlying declaration list so the template can iterate it.
impl Deref for Versions {
    type Target = Vec<Version>;
    fn deref(&self) -> &Self::Target { &DATA.0 }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/src/messages.rs | utils/ts-bookkeeping/src/messages.rs | use std::net::IpAddr;
use thiserror::Error;
use time::{Duration, OffsetDateTime};
use tsproto_packets::commands::CommandParser;
use tsproto_packets::packets::{Direction, InHeader, OutCommand, PacketType};
use tsproto_types::errors::Error;
use crate::*;
type Result<T> = std::result::Result<T, ParseError>;
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum ParseError {
#[error(transparent)]
Base64(#[from] base64::DecodeError),
#[error("Parameter {arg} not found in {name}")]
ParameterNotFound { arg: &'static str, name: &'static str },
#[error("Parameter {arg} not found in {name}")]
ParameterNotFound2 { arg: String, name: String },
#[error("Command {0} is unknown")]
UnknownCommand(String),
#[error(transparent)]
StringParse(#[from] std::str::Utf8Error),
#[error(transparent)]
TsProto(#[from] tsproto_packets::Error),
/// Gets thrown when parsing a specific command with the wrong input.
#[error("Command {0} is wrong")]
WrongCommand(String),
#[error("Wrong newprotocol flag ({0})")]
WrongNewprotocol(bool),
#[error("Wrong packet type {0:?}")]
WrongPacketType(PacketType),
#[error("Wrong direction {0:?}")]
WrongDirection(Direction),
#[error("Cannot parse \"{value}\" as int for parameter {arg} ({source})")]
ParseInt { arg: &'static str, value: String, source: std::num::ParseIntError },
#[error("Cannot parse \"{value}\" as SocketAddr for parameter {arg} ({source})")]
ParseAddr { arg: &'static str, value: String, source: std::net::AddrParseError },
#[error("Cannot parse \"{value}\" as float for parameter {arg} ({source})")]
ParseFloat { arg: &'static str, value: String, source: std::num::ParseFloatError },
#[error("Cannot parse \"{value}\" as bool for parameter {arg}")]
ParseBool { arg: &'static str, value: String },
#[error("Cannot parse \"{value}\" as SocketAddr for parameter {arg} ({source})")]
ParseUid { arg: &'static str, value: String, source: base64::DecodeError },
#[error("Cannot parse \"{value}\" as DateTimeOffset for parameter {arg} ({source})")]
ParseDate { arg: &'static str, value: String, source: time::error::ComponentRange },
#[error("Invalid value \"{value}\" for parameter {arg}")]
InvalidValue { arg: &'static str, value: String },
}
/// Parse an incoming message from a command's header and arguments.
pub trait InMessageTrait {
    fn new(header: &InHeader, args: CommandParser) -> Result<Self>
    where Self: Sized;
}
/// Convert an outgoing message into a sendable command packet.
pub trait OutMessageTrait {
    fn to_packet(self) -> OutCommand;
}
/// Like `OutMessageTrait`, but with an optional return code attached so the
/// server's answer can be matched to this message.
pub trait OutMessageWithReturnTrait {
    fn to_packet(self, return_code: Option<&str>) -> OutCommand;
}
// An already built command converts trivially to itself.
impl OutMessageTrait for OutCommand {
    fn to_packet(self) -> OutCommand { self }
}
/// Generated server-to-client message definitions (built into `OUT_DIR`).
pub mod s2c {
    use super::*;
    include!(concat!(env!("OUT_DIR"), "/s2c_messages.rs"));
}
/// Generated client-to-server message definitions (built into `OUT_DIR`).
pub mod c2s {
    use super::*;
    include!(concat!(env!("OUT_DIR"), "/c2s_messages.rs"));
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/src/lib.rs | utils/ts-bookkeeping/src/lib.rs | //! `ts-bookkeeping` contains structs to store the state of a TeamSpeak server, with its clients and
//! channels.
//!
//! The crate can be used to keep track of the state on a server by processing all incoming
//! commands, which is why it is called “bookkeeping”. It also contains generated structs for all
//! TeamSpeak commands.
//!
//! Incoming commands can be applied to the state and generate [`events`]. The main struct is
//! [`data::Connection`].
//!
//! The structs have methods to create packets for various actions. The generated packets can be
//! sent to a server.
use std::fmt;
use std::net::{IpAddr, SocketAddr};
use serde::{Deserialize, Serialize};
use thiserror::Error;
pub mod data;
pub mod events;
pub mod messages;
// Reexports
pub use tsproto_types::errors::Error as TsError;
pub use tsproto_types::versions::Version;
pub use tsproto_types::{
ChannelGroupId, ChannelId, ChannelPermissionHint, ChannelType, ClientDbId, ClientId,
ClientPermissionHint, ClientType, Codec, CodecEncryptionMode, GroupNamingMode, GroupType,
HostBannerMode, HostMessageMode, IconId, Invoker, InvokerRef, LicenseType, LogLevel,
MaxClients, Permission, PermissionType, PluginTargetMode, Reason, ServerGroupId,
TalkPowerRequest, TextMessageTargetMode, TokenType, Uid, UidBuf,
};
type Result<T> = std::result::Result<T, Error>;
/// Errors that can occur while applying incoming commands to the bookkeeping
/// data structures.
#[derive(Error, Debug)]
#[non_exhaustive]
pub enum Error {
    #[error("Target client id missing for a client text message")]
    MessageWithoutTargetClientId,
    #[error("Unknown TextMessageTargetMode")]
    UnknownTextMessageTargetMode,
    // Generic "<kind> <id> not found" error.
    #[error("{0} {1} not found")]
    NotFound(&'static str, String),
    #[error("{0} should be removed but does not exist")]
    RemoveNotFound(&'static str),
    #[error("Failed to parse connection ip: {0}")]
    InvalidConnectionIp(#[source] std::net::AddrParseError),
}
/// The address of a server: either an already resolved socket address or a
/// string that still has to be resolved (see `ConnectOptions::new`).
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ServerAddress {
    SocketAddr(SocketAddr),
    Other(String),
}
impl From<SocketAddr> for ServerAddress {
fn from(addr: SocketAddr) -> Self { ServerAddress::SocketAddr(addr) }
}
impl From<String> for ServerAddress {
fn from(addr: String) -> Self { ServerAddress::Other(addr) }
}
impl<'a> From<&'a str> for ServerAddress {
fn from(addr: &'a str) -> Self { ServerAddress::Other(addr.to_string()) }
}
impl fmt::Display for ServerAddress {
    /// Delegates to the `Display` of the contained address or string.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ServerAddress::SocketAddr(addr) => write!(f, "{}", addr),
            ServerAddress::Other(addr) => write!(f, "{}", addr),
        }
    }
}
/// All possible targets to send messages.
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum MessageTarget {
    /// The server chat.
    Server,
    /// The channel chat.
    Channel,
    /// A private message to the given client.
    Client(ClientId),
    /// Poke the given client.
    Poke(ClientId),
}
/// The configuration to create a new connection.
#[derive(Deserialize, Serialize)]
pub struct ConnectOptions {
    // Where to connect to, see `ConnectOptions::new`.
    address: ServerAddress,
    // Local bind address; `None` picks a default for the address family.
    local_address: Option<SocketAddr>,
    // Displayed client name.
    name: String,
    // Client version reported to the server.
    version: Version,
    // Logging switches, see the builder methods of the same names.
    log_commands: bool,
    log_packets: bool,
    log_udp_packets: bool,
}
impl ConnectOptions {
    /// Start creating the configuration of a new connection.
    ///
    /// # Arguments
    /// The address of the server has to be supplied. The address can be a
    /// [`SocketAddr`](std::net::SocketAddr), a string or directly a [`ServerAddress`]. A string
    /// will automatically be resolved from all formats supported by TeamSpeak.
    /// For details, see [`resolver::resolve`].
    #[inline]
    pub fn new<A: Into<ServerAddress>>(address: A) -> Self {
        Self {
            address: address.into(),
            local_address: None,
            name: "TeamSpeakUser".into(),
            version: Version::Linux_3_2_1,
            log_commands: false,
            log_packets: false,
            log_udp_packets: false,
        }
    }

    /// The address for the socket of our client.
    ///
    /// # Default
    /// The default is `0.0.0:0` when connecting to an IPv4 address and `[::]:0`
    /// when connecting to an IPv6 address.
    #[inline]
    pub fn local_address(self, local_address: SocketAddr) -> Self {
        Self { local_address: Some(local_address), ..self }
    }

    /// The name of the user.
    ///
    /// # Default
    /// `TeamSpeakUser`
    #[inline]
    pub fn name(self, name: String) -> Self { Self { name, ..self } }

    /// The displayed version of the client.
    ///
    /// # Default
    /// `3.2.1 on Linux`
    #[inline]
    pub fn version(self, version: Version) -> Self { Self { version, ..self } }

    /// If the content of all commands should be written to the logger.
    ///
    /// # Default
    /// `false`
    #[inline]
    pub fn log_commands(self, log_commands: bool) -> Self { Self { log_commands, ..self } }

    /// If the content of all packets in high-level form should be written to
    /// the logger.
    ///
    /// # Default
    /// `false`
    #[inline]
    pub fn log_packets(self, log_packets: bool) -> Self { Self { log_packets, ..self } }

    /// If the content of all udp packets in byte-array form should be written
    /// to the logger.
    ///
    /// # Default
    /// `false`
    #[inline]
    pub fn log_udp_packets(self, log_udp_packets: bool) -> Self {
        Self { log_udp_packets, ..self }
    }
}
impl fmt::Debug for ConnectOptions {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Exhaustively destructure so adding a field without updating this
        // implementation becomes a compile error.
        let ConnectOptions {
            address,
            local_address,
            name,
            version,
            log_commands,
            log_packets,
            log_udp_packets,
        } = self;
        write!(
            f,
            "ConnectOptions {{ address: {:?}, local_address: {:?}, name: {}, version: {}, \
log_commands: {}, log_packets: {}, log_udp_packets: {} }}",
            address, local_address, name, version, log_commands, log_packets, log_udp_packets,
        )?;
        Ok(())
    }
}
/// Options for disconnecting from a server.
///
/// The derived `Default` (no reason, no message) replaces the previous
/// hand-written `impl Default` with identical semantics.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct DisconnectOptions {
    // Why the client leaves, see `DisconnectOptions::reason`.
    reason: Option<Reason>,
    // Leave message; only displayed when a reason is also set.
    message: Option<String>,
}
impl DisconnectOptions {
    #[inline]
    pub fn new() -> Self { Self::default() }

    /// Set the reason for leaving.
    ///
    /// # Default
    ///
    /// None
    #[inline]
    pub fn reason(self, reason: Reason) -> Self { Self { reason: Some(reason), ..self } }

    /// Set the leave message.
    ///
    /// You also have to set the reason, otherwise the message will not be
    /// displayed.
    ///
    /// # Default
    ///
    /// None
    #[inline]
    pub fn message<S: Into<String>>(self, message: S) -> Self {
        Self { message: Some(message.into()), ..self }
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/src/events.rs | utils/ts-bookkeeping/src/events.rs | use serde::{Deserialize, Serialize};
use time::{Duration, OffsetDateTime};
use tsproto_types::crypto::EccKeyPubP256;
use crate::data::{
Channel, ChannelGroup, Client, Connection, ConnectionClientData, ConnectionServerData,
OptionalChannelData, OptionalClientData, OptionalServerData, Server, ServerGroup,
};
use crate::*;
include!(concat!(env!("OUT_DIR"), "/events.rs"));
/// Additional data for some events.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct ExtraInfo {
    /// Set for e.g. new clients to distinguish if they joined or are made available because we
    /// subscribed to a channel.
    pub reason: Option<Reason>,
}
/// An event gets fired when something in the data structure of a connection
/// changes or something happens like we receive a text message or get poked.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum Event {
    /// The object with this id was added.
    ///
    /// You can find the new object inside the connection data structure.
    ///
    /// Like a client gets assigned a new server group or a new client joins the
    /// server.
    PropertyAdded { id: PropertyId, invoker: Option<Invoker>, extra: ExtraInfo },
    /// The attribute with this id has changed.
    ///
    /// The second tuple item holds the old value of the changed attribute.
    ///
    /// E.g. a client changes its nickname or switches to another channel.
    PropertyChanged {
        id: PropertyId,
        // The value before the change; the new one is in the data structure.
        old: PropertyValue,
        invoker: Option<Invoker>,
        extra: ExtraInfo,
    },
    /// The object with this id was removed.
    ///
    /// The object is not accessible anymore in the connection data structure,
    /// but the second tuple item holds the removed object.
    ///
    /// This happens when a client leaves the server (including our own client)
    /// or a channel is removed.
    PropertyRemoved {
        id: PropertyId,
        // The removed object, no longer reachable through the data structure.
        old: PropertyValue,
        invoker: Option<Invoker>,
        extra: ExtraInfo,
    },
    Message {
        /// Where this message was sent to, in the server or channel chat or
        /// directly to client.
        ///
        /// This is our own client for private messages from others.
        target: MessageTarget,
        /// The user who wrote the message.
        invoker: Invoker,
        /// The content of the message.
        message: String,
    },
}
impl Event {
    /// The client that triggered this event, if it is known.
    pub fn get_invoker(&self) -> Option<&Invoker> {
        match self {
            Event::Message { invoker, .. } => Some(invoker),
            Event::PropertyAdded { invoker, .. }
            | Event::PropertyChanged { invoker, .. }
            | Event::PropertyRemoved { invoker, .. } => invoker.as_ref(),
        }
    }
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/src/data.rs | utils/ts-bookkeeping/src/data.rs | use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::net::{IpAddr, SocketAddr};
use std::{iter, mem};
use serde::{Deserialize, Serialize};
use time::{Duration, OffsetDateTime};
use tsproto_packets::packets::OutCommand;
use tsproto_types::crypto::EccKeyPubP256;
use tsproto_types::*;
use crate::events::{Event, ExtraInfo, PropertyId, PropertyValue, PropertyValueRef};
use crate::messages::s2c::InMessage;
use crate::messages::{c2s, s2c};
use crate::{Error, MessageTarget, Result};
include!(concat!(env!("OUT_DIR"), "/m2bdecls.rs"));
include!(concat!(env!("OUT_DIR"), "/structs.rs"));
include!(concat!(env!("OUT_DIR"), "/properties.rs"));
/// Book-to-messages helpers generated by the build script (`b2mdecls.rs`).
pub mod exts {
	use super::*;
	include!(concat!(env!("OUT_DIR"), "/b2mdecls.rs"));
}
/// Convert the `max_clients`/`is_max_clients_unlimited` fields of a message
/// part into an `Option<MaxClients>`.
///
/// Yields `None` (i.e. "ignore the value") when neither field is usable: the
/// count is missing, negative or does not fit into a `u16`.
macro_rules! max_clients {
	($msg:ident) => {{
		if $msg.is_max_clients_unlimited.unwrap_or_default() {
			Some(MaxClients::Unlimited)
		} else if $msg.max_clients.map(|i| i >= 0 && i <= u16::MAX as i32).unwrap_or_default() {
			// `unwrap` is safe: the branch is only taken when `map` saw `Some`.
			Some(MaxClients::Limited($msg.max_clients.unwrap() as u16))
		} else {
			// Max clients is less than zero or too high so ignore it
			None
		}
	}};
}
/// Build a `$to` struct literal by cloning the listed `$attr` fields from
/// `$from` and appending the explicit `$extra: $ex` field initializers.
///
/// Note that the extras must be written as full `field: expr` pairs — the
/// macro grammar has no field-init shorthand.
macro_rules! copy_attrs {
	($from:ident, $to:ident; $($attr:ident),* $(,)*; $($extra:ident: $ex:expr),* $(,)*) => {
		$to {
			$($attr: $from.$attr.clone(),)*
			$($extra: $ex,)*
		}
	};
}
impl Connection {
	/// Build the initial connection book from the `initserver` message.
	///
	/// `public_key` identifies the server we connected to. All client, channel
	/// and group maps start out empty and are filled by later notifications.
	pub fn new(public_key: EccKeyPubP256, msg: &s2c::InInitServer) -> Self {
		// An `initserver` notification consists of a single part.
		let packet = msg.iter().next().unwrap();
		Self {
			own_client: packet.client_id,
			server: copy_attrs!(packet, Server;
				administrative_domain,
				ask_for_privilegekey,
				codec_encryption_mode,
				created,
				default_channel_group,
				default_server_group,
				hostbanner_gfx_interval,
				hostbanner_gfx_url,
				hostbanner_mode,
				hostbanner_url,
				hostbutton_gfx_url,
				hostbutton_tooltip,
				hostbutton_url,
				hostmessage_mode,
				hostmessage,
				icon,
				max_clients,
				name,
				nickname,
				phonetic_name,
				platform,
				priority_speaker_dimm_modificator,
				protocol_version,
				temp_channel_default_delete_delay,
				version,
				welcome_message,
			;
				id: packet.virtual_server_id,
				// `copy_attrs!` requires explicit `field: expr` pairs, so the
				// shorthand `public_key,` is not possible here.
				public_key: public_key,
				ips: packet.ips.clone().unwrap_or_default(),
				// TODO Get from license struct
				license: LicenseType::NoLicense,
				optional_data: None,
				connection_data: None,
			),
			clients: HashMap::new(),
			channels: HashMap::new(),
			channel_groups: HashMap::new(),
			server_groups: HashMap::new(),
		}
	}
	/// Apply a server notification to the connection book.
	///
	/// Returns the events produced by the message and a flag telling whether
	/// the message type was handled at all, so the caller can warn about
	/// unhandled messages.
	pub fn handle_command(&mut self, msg: &s2c::InMessage) -> Result<(Vec<Event>, bool)> {
		// Returns if it handled the message so we can warn if a message is
		// unhandled.
		let (mut handled, mut events) = self.handle_command_generated(msg)?;
		// Handle special messages
		match msg {
			InMessage::TextMessage(msg) => {
				for msg in msg.iter() {
					let target = match msg.target {
						TextMessageTargetMode::Server => MessageTarget::Server,
						TextMessageTargetMode::Channel => MessageTarget::Channel,
						TextMessageTargetMode::Client => {
							// Private messages must name the chat partner.
							let client = if let Some(client) = msg.target_client_id {
								client
							} else {
								return Err(Error::MessageWithoutTargetClientId);
							};
							MessageTarget::Client(client)
						}
						TextMessageTargetMode::Unknown => {
							return Err(Error::UnknownTextMessageTargetMode);
						}
					};
					events.push(Event::Message {
						target,
						invoker: Invoker {
							name: msg.invoker_name.clone(),
							id: msg.invoker_id,
							uid: msg.invoker_uid.clone(),
						},
						message: msg.message.to_string(),
					});
					handled = true;
				}
			}
			InMessage::ClientPoke(msg) => {
				for msg in msg.iter() {
					events.push(Event::Message {
						target: MessageTarget::Poke(msg.invoker_id),
						invoker: Invoker {
							name: msg.invoker_name.clone(),
							id: msg.invoker_id,
							uid: msg.invoker_uid.clone(),
						},
						message: msg.message.to_string(),
					});
					handled = true;
				}
			}
			// Command errors change no book state; only mark them as handled.
			InMessage::CommandError(_) => handled = true,
			_ => {}
		}
		if let Some(invoker) = events.first().and_then(Event::get_invoker) {
			// If we know this client and the name change, adjust the name.
			if let Ok(client) = self.get_mut_client(invoker.id) {
				if client.name != invoker.name {
					let old = mem::replace(&mut client.name, invoker.name.clone());
					events.push(Event::PropertyChanged {
						id: PropertyId::ClientName(client.id),
						old: PropertyValue::String(old),
						invoker: None,
						extra: ExtraInfo { reason: None },
					});
				}
			}
		}
		Ok((events, handled))
	}
	// Accessor helpers, presumably called by the generated
	// `handle_command_generated` code (see the `m2bdecls.rs` include) — TODO
	// confirm. Lookups return `Error::NotFound` with the object kind and id
	// when the object is missing; the unused `&mut Vec<Event>` parameters keep
	// the signatures uniform for the generated callers.
	fn get_server(&self) -> Result<&Server> { Ok(&self.server) }
	fn get_mut_server(&mut self) -> Result<&mut Server> { Ok(&mut self.server) }
	fn get_channel_group(&self, group: ChannelGroupId) -> Result<&ChannelGroup> {
		self.channel_groups
			.get(&group)
			.ok_or_else(|| Error::NotFound("ChannelGroup", group.to_string()))
	}
	// Returns the previously stored group, if any.
	fn add_channel_group(
		&mut self, group: ChannelGroupId, r: ChannelGroup, _: &mut Vec<Event>,
	) -> Result<Option<ChannelGroup>> {
		Ok(self.channel_groups.insert(group, r))
	}
	fn get_server_group(&self, group: ServerGroupId) -> Result<&ServerGroup> {
		self.server_groups
			.get(&group)
			.ok_or_else(|| Error::NotFound("ServerGroup", group.to_string()))
	}
	fn add_server_group(
		&mut self, group: ServerGroupId, r: ServerGroup, _: &mut Vec<Event>,
	) -> Result<Option<ServerGroup>> {
		Ok(self.server_groups.insert(group, r))
	}
	fn get_optional_server_data(&self) -> Result<Option<&OptionalServerData>> {
		Ok(self.server.optional_data.as_ref())
	}
	// Stores `r` and hands back the old value.
	fn replace_optional_server_data(
		&mut self, r: OptionalServerData, _: &mut Vec<Event>,
	) -> Result<Option<OptionalServerData>> {
		Ok(mem::replace(&mut self.server.optional_data, Some(r)))
	}
	fn remove_optional_server_data(
		&mut self, _: &mut Vec<Event>,
	) -> Result<Option<OptionalServerData>> {
		Ok(self.server.optional_data.take())
	}
	fn get_connection_server_data(&self) -> Result<Option<&ConnectionServerData>> {
		Ok(self.server.connection_data.as_ref())
	}
	fn replace_connection_server_data(
		&mut self, r: ConnectionServerData, _: &mut Vec<Event>,
	) -> Result<Option<ConnectionServerData>> {
		Ok(mem::replace(&mut self.server.connection_data, Some(r)))
	}
	fn get_connection(&self) -> Result<&Connection> { Ok(self) }
	fn get_client(&self, client: ClientId) -> Result<&Client> {
		self.clients.get(&client).ok_or_else(|| Error::NotFound("Client", client.to_string()))
	}
	fn get_mut_client(&mut self, client: ClientId) -> Result<&mut Client> {
		self.clients.get_mut(&client).ok_or_else(|| Error::NotFound("Client", client.to_string()))
	}
	fn add_client(
		&mut self, client: ClientId, r: Client, _: &mut Vec<Event>,
	) -> Result<Option<Client>> {
		Ok(self.clients.insert(client, r))
	}
	fn remove_client(&mut self, client: ClientId, _: &mut Vec<Event>) -> Result<Option<Client>> {
		Ok(self.clients.remove(&client))
	}
fn get_connection_client_data(
&self, client: ClientId,
) -> Result<Option<&ConnectionClientData>> {
if let Some(c) = self.clients.get(&client) {
Ok(c.connection_data.as_ref())
} else {
Err(Error::NotFound("Client", client.to_string()))
}
}
fn replace_connection_client_data(
&mut self, client: ClientId, r: ConnectionClientData, _: &mut Vec<Event>,
) -> Result<Option<ConnectionClientData>> {
if let Some(client) = self.clients.get_mut(&client) {
Ok(mem::replace(&mut client.connection_data, Some(r)))
} else {
Err(Error::NotFound("Client", client.to_string()))
}
}
fn get_optional_client_data(&self, client: ClientId) -> Result<Option<&OptionalClientData>> {
if let Some(c) = self.clients.get(&client) {
Ok(c.optional_data.as_ref())
} else {
Err(Error::NotFound("Client", client.to_string()))
}
}
fn replace_optional_client_data(
&mut self, client: ClientId, r: OptionalClientData, _: &mut Vec<Event>,
) -> Result<Option<OptionalClientData>> {
if let Some(c) = self.clients.get_mut(&client) {
Ok(mem::replace(&mut c.optional_data, Some(r)))
} else {
Err(Error::NotFound("Client", client.to_string()))
}
}
	fn get_channel(&self, channel: ChannelId) -> Result<&Channel> {
		self.channels.get(&channel).ok_or_else(|| Error::NotFound("Channel", channel.to_string()))
	}
	fn get_mut_channel(&mut self, channel: ChannelId) -> Result<&mut Channel> {
		self.channels
			.get_mut(&channel)
			.ok_or_else(|| Error::NotFound("Channel", channel.to_string()))
	}
	// Inserts the channel and splices it into the sibling order chain.
	fn add_channel(
		&mut self, channel: ChannelId, r: Channel, events: &mut Vec<Event>,
	) -> Result<Option<Channel>> {
		self.channel_order_insert(r.id, r.order, r.parent, events);
		Ok(self.channels.insert(channel, r))
	}
	// Removes the channel and repairs the sibling order chain around it.
	fn remove_channel(
		&mut self, channel: ChannelId, events: &mut Vec<Event>,
	) -> Result<Option<Channel>> {
		let old = self.channels.remove(&channel);
		if let Some(ch) = &old {
			self.channel_order_remove(ch.id, ch.order, events);
		}
		Ok(old)
	}
	fn get_optional_channel_data(
		&self, channel: ChannelId,
	) -> Result<Option<&OptionalChannelData>> {
		if let Some(c) = self.channels.get(&channel) {
			Ok(c.optional_data.as_ref())
		} else {
			Err(Error::NotFound("Channel", channel.to_string()))
		}
	}
	fn replace_optional_channel_data(
		&mut self, channel: ChannelId, r: OptionalChannelData, _: &mut Vec<Event>,
	) -> Result<Option<OptionalChannelData>> {
		if let Some(c) = self.channels.get_mut(&channel) {
			Ok(mem::replace(&mut c.optional_data, Some(r)))
		} else {
			Err(Error::NotFound("Channel", channel.to_string()))
		}
	}
	fn remove_optional_channel_data(
		&mut self, channel: ChannelId, _: &mut Vec<Event>,
	) -> Result<Option<OptionalChannelData>> {
		if let Some(c) = self.channels.get_mut(&channel) {
			Ok(c.optional_data.take())
		} else {
			Err(Error::NotFound("Channel", channel.to_string()))
		}
	}
	// Backing functions for MessageToBook declarations
	// Constant-result stubs referenced by the generated declarations.
	fn return_false<T>(&self, _: T, _: &mut Vec<Event>) -> Result<bool> { Ok(false) }
	fn return_none<T, O>(&self, _: T, _: &mut Vec<Event>) -> Result<Option<O>> { Ok(None) }
	// `Some(None)` — presumably "update the optional field to no value".
	fn return_some_none<T, O>(&self, _: T, _: &mut Vec<Event>) -> Result<Option<Option<O>>> {
		Ok(Some(None))
	}
	// No-op hook that discards all three arguments.
	fn void_fun<T, U, V>(&self, _: T, _: U, _: V) -> Result<()> { Ok(()) }
	/// Parse channel and family max-client limits from a `channelcreated`
	/// notification. Out-of-range or missing values map to `None` (ignored).
	fn max_clients_cc_fun(
		&self, msg: &s2c::InChannelCreatedPart, _: &mut Vec<Event>,
	) -> Result<(Option<MaxClients>, Option<MaxClients>)> {
		let ch = max_clients!(msg);
		// The family limit additionally knows an "inherited" state.
		let ch_fam = if msg.is_max_family_clients_unlimited.unwrap_or_default() {
			Some(MaxClients::Unlimited)
		} else if msg.inherits_max_family_clients.unwrap_or_default() {
			Some(MaxClients::Inherited)
		} else if msg.max_family_clients.map(|i| i >= 0 && i <= u16::MAX as i32).unwrap_or_default()
		{
			Some(MaxClients::Limited(msg.max_family_clients.unwrap() as u16))
		} else {
			// Max clients is less than zero or too high so ignore it
			None
		};
		Ok((ch, ch_fam))
	}
	/// Apply max-client changes from a `channeledited` notification.
	///
	/// For each limit that parses to a usable value, the old value is captured
	/// in a `PropertyChanged` event *before* the field is overwritten.
	fn max_clients_ce_fun(
		&mut self, channel_id: ChannelId, msg: &s2c::InChannelEditedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		let channel = self.get_mut_channel(channel_id)?;
		let ch = max_clients!(msg);
		if let Some(ch) = ch {
			events.push(Event::PropertyChanged {
				id: PropertyId::ChannelMaxClients(channel_id),
				old: PropertyValue::OptionMaxClients(channel.max_clients.take()),
				invoker: msg.get_invoker(),
				extra: ExtraInfo { reason: Some(msg.reason) },
			});
			channel.max_clients = Some(ch);
		}
		let ch_fam = if msg.is_max_family_clients_unlimited.unwrap_or_default() {
			Some(MaxClients::Unlimited)
		} else if msg.inherits_max_family_clients.unwrap_or_default() {
			Some(MaxClients::Inherited)
		} else if msg.max_family_clients.map(|i| i >= 0 && i <= u16::MAX as i32).unwrap_or_default()
		{
			Some(MaxClients::Limited(msg.max_family_clients.unwrap() as u16))
		} else {
			// Max clients is less than zero or too high so ignore it
			None
		};
		if let Some(ch_fam) = ch_fam {
			events.push(Event::PropertyChanged {
				id: PropertyId::ChannelMaxFamilyClients(channel_id),
				old: PropertyValue::OptionMaxClients(channel.max_family_clients.take()),
				invoker: msg.get_invoker(),
				extra: ExtraInfo { reason: Some(msg.reason) },
			});
			channel.max_family_clients = Some(ch_fam);
		}
		Ok(())
	}
	/// Parse max-client limits from a `channellist` part. Unlike the created/
	/// edited variants, the fields here are not optional.
	fn max_clients_cl_fun(
		&self, msg: &s2c::InChannelListPart, _: &mut Vec<Event>,
	) -> Result<(Option<MaxClients>, Option<MaxClients>)> {
		let max_clients: i32 = msg.max_clients;
		let ch = if msg.is_max_clients_unlimited {
			Some(MaxClients::Unlimited)
		} else if max_clients >= 0 && max_clients <= u16::MAX as i32 {
			Some(MaxClients::Limited(max_clients as u16))
		} else {
			// Max clients is less than zero or too high so ignore it
			None
		};
		let max_clients: i32 = msg.max_family_clients;
		let ch_fam = if msg.is_max_family_clients_unlimited {
			Some(MaxClients::Unlimited)
		} else if msg.inherits_max_family_clients {
			Some(MaxClients::Inherited)
		} else if max_clients >= 0 && max_clients <= u16::MAX as i32 {
			Some(MaxClients::Limited(max_clients as u16))
		} else {
			// NOTE(review): unlike every other max-clients parser in this file
			// (which yields `None` here), an out-of-range family count (e.g.
			// `-1`) falls back to unlimited — confirm this asymmetry is
			// intended.
			Some(MaxClients::Unlimited)
		};
		Ok((ch, ch_fam))
	}
	/// Derive the channel type from a `channelcreated` part's flags.
	fn channel_type_cc_fun(
		&self, msg: &s2c::InChannelCreatedPart, _: &mut Vec<Event>,
	) -> Result<ChannelType> {
		Ok(Self::channel_flags_to_type(msg.is_permanent, msg.is_semi_permanent))
	}
	/// Apply a channel-type change from a `channeledited` part.
	///
	/// Emits a `PropertyChanged` event whenever either flag is present, even
	/// if the resulting type equals the old one.
	fn channel_type_ce_fun(
		&mut self, channel_id: ChannelId, msg: &s2c::InChannelEditedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		let channel = self.get_mut_channel(channel_id)?;
		if msg.is_permanent.is_none() && msg.is_semi_permanent.is_none() {
			return Ok(());
		}
		let typ = Self::channel_flags_to_type(msg.is_permanent, msg.is_semi_permanent);
		events.push(Event::PropertyChanged {
			id: PropertyId::ChannelChannelType(channel_id),
			old: PropertyValue::ChannelType(channel.channel_type),
			invoker: msg.get_invoker(),
			extra: ExtraInfo { reason: Some(msg.reason) },
		});
		channel.channel_type = typ;
		Ok(())
	}
	/// Derive the channel type from a `channellist` part (flags are mandatory
	/// there).
	fn channel_type_cl_fun(
		&self, msg: &s2c::InChannelListPart, _: &mut Vec<Event>,
	) -> Result<ChannelType> {
		Ok(Self::channel_flags_to_type(Some(msg.is_permanent), Some(msg.is_semi_permanent)))
	}
	/// Map the (permanent, semi-permanent) flag pair to a channel type;
	/// `permanent` wins over `semi_permanent`, missing flags count as `false`.
	fn channel_flags_to_type(perm: Option<bool>, semi: Option<bool>) -> ChannelType {
		match (perm.unwrap_or_default(), semi.unwrap_or_default()) {
			(true, _) => ChannelType::Permanent,
			(_, true) => ChannelType::SemiPermanent,
			(false, false) => ChannelType::Temporary,
		}
	}
	/// Default a missing codec to Opus voice on channel creation.
	fn channel_codec_cc_fun(
		&self, msg: &s2c::InChannelCreatedPart, _: &mut Vec<Event>,
	) -> Result<Codec> {
		Ok(msg.codec.unwrap_or(Codec::OpusVoice))
	}
	/// Collapse the away flag + message pair from `cliententerview` into a
	/// single optional away message.
	fn away_cev_fun(
		&self, msg: &s2c::InClientEnterViewPart, _: &mut Vec<Event>,
	) -> Result<Option<String>> {
		if msg.is_away { Ok(Some(msg.away_message.clone())) } else { Ok(None) }
	}
	/// Derive the client type; a query client with a server-admin uid is
	/// upgraded to an admin query.
	fn client_type_cev_fun(
		&self, msg: &s2c::InClientEnterViewPart, _: &mut Vec<Event>,
	) -> Result<ClientType> {
		if msg.uid.is_server_admin() {
			if let ClientType::Query { .. } = msg.client_type {
				return Ok(ClientType::Query { admin: true });
			}
		}
		Ok(msg.client_type)
	}
	/// Apply away-status changes from a `clientupdated` part.
	///
	/// A change event is emitted when the away flag toggles, or when the away
	/// message changes while the client stays away.
	fn away_cu_fun(
		&mut self, client_id: ClientId, msg: &s2c::InClientUpdatedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		let client = self.get_mut_client(client_id)?;
		if let Some(is_away) = msg.is_away {
			// The flag is present: only act when it actually toggles.
			if is_away != client.away_message.is_some() {
				let away =
					if is_away { Some(msg.away_message.clone().unwrap_or_default()) } else { None };
				events.push(Event::PropertyChanged {
					id: PropertyId::ClientAwayMessage(client_id),
					old: PropertyValue::OptionString(client.away_message.take()),
					invoker: msg.get_invoker(),
					extra: ExtraInfo { reason: None },
				});
				client.away_message = away;
			}
		} else if let Some(away_message) = &msg.away_message {
			// No flag, but a message: update only if already away and the text
			// differs.
			if let Some(cur_msg) = &client.away_message {
				if away_message != cur_msg {
					events.push(Event::PropertyChanged {
						id: PropertyId::ClientAwayMessage(client_id),
						old: PropertyValue::OptionString(client.away_message.take()),
						invoker: msg.get_invoker(),
						extra: ExtraInfo { reason: None },
					});
					client.away_message = Some(away_message.clone());
				}
			}
		}
		Ok(())
	}
	/// Build the pending talk-power request from `cliententerview`; a zero or
	/// negative timestamp means no request is pending.
	fn talk_power_cev_fun(
		&self, msg: &s2c::InClientEnterViewPart, _: &mut Vec<Event>,
	) -> Result<Option<TalkPowerRequest>> {
		if msg.talk_power_request_time.unix_timestamp() > 0 {
			Ok(Some(TalkPowerRequest {
				time: msg.talk_power_request_time,
				message: msg.talk_power_request_message.clone(),
			}))
		} else {
			Ok(None)
		}
	}
	/// Apply talk-power request updates from `clientupdated`; a zero or
	/// negative timestamp clears the request.
	fn talk_power_cu_fun(
		&mut self, client_id: ClientId, msg: &s2c::InClientUpdatedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		if let Some(talk_request) = msg.talk_power_request_time {
			let client = self.get_mut_client(client_id)?;
			let talk_request = if talk_request.unix_timestamp() > 0 {
				Some(TalkPowerRequest {
					time: talk_request,
					message: msg.talk_power_request_message.clone().unwrap_or_default(),
				})
			} else {
				None
			};
			events.push(Event::PropertyChanged {
				id: PropertyId::ClientTalkPowerRequest(client_id),
				old: PropertyValue::OptionTalkPowerRequest(client.talk_power_request.take()),
				invoker: msg.get_invoker(),
				extra: ExtraInfo { reason: None },
			});
			client.talk_power_request = talk_request;
		}
		Ok(())
	}
	/// Parse the (ip, port) pair of a `clientconnectioninfo` part into a
	/// socket address. Surrounding `[`/`]` are stripped before parsing —
	/// presumably for bracketed IPv6 literals. An empty ip yields
	/// `Some(None)` (address explicitly unknown).
	fn address_fun(
		&self, msg: &s2c::InClientConnectionInfoPart, _: &mut Vec<Event>,
	) -> Result<Option<Option<SocketAddr>>> {
		if let (Some(ip), Some(port)) = (&msg.ip, &msg.port) {
			if !ip.is_empty() {
				return Ok(Some(Some(SocketAddr::new(
					ip.trim_matches(&['[', ']'][..]).parse().map_err(Error::InvalidConnectionIp)?,
					*port,
				))));
			}
		}
		Ok(Some(None))
	}
	/// Mark a channel as subscribed, emitting the change event with the
	/// previous flag value.
	fn channel_subscribe_fun(
		&mut self, channel_id: ChannelId, _: &s2c::InChannelSubscribedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		let channel = self.get_mut_channel(channel_id)?;
		events.push(Event::PropertyChanged {
			id: PropertyId::ChannelSubscribed(channel_id),
			old: PropertyValue::Bool(channel.subscribed),
			invoker: None,
			extra: ExtraInfo { reason: None },
		});
		channel.subscribed = true;
		Ok(())
	}
	/// Mark a channel as unsubscribed and drop all clients known to be in it,
	/// emitting a `PropertyRemoved` event per dropped client.
	fn channel_unsubscribe_fun(
		&mut self, channel_id: ChannelId, _: &s2c::InChannelUnsubscribedPart,
		events: &mut Vec<Event>,
	) -> Result<()> {
		let channel = self.get_mut_channel(channel_id)?;
		events.push(Event::PropertyChanged {
			id: PropertyId::ChannelSubscribed(channel_id),
			old: PropertyValue::Bool(channel.subscribed),
			invoker: None,
			extra: ExtraInfo { reason: None },
		});
		channel.subscribed = false;
		// Remove all known clients from this channel
		let remove_clients = self
			.clients
			.values()
			.filter_map(|c| if c.channel == channel_id { Some(c.id) } else { None })
			.collect::<Vec<_>>();
		for id in remove_clients {
			events.push(Event::PropertyRemoved {
				id: PropertyId::Client(id),
				old: PropertyValue::Client(self.clients.remove(&id).unwrap()),
				invoker: None,
				extra: ExtraInfo { reason: None },
			});
		}
		Ok(())
	}
fn channel_order_remove(
&mut self, channel_id: ChannelId, channel_order: ChannelId, events: &mut Vec<Event>,
) {
// [ C:7 | O:_ ]
// [ C:5 | O:7 ] ─>X
// [ C:_ | O:5 ] (Upd: O -> 7)
self.channels.values_mut().any(|c| {
if c.order == channel_id && c.id != channel_id {
events.push(Event::PropertyChanged {
id: PropertyId::ChannelOrder(c.id),
old: PropertyValue::ChannelId(c.order),
invoker: None,
extra: ExtraInfo { reason: None },
});
c.order = channel_order;
true
} else {
false
}
});
}
fn channel_order_insert(
&mut self, channel_id: ChannelId, channel_order: ChannelId, channel_parent: ChannelId,
events: &mut Vec<Event>,
) {
// [ C:7 | O:_ ]
// [ <── (New: C:5 | O:7)
// [ C:_ | O:7 ] (Upd: O -> 5)
//
// Also work for the first channel, the order will be 0.
self.channels.values_mut().any(|c| {
if c.order == channel_order && c.parent == channel_parent && c.id != channel_id {
events.push(Event::PropertyChanged {
id: PropertyId::ChannelOrder(c.id),
old: PropertyValue::ChannelId(c.order),
invoker: None,
extra: ExtraInfo { reason: None },
});
c.order = channel_id;
true
} else {
false
}
});
}
	/// Fix up sibling order for a channel created via `channelcreated`.
	fn channel_order_cc_fun(
		&mut self, msg: &s2c::InChannelCreatedPart, events: &mut Vec<Event>,
	) -> Result<ChannelId> {
		self.channel_order_insert(msg.channel_id, msg.order, msg.parent_id, events);
		Ok(msg.order)
	}
	/// Fix up sibling order for a `channeledited` notification.
	fn channel_order_ce_fun(
		&mut self, channel_id: ChannelId, msg: &s2c::InChannelEditedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		self.channel_order_move_fun(channel_id, msg.order, msg.parent_id, events)
	}
	/// Fix up sibling order for a `channelmoved` notification (order and
	/// parent are mandatory there).
	fn channel_order_cm_fun(
		&mut self, channel_id: ChannelId, msg: &s2c::InChannelMovedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		self.channel_order_move_fun(channel_id, Some(msg.order), Some(msg.parent_id), events)
	}
	/// Re-link the order chain when a channel changes order and/or parent:
	/// update the channel's own `order`, unlink it from its old position and
	/// splice it in at the new one. Does nothing when neither value is given.
	fn channel_order_move_fun(
		&mut self, channel_id: ChannelId, new_order: Option<ChannelId>, parent: Option<ChannelId>,
		events: &mut Vec<Event>,
	) -> Result<()> {
		if new_order.is_some() || parent.is_some() {
			let old_order;
			let new_parent;
			{
				// Scope limits the mutable borrow of the moved channel so the
				// chain-repair helpers below can borrow `self` again.
				let channel = self.get_mut_channel(channel_id)?;
				old_order = channel.order;
				new_parent = parent.unwrap_or(channel.parent);
				if let Some(order) = new_order {
					events.push(Event::PropertyChanged {
						id: PropertyId::ChannelOrder(channel.id),
						old: PropertyValue::ChannelId(channel.order),
						invoker: None,
						extra: ExtraInfo { reason: None },
					});
					channel.order = order;
				}
			}
			self.channel_order_remove(channel_id, old_order, events);
			self.channel_order_insert(
				channel_id,
				new_order.unwrap_or(old_order),
				new_parent,
				events,
			);
		}
		Ok(())
	}
	/// When our own client moves into a real channel (id != 0), that channel
	/// becomes subscribed; moves of other clients are ignored here.
	fn subscribe_channel_fun(
		&mut self, client_id: ClientId, msg: &s2c::InClientMovedPart, events: &mut Vec<Event>,
	) -> Result<()> {
		if client_id == self.own_client && msg.target_channel_id.0 != 0 {
			let channel = self.get_mut_channel(msg.target_channel_id)?;
			events.push(Event::PropertyChanged {
				id: PropertyId::ChannelSubscribed(msg.target_channel_id),
				old: PropertyValue::Bool(channel.subscribed),
				invoker: None,
				extra: ExtraInfo { reason: None },
			});
			channel.subscribed = true;
		}
		Ok(())
	}
// Book to messages
fn away_fun_b2m(msg: Option<&str>) -> (bool, &str) {
if let Some(msg) = msg { (true, msg) } else { (false, "") }
}
}
impl Client {
	// Book to messages
	/// Encode a plain-text password into the wire representation.
	fn password_b2m(password: &str) -> String {
		tsproto_types::crypto::encode_password(password.as_bytes())
	}
	// Identity mapping used to fill a channel id argument.
	fn channel_id_b2m(&self, channel: ChannelId) -> ChannelId { channel }
	/// Build the command for a private text message to this client.
	pub fn send_textmessage(&self, message: &str) -> OutCommand {
		c2s::OutSendTextMessageMessage::new(&mut iter::once(c2s::OutSendTextMessagePart {
			target: TextMessageTargetMode::Client,
			target_client_id: Some(self.id),
			message: message.into(),
		}))
	}
	/// Build the command to poke this client with `message`.
	pub fn poke(&self, message: &str) -> OutCommand {
		c2s::OutClientPokeRequestMessage::new(&mut iter::once(c2s::OutClientPokeRequestPart {
			client_id: self.id,
			message: message.into(),
		}))
	}
}
impl Channel {
	// Book to messages
	/// Encode a plain-text password into the wire representation.
	fn password_b2m(&self, password: &str) -> String {
		tsproto_types::crypto::encode_password(password.as_bytes())
	}
	// Same as `password_b2m` but without a receiver (associated-fn variant for
	// the generated code).
	fn password_b2m2(password: &str) -> String {
		tsproto_types::crypto::encode_password(password.as_bytes())
	}
	/// Encode an optional password into the `(has_password, password)` pair
	/// the outgoing command expects.
	fn password_flagged_b2m(password: Option<&str>) -> (bool, Cow<'static, str>) {
		if let Some(password) = password {
			(true, tsproto_types::crypto::encode_password(password.as_bytes()).into())
		} else {
			(false, "".into())
		}
	}
	// NOTE(review): judging from `SemiPermanent => (true, false)` the tuple
	// order is (is_semi_permanent, is_permanent) — confirm against the
	// generated b2m declarations.
	fn channel_type_fun_b2m(channel_type: ChannelType) -> (bool, bool) {
		match channel_type {
			ChannelType::Temporary => (false, false),
			ChannelType::SemiPermanent => (true, false),
			ChannelType::Permanent => (false, true),
		}
	}
	// Tuple is (max_clients, is_unlimited); `Inherited` has no flag here and
	// degrades to a limit of 0 — TODO confirm intended.
	fn max_clients_fun_b2m(max_clients: MaxClients) -> (i32, bool) {
		match max_clients {
			MaxClients::Inherited => (0, false),
			MaxClients::Unlimited => (0, true),
			MaxClients::Limited(num) => (num.into(), false),
		}
	}
	// Tuple is (max_family_clients, is_unlimited, inherits).
	fn max_family_clients_fun_b2m(max_clients: MaxClients) -> (i32, bool, bool) {
		match max_clients {
			MaxClients::Inherited => (0, false, true),
			MaxClients::Unlimited => (0, true, false),
			MaxClients::Limited(num) => (num.into(), false, false),
		}
	}
	// Identity mapping used to fill a channel id argument.
	fn channel_id_b2m(&self, channel: ChannelId) -> ChannelId { channel }
	/// Build the command to subscribe to or unsubscribe from this channel.
	pub fn set_subscribed(&self, subscribed: bool) -> OutCommand {
		if subscribed {
			c2s::OutChannelSubscribeMessage::new(&mut iter::once(c2s::OutChannelSubscribePart {
				channel_id: self.id,
			}))
		} else {
			c2s::OutChannelUnsubscribeMessage::new(&mut iter::once(
				c2s::OutChannelUnsubscribePart { channel_id: self.id },
			))
		}
	}
}
/// The `ChannelOptions` are used to set initial properties of a new channel.
///
/// A channel can be created with [`Server::add_channel`]. The only necessary
/// property of a channel is the name, all other properties will be set to their
/// default value.
#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct ChannelOptions<'a> {
	name: &'a str,
	description: Option<&'a str>,
	parent_id: Option<ChannelId>,
	codec: Option<Codec>,
	codec_quality: Option<u8>,
	delete_delay: Option<Duration>,
	password: Option<&'a str>,
	is_default: bool,
	max_clients: Option<MaxClients>,
	max_family_clients: Option<MaxClients>,
	channel_type: Option<ChannelType>,
	is_unencrypted: Option<bool>,
	// Id of the channel this one is sorted below (see `order`).
	order: Option<ChannelId>,
	phonetic_name: Option<&'a str>,
	topic: Option<&'a str>,
}
impl<'a> ChannelOptions<'a> {
	/// Create new `ChannelOptions` to add a new channel to a server.
	///
	/// # Arguments
	/// You have to supply a name for the new channel. All other properties are
	/// optional.
	pub fn new(name: &'a str) -> Self {
		Self {
			name,
			description: None,
			parent_id: None,
			codec: None,
			codec_quality: None,
			delete_delay: None,
			password: None,
			is_default: false,
			max_clients: None,
			max_family_clients: None,
			channel_type: None,
			is_unencrypted: None,
			order: None,
			phonetic_name: None,
			topic: None,
		}
	}
	// Builder-style setters: each consumes `self`, sets one property and
	// returns the options for chaining.
	pub fn description(mut self, description: &'a str) -> Self {
		self.description = Some(description);
		self
	}
	pub fn parent_id(mut self, parent_id: ChannelId) -> Self {
		self.parent_id = Some(parent_id);
		self
	}
	pub fn codec(mut self, codec: Codec) -> Self {
		self.codec = Some(codec);
		self
	}
	pub fn codec_quality(mut self, codec_quality: u8) -> Self {
		self.codec_quality = Some(codec_quality);
		self
	}
	pub fn delete_delay(mut self, delete_delay: Duration) -> Self {
		self.delete_delay = Some(delete_delay);
		self
	}
	pub fn password(mut self, password: &'a str) -> Self {
		self.password = Some(password);
		self
	}
	/// Mark the new channel as the server's default channel.
	pub fn default(mut self) -> Self {
		self.is_default = true;
		self
	}
	pub fn max_clients(mut self, max_clients: MaxClients) -> Self {
		self.max_clients = Some(max_clients);
		self
	}
	pub fn max_family_clients(mut self, max_family_clients: MaxClients) -> Self {
		self.max_family_clients = Some(max_family_clients);
		self
	}
	pub fn channel_type(mut self, channel_type: ChannelType) -> Self {
		self.channel_type = Some(channel_type);
		self
	}
	pub fn is_unencrypted(mut self, is_unencrypted: bool) -> Self {
		self.is_unencrypted = Some(is_unencrypted);
		self
	}
	/// The previous order
	pub fn order(mut self, order: ChannelId) -> Self {
		self.order = Some(order);
		self
	}
	pub fn phonetic_name(mut self, phonetic_name: &'a str) -> Self {
		self.phonetic_name = Some(phonetic_name);
		self
	}
	pub fn topic(mut self, topic: &'a str) -> Self {
		self.topic = Some(topic);
		self
	}
}
impl Server {
	/// Build the command that creates a new channel from the given options.
	///
	/// The `MaxClients`/`ChannelType` enums are exploded into the individual
	/// protocol flags; flags that are `false` are simply omitted (`None`).
	pub fn add_channel(&self, options: ChannelOptions) -> OutCommand {
		let inherits_max_family_clients = options
			.max_family_clients
			.as_ref()
			.and_then(|m| if let MaxClients::Inherited = m { Some(true) } else { None });
		let is_max_family_clients_unlimited = options
			.max_family_clients
			.as_ref()
			.and_then(|m| if let MaxClients::Unlimited = m { Some(true) } else { None });
		let max_family_clients = options
			.max_family_clients
			.as_ref()
			.and_then(|m| if let MaxClients::Limited(n) = m { Some(*n as i32) } else { None });
		let is_max_clients_unlimited = options
			.max_clients
			.as_ref()
			.and_then(|m| if let MaxClients::Unlimited = m { Some(true) } else { None });
		let max_clients = options
			.max_clients
			.as_ref()
			.and_then(|m| if let MaxClients::Limited(n) = m { Some(*n as i32) } else { None });
		let is_permanent = options
			.channel_type
			.as_ref()
			.and_then(|t| if *t == ChannelType::Permanent { Some(true) } else { None });
		let is_semi_permanent = options
			.channel_type
			.as_ref()
			.and_then(|t| if *t == ChannelType::SemiPermanent { Some(true) } else { None });
		c2s::OutChannelCreateMessage::new(&mut iter::once(c2s::OutChannelCreatePart {
			name: options.name.into(),
			description: options.description.map(Into::into),
			parent_id: options.parent_id,
			codec: options.codec,
			codec_quality: options.codec_quality,
			delete_delay: options.delete_delay,
			has_password: if options.password.is_some() { Some(true) } else { None },
			is_default: if options.is_default { Some(true) } else { None },
			inherits_max_family_clients,
			is_max_family_clients_unlimited,
			is_max_clients_unlimited,
			is_permanent,
			is_semi_permanent,
			max_family_clients,
			max_clients,
			is_unencrypted: options.is_unencrypted,
			order: options.order,
			// Passwords go over the wire in encoded form.
			password: options
				.password
				.map(|p| tsproto_types::crypto::encode_password(p.as_bytes()).into()),
			phonetic_name: options.phonetic_name.map(Into::into),
			topic: options.topic.map(Into::into),
		}))
	}
	/// Build the command for a message in the server chat.
	pub fn send_textmessage(&self, message: &str) -> OutCommand {
		c2s::OutSendTextMessageMessage::new(&mut iter::once(c2s::OutSendTextMessagePart {
			target: TextMessageTargetMode::Server,
			target_client_id: None,
			message: message.into(),
		}))
	}
	/// Subscribe or unsubscribe from all channels.
	pub fn set_subscribed(&self, subscribed: bool) -> OutCommand {
		if subscribed {
			c2s::OutChannelSubscribeAllMessage::new()
		} else {
			c2s::OutChannelUnsubscribeAllMessage::new()
		}
	}
	// Constant providers for the generated book-to-messages code.
	fn zero_channel_id(&self) -> ChannelId { ChannelId(0) }
	fn empty_string(&self) -> &'static str { "" }
	// Book to messages
	/// Encode an optional password; a missing password becomes the empty
	/// string.
	fn password_b2m(password: Option<&str>) -> Cow<'static, str> {
		if let Some(password) = password {
			tsproto_types::crypto::encode_password(password.as_bytes()).into()
		} else {
			"".into()
		}
	}
}
impl Connection {
pub fn send_message(&self, target: MessageTarget, message: &str) -> OutCommand {
match target {
MessageTarget::Server => {
c2s::OutSendTextMessageMessage::new(&mut iter::once(c2s::OutSendTextMessagePart {
target: TextMessageTargetMode::Server,
target_client_id: None,
message: message.into(),
}))
}
MessageTarget::Channel => {
c2s::OutSendTextMessageMessage::new(&mut iter::once(c2s::OutSendTextMessagePart {
target: TextMessageTargetMode::Channel,
target_client_id: None,
message: message.into(),
}))
}
MessageTarget::Client(id) => {
c2s::OutSendTextMessageMessage::new(&mut iter::once(c2s::OutSendTextMessagePart {
target: TextMessageTargetMode::Client,
target_client_id: Some(id),
message: message.into(),
}))
}
MessageTarget::Poke(id) => c2s::OutClientPokeRequestMessage::new(&mut iter::once(
c2s::OutClientPokeRequestPart { client_id: id, message: message.into() },
)),
}
}
pub fn disconnect(&self, options: crate::DisconnectOptions) -> OutCommand {
c2s::OutDisconnectMessage::new(&mut iter::once(c2s::OutDisconnectPart {
reason: options.reason,
reason_message: options.message.map(Into::into),
}))
}
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/benches/command.rs | utils/ts-bookkeeping/benches/command.rs | use std::iter;
use criterion::{criterion_group, criterion_main, Bencher, Criterion};
use once_cell::sync::Lazy;
use ts_bookkeeping::messages::s2c::{self, InMessage};
use tsproto_packets::packets::{Direction, Flags, OutPacket, PacketType};
const SHORT_CMD: &[u8] = b"notifyclientleftview cfid=1 ctid=0 clid=61";
const LONG_CMD: &[u8] = b"channellist cid=2 cpid=0 channel_name=Trusted\\sChannel channel_topic channel_codec=0 channel_codec_quality=0 channel_maxclients=0 channel_maxfamilyclients=-1 channel_order=1 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=0 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic channel_icon_id=0 channel_flag_private=0|cid=4 cpid=2 channel_name=Ding\\s\xe2\x80\xa2\\s1\\s\\p\\sSplamy\xc2\xb4s\\sBett channel_topic channel_codec=4 channel_codec_quality=7 channel_maxclients=-1 channel_maxfamilyclients=-1 channel_order=0 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=1 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic=Neo\\sSeebi\\sEvangelion channel_icon_id=0 channel_flag_private=0|cid=6 cpid=2 channel_name=Ding\\s\xe2\x80\xa2\\s2\\s\\p\\sThe\\sBook\\sof\\sHeavy\\sMetal channel_topic channel_codec=2 channel_codec_quality=7 channel_maxclients=-1 channel_maxfamilyclients=-1 channel_order=4 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=1 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic=Not\\senought\\sChannels channel_icon_id=0 channel_flag_private=0|cid=30 cpid=2 
channel_name=Ding\\s\xe2\x80\xa2\\s3\\s\\p\\sSenpai\\sGef\xc3\xa4hrlich channel_topic channel_codec=2 channel_codec_quality=7 channel_maxclients=-1 channel_maxfamilyclients=-1 channel_order=6 channel_flag_permanent=1 channel_flag_semi_permanent=0 channel_flag_default=0 channel_flag_password=0 channel_codec_latency_factor=1 channel_codec_is_unencrypted=1 channel_delete_delay=0 channel_flag_maxclients_unlimited=1 channel_flag_maxfamilyclients_unlimited=0 channel_flag_maxfamilyclients_inherited=1 channel_needed_talk_power=0 channel_forced_silence=0 channel_name_phonetic=The\\strashcan\\shas\\sthe\\strash channel_icon_id=0 channel_flag_private=0";
// One-shot initialization of the tracing subscriber shared by all benchmarks;
// forced via `Lazy::force` at the top of each bench fn so init happens exactly once.
static TRACING: Lazy<()> = Lazy::new(|| tracing_subscriber::fmt().with_test_writer().init());
/// Benchmarks parsing `cmd` as an incoming server-to-client command packet.
fn parse(b: &mut Bencher, cmd: &[u8]) {
    Lazy::force(&TRACING);
    let packet = OutPacket::new_with_dir(Direction::S2C, Flags::empty(), PacketType::Command);
    b.iter(|| {
        let msg = InMessage::new(&packet.header(), cmd);
        msg.unwrap()
    });
}
/// Benchmarks re-serializing the first part of a parsed command message.
///
/// Only `ClientLeftView` and `ChannelList` commands are supported; any other
/// message type aborts the benchmark.
fn write(b: &mut Bencher, cmd: &[u8]) {
    Lazy::force(&TRACING);
    let packet = OutPacket::new_with_dir(Direction::S2C, Flags::empty(), PacketType::Command);
    match InMessage::new(&packet.header(), cmd).unwrap() {
        InMessage::ClientLeftView(parsed) => {
            let part = parsed.iter().next().unwrap().as_out();
            b.iter(|| s2c::OutClientLeftViewMessage::new(&mut iter::once(part.clone())));
        }
        InMessage::ChannelList(parsed) => {
            let part = parsed.iter().next().unwrap().as_out();
            b.iter(|| s2c::OutChannelListMessage::new(&mut iter::once(part.clone())));
        }
        _ => unreachable!("This command type is not supported in this test"),
    }
}
/// Criterion entry point: parse benchmark over the short command.
fn parse_short(c: &mut Criterion) {
    c.bench_function("parse short", |b| parse(b, SHORT_CMD));
}
/// Criterion entry point: parse benchmark over the long command.
fn parse_long(c: &mut Criterion) {
    c.bench_function("parse long", |b| parse(b, LONG_CMD));
}
/// Criterion entry point: write benchmark over the short command.
fn write_short(c: &mut Criterion) {
    c.bench_function("write short", |b| write(b, SHORT_CMD));
}
/// Criterion entry point: write benchmark over the long command.
fn write_long(c: &mut Criterion) {
    c.bench_function("write long", |b| write(b, LONG_CMD));
}
// Register all four benchmark entry points and generate the `main` function.
criterion_group!(benches, parse_short, parse_long, write_short, write_long);
criterion_main!(benches);
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/build/properties.rs | utils/ts-bookkeeping/build/properties.rs | //! Access properties of a connection with the property structs from events.
use std::default::Default;
use std::fmt::Write;
use heck::*;
use t4rust_derive::Template;
use tsproto_structs::book::*;
use tsproto_structs::embrace;
/// Template context wrapping the book declarations; the `Template` derive
/// renders it through `build/Properties.tt` (t4rust) to generate the
/// property-access code.
#[derive(Template)]
#[TemplatePath = "build/Properties.tt"]
#[derive(Debug)]
pub struct Properties<'a>(&'a BookDeclarations);
impl Default for Properties<'static> {
fn default() -> Self { Properties(&DATA) }
}
fn get_ids(struc: &Struct) -> String {
let mut res = String::new();
for i in 0..struc.id.len() {
if !res.is_empty() {
res.push_str(", ");
}
let _ = write!(res, "s{}", i);
}
res
}
/// Builds the comma-separated id argument list like [`get_ids`], but prefixes
/// every non-`str` id with `*` (dereferencing it in the generated code).
fn get_ids2(structs: &[Struct], struc: &Struct) -> String {
    let mut out = String::new();
    for (i, id) in struc.id.iter().enumerate() {
        if i > 0 {
            out.push_str(", ");
        }
        let prop = id.find_property(structs);
        if prop.type_s != "str" {
            out.push('*');
        }
        let _ = write!(out, "s{}", i);
    }
    out
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
ReSpeak/tsclientlib | https://github.com/ReSpeak/tsclientlib/blob/3fbfa26ead0d3e5b38288b55abe3e2d636a97115/utils/ts-bookkeeping/build/messages_to_book_parser.rs | utils/ts-bookkeeping/build/messages_to_book_parser.rs | use std::collections::HashMap;
use std::default::Default;
use std::ops::Deref;
use heck::*;
use t4rust_derive::Template;
use tsproto_structs::book::{PropId, Property};
use tsproto_structs::messages::Field;
use tsproto_structs::messages_to_book::*;
use tsproto_structs::*;
/// Template context wrapping the messages-to-book declarations; the `Template`
/// derive renders it through `build/MessagesToBook.tt` (t4rust) to generate the
/// conversion code.
#[derive(Template)]
#[TemplatePath = "build/MessagesToBook.tt"]
#[derive(Debug)]
pub struct MessagesToBookDeclarations<'a>(&'a messages_to_book::MessagesToBookDeclarations<'a>);
// Transparent access to the wrapped declarations so the template can use the
// inner type's fields/methods directly.
impl<'a> Deref for MessagesToBookDeclarations<'a> {
    type Target = messages_to_book::MessagesToBookDeclarations<'a>;
    fn deref(&self) -> &Self::Target { self.0 }
}
impl Default for MessagesToBookDeclarations<'static> {
fn default() -> Self { MessagesToBookDeclarations(&DATA) }
}
/// Renders a `PropertyValue::<Variant>(name)` expression for property `p`,
/// where the variant is the property's inner type name.
///
/// Panics (with context, instead of the previous bare `unwrap`) when the
/// property has no inner type name — this runs at build time, so a clear
/// message points straight at the offending declaration data.
fn get_property(p: &Property, name: &str) -> String {
    let variant = p
        .get_inner_type_as_name()
        .expect("property has no inner type name usable as a PropertyValue variant");
    format!("PropertyValue::{}({})", variant, name)
}
| rust | Apache-2.0 | 3fbfa26ead0d3e5b38288b55abe3e2d636a97115 | 2026-01-04T20:19:54.636515Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.